index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,900 | 075a8d5236489c858d67fc84e47bc823ff1df48f | from graphics import *
win = GraphWin('Smiley Faces', 400, 400) # give title and dimensions
win.setBackground('cyan')
win.setCoords(0, 0, 400, 400)
def drawFace(center, size, window):
    """Draw one smiley face centred at *center*, scaled by *size*, on *window*.

    BUG FIX: the original ignored the `window` parameter and drew every shape
    on the global `win`, so the function only worked with that one window.
    All shapes now draw on the window actually passed in.
    """
    # head: large green circle, everything else is layered on top of it
    head = Circle(center, size * 20)
    head.setFill("green")
    head.draw(window)
    # mouth: red disc...
    mouth = Circle(center, size * 13)
    mouth.setFill("red")
    mouth.setOutline("red")
    mouth.draw(window)
    # ...mostly covered by a slightly larger, upward-shifted green disc so
    # only a red crescent (the smile) remains visible
    smile = Circle(center, size * 14)
    smile.move(0, size * 4)
    smile.setFill("green")
    smile.setOutline("green")
    smile.draw(window)
    # eyebrows: black discs; the second is a clone mirrored to the right
    eyebrow = Circle(center, size * 4)
    eyebrow.move(-size * 8, size * 10)
    eyebrow.setFill('black')
    eyebrow.draw(window)
    eyebrow2 = eyebrow.clone()
    eyebrow2.draw(window)
    eyebrow2.move(size * 16, 0)
    # green discs overlap the eyebrows to leave only a thin brow arc
    eyecircle = Circle(center, size * 4)
    eyecircle.move(-size * 8, size * 9)
    eyecircle.setFill('green')
    eyecircle.setOutline('green')
    eyecircle.draw(window)
    eyecircle2 = eyecircle.clone()
    eyecircle2.draw(window)
    eyecircle2.move(size * 16, 0)
    # eyelids (brown), then the eyes (orange), then pupils (blue) stack up
    eyelid = Circle(center, size * 3)
    eyelid.move(-size * 8, size * 8)
    eyelid.setFill('brown')
    eyelid.draw(window)
    eyelid2 = eyelid.clone()
    eyelid2.draw(window)
    eyelid2.move(size * 16, 0)
    eye = Circle(center, size * 3)
    eye.move(-size * 8, size * 6)
    eye.setFill('orange')
    eye.draw(window)
    eye2 = eye.clone()
    eye2.draw(window)
    eye2.move(size * 16, 0)
    pupil = Circle(center, size)
    pupil.move(-size * 9, size * 7)
    pupil.setFill('blue')
    pupil.draw(window)
    pupil2 = pupil.clone()
    pupil2.draw(window)
    pupil2.move(size * 16, 0)
    # nose: yellow disc slightly below centre
    nose = Circle(center, size * 3)
    nose.move(0, -size * 2)
    nose.setOutline('yellow')
    nose.setFill('yellow')
    nose.draw(window)
def main():
    """Draw three diagonal runs of faces, then wait for a click and close.

    Cleanup: removed the dead `i = 0` before the loop and the pointless
    `Face = drawFace(...)` bindings (drawFace returns None).
    """
    for i in range(1, 5):
        # right column, top to bottom, increasing radius
        drawFace(Point(350, 490 - i * 110), i * .8, win)
        # bottom left to top right, decreasing radius
        drawFace(Point(-50 + i * 85, -50 + i * 90), 3 - (i * .5), win)
        # bottom right to top left, increasing radius
        drawFace(Point(340 - i * 75, -65 + i * 100), i, win)
    message = Text(Point(200, 380), 'Click anywhere to quit.')
    message.setFill('blue')
    message.draw(win)
    win.getMouse()  # block until the user clicks
    win.close()
main() |
996,901 | 5f7347580b2a9ac61897efb1bb5d74dc45e111fb | # TODO: Change working directory
import os

# Directory the process was launched from, used as the working directory.
# NOTE(review): the TODO above suggests this should eventually be changed
# to a fixed path -- confirm before relying on it.
WORKING_DIRECTORY = os.getcwd()
# Feature flags; presumably switched off for production -- confirm with the
# deployment configuration.
DEBUG_MODE = True
DEVELOP_MODE = True
|
996,902 | 1ce6f0197930932b23ab91feca26214d52deb074 | # coding=utf-8
import bootstrap # noqa
import inspect
import six
from markii import markii
from markii.markii import (
deindent,
getframes,
getprocinfo,
getrusage,
getsource,
resource
)
def test_getrusage():
    """getrusage() returns truthy stats when `resource` imports, else None."""
    try:
        import resource  # noqa
        assert getrusage()
    except ImportError:
        assert getrusage() is None


def test_getsource():
    """getsource() returns a function's source split into a list of lines."""
    def f():
        return 42
    # NOTE(review): the indentation inside the expected literal appears to
    # have been lost in this copy of the file -- it should mirror f's real
    # source text; confirm against the upstream repository.
    assert getsource(f) == """\
def f():
return 42""".split("\n")


def test_getprocinfo_no_resource():
    """getprocinfo() degrades to None when the `resource` module is absent."""
    assert getprocinfo()
    module = inspect.getmodule(getprocinfo)
    old_resource = module.resource
    module.resource = None  # simulate a platform without `resource`
    assert getprocinfo() is None
    module.resource = old_resource  # restore so later tests see the real module


def test_getsource_builtin():
    """Builtins have no retrievable Python source; getsource() returns ""."""
    assert getsource(list) == ""
def test_getprocinfo():
    """getprocinfo() returns a dict of formatted process stats, or None
    when the `resource` module is unavailable.

    BUG FIX: on platforms without `resource` the original asserted
    `process is None` and then fell through to the membership checks,
    which raise TypeError on None; we now return early instead.
    """
    process = getprocinfo()
    if not resource:
        assert process is None
        return
    assert "utime" in process
    assert "stime" in process
    assert "mem" in process
    assert isinstance(process.get("utime"), six.string_types)
    assert isinstance(process.get("stime"), six.string_types)
    assert isinstance(process.get("mem"), six.string_types)
def test_deident():
    """deindent() strips the common leading indentation from source text."""
    # NOTE(review): indentation inside these literals appears lost in this
    # copy of the file; originally `source` should be indented and `target`
    # flush-left -- confirm against the upstream repository.
    source = """\
def foo():
return 42
"""
    target = """\
def foo():
return 42
"""
    assert deindent(source) == target


def test_deident_unindented():
    """Already flush-left source passes through deindent() unchanged."""
    source = """\
def foo():
return 42
"""
    assert deindent(source) == source


def test_getframes():
    """getframes() lists frames innermost-first: raiser, then each caller."""
    def f():
        raise Exception()

    def g():
        return f()

    def h():
        return g()
    try:
        h()
    except Exception:
        frames = getframes()
    assert frames
    assert len(frames) == 4  # f, g, h, plus this test function
    assert frames[0].func == "f"
    assert frames[1].func == "g"
    assert frames[2].func == "h"
    assert frames[3].func == "test_getframes"
def test_getframes_class_instance():
    """Frames are captured correctly through instance and class methods."""
    class Foo(object):
        @classmethod
        def fm(cls):
            cls.idontexist()

        def gm(self):
            return self.fm()

        def f(self):
            self.idontexist()

        def g(self):
            return self.f()
    # instance-method chain: g -> f -> AttributeError
    try:
        Foo().g()
    except AttributeError:
        frames = getframes()
    assert frames
    assert len(frames) == 3
    assert frames[0].func == "f"
    assert frames[1].func == "g"
    assert frames[2].func == "test_getframes_class_instance"
    # classmethod chain: gm -> fm -> AttributeError
    try:
        Foo().gm()
    except AttributeError:
        frames = getframes()
    assert frames
    assert len(frames) == 3
    assert frames[0].func == "fm"
    assert frames[1].func == "gm"
    assert frames[2].func == "test_getframes_class_instance"
def test_getframes_class_instance_gcd():
    """Frames are still usable when a frame's `self` was rebound away.

    FIX: narrowed the bare `except:` to `except Exception:` -- a bare
    except also traps SystemExit/KeyboardInterrupt.
    """
    class Foo(object):
        def f(self):
            self = None  # noqa -- drop the instance reference on purpose
            raise Exception()

        def g(self):
            return self.f()
    try:
        Foo().g()
    except Exception:
        frames = getframes()
    assert frames
    assert len(frames) == 3
    assert frames[0].func == "f"
    assert frames[1].func == "g"
    assert frames[2].func == "test_getframes_class_instance_gcd"
def test_rendering():
    """markii(e) renders a truthy report for a plain exception."""
    def f():
        raise Exception("an error")
    try:
        f()
    except Exception as e:
        assert markii(e)


def test_rendering_unicode():
    """Rendering must not choke on non-ASCII exception messages."""
    # NOTE(review): the literal below looks mojibake'd in this copy of the
    # file; the test's intent (any non-ASCII payload) holds either way.
    def f():
        raise Exception(u"ฮฉโรงโโซหยตโคโฅรท")
    try:
        f()
    except Exception as e:
        assert markii(e)
|
996,903 | 8cbbcabe3d3185e8814f4612aafebf28d6f0c0aa | import csv, json, requests
# set the index and mapping to use
index = 'http://localhost:9200/phd/wellcome'

# Dynamic template: every string field is indexed twice -- an analyzed copy
# under its own name plus a "not_analyzed" copy under "exact" for exact-match
# queries/facets.  (Pre-Elasticsearch-5 "multi_field" syntax.)
mapping = {
    "wellcome" : {
        "dynamic_templates" : [
            {
                "default" : {
                    "match" : "*",
                    "match_mapping_type": "string",
                    "mapping" : {
                        "type" : "multi_field",
                        "fields" : {
                            "{name}" : {"type" : "{dynamic_type}", "index" : "analyzed", "store" : "no"},
                            "exact" : {"type" : "{dynamic_type}", "index" : "not_analyzed", "store" : "yes"}
                        }
                    }
                }
            }
        ]
    }
}

# to delete the index each time, uncomment this
# NOTE(review): despite the comment above, the delete is NOT commented out,
# so the index is dropped on every run -- confirm that is intended.
d = requests.delete(index)

# check the index exists and put a mapping to it if not
im = index + '/_mapping'
exists = requests.get(im)
if exists.status_code != 200:
    ri = requests.post(index)
    r = requests.put(im, json.dumps(mapping))

# get the google doc at
# https://docs.google.com/a/cottagelabs.com/spreadsheets/d/1RXMhqzOZDqygWzyE4HXi9DnJnxjdp0NOhlHcB5SrSZo/edit#gid=0
# currently must manually remove the 4 link columns before the notes at the end,
# as they have the same name as the starting columns. Could ask to rename them,
# but links may not be that useful for vis anyway - they are just calculated from
# the ID columns
# also, need to strip pound signs from the values - done manually
f = csv.DictReader(open('wellcome.csv'))

# for each line, process and load a record of it into the index
for ref in f:
    # could add ID checks here to combine duplicate records instead of creating new
    requests.post('http://localhost:9200/phd/wellcome/', data=json.dumps(ref))
|
996,904 | 1097b01bfde041e2f861682bf8dd77687db4e414 | # CREATE TIC TAC TOE FOR 2 PLAYERS
def display_board(board):
    """Print the 3x3 board; cells 1-9 are used, index 0 is ignored."""
    clear_output()  # NOTE: not defined in this file (IPython helper)
    for row_start in (1, 4, 7):
        print('|'.join(board[row_start:row_start + 3]))
test_board = ['#', 'X','O','X','O','X','O','X','O','X']
display_board(test_board)
def player_input():
    """Prompt Player 1 for a marker until 'X' or 'O' is entered.

    Returns (player1_marker, player2_marker).

    BUG FIX: the original returned ('X', 'O') from *both* branches, so a
    player who chose 'O' was still given 'X'; the marker-assignment code
    after the returns was unreachable and has been removed.
    """
    marker = ''
    # keep asking Player 1 to choose X or O
    while marker != 'X' and marker != 'O':
        marker = input('Player 1, choose X or O: ').upper()
    if marker == 'X':
        return ('X', 'O')
    return ('O', 'X')
import random
def choose_first():
    """Coin-flip which player goes first; returns 'Player 1' or 'Player 2'."""
    return 'Player 1' if random.randint(0, 1) == 0 else 'Player 2'
def place_marker(board, marker, position):
    """Write *marker* into *board* at *position*, mutating the board in place."""
    board[position] = marker
def win_check(board, mark):
    """Return True if *mark* fills any row, column or diagonal of *board*.

    Board cells 1-9 are used (index 0 ignored).

    BUG FIX: the original body was a dangling boolean expression with a
    trailing `or` (a SyntaxError), only examined rows, omitted `== mark`
    on the third cell of each row, and never returned anything.
    """
    lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(all(board[i] == mark for i in line) for line in lines)
win_check(test_board, 'X')

# WHILE LOOP TO KEEP RUNNING THE GAME
print('Welcome to Tic TAC TOE')
# NOTE(review): player_choice(), full_board_check() and replay() are called
# below but never defined in this file -- confirm they exist elsewhere.
while True:
    # --- set everything up: board, who's first, markers ---
    # BUG FIX: was [' '] (a one-cell board, crashing on any move); index 0
    # is deliberately unused so positions 1-9 map to the keypad layout.
    the_board = [' '] * 10
    player1_marker, player2_marker = player_input()
    turn = choose_first()
    print(turn + ' will go first')  # FIX: removed stray ':' (SyntaxError)
    play_game = input('Ready to play? y or n')
    if play_game == 'y':  # FIX: added the missing ':' (SyntaxError)
        game_on = True
    else:
        game_on = False
    # --- game play ---
    while game_on:
        if turn == 'Player 1':
            # show the board, take a move, place it
            display_board(the_board)
            position = player_choice(the_board)
            place_marker(the_board, player1_marker, position)
            # win / tie / pass the turn
            if win_check(the_board, player1_marker):
                display_board(the_board)
                print('PLAYER 1 HAS WON!!')
                game_on = False
            else:
                if full_board_check(the_board):
                    display_board(the_board)
                    print("TIE GAME!")
                    game_on = False
                else:
                    turn = 'Player 2'
        else:
            # show the board, take a move, place it
            display_board(the_board)
            position = player_choice(the_board)
            place_marker(the_board, player2_marker, position)
            # win / tie / pass the turn
            if win_check(the_board, player2_marker):
                display_board(the_board)
                print('PLAYER 2 HAS WON!!')
                game_on = False
            else:
                if full_board_check(the_board):
                    display_board(the_board)
                    print("TIE GAME!")
                    game_on = False
                else:
                    turn = 'Player 1'
    # ask to play again; break out of the outer loop if not
    if not replay():
        break
|
996,905 | 69bbe689fe9051aa87216c11fbf3979d1baf6a87 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# bots/followfollowers.py
from GoogleNews import GoogleNews

# Search Google News for the query and dump every field of every article.
googlenews = GoogleNews()
googlenews.search('Bolsonaro')
result = googlenews.result()
print(len(result))
# Iterate the result list directly instead of indexing via range(len(...)).
for n, article in enumerate(result):
    print(n)
    for key in article:
        print(key, '\n', article[key])
exit()
|
996,906 | 6898257db5185206826b2fa2abd8cc4526660183 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 3 15:54:34 2016
@author: nathanvc
Functions to reorganize DSR data for analysis and plotting
"""
import DSR_basicfn as bf
import numpy as np
import itertools as it
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
import scipy
# -----------
# Calculate female/male offspring counts
# -----------
def FM_Counts(AllBanks, banklist, Counts):
    """Count offspring by sex per 'posted by' group and per bank.

    Returns a dict keyed by poster group ('Offspring', 'Parent', 'Egg',
    'Sperm'); each holds per-bank 'F_cnt'/'M_cnt'/'FM_tot' lists with an
    all-banks sum appended, plus proportions and the M/F-ratio deviation
    for the Parent and Offspring groups.

    Cleanup: the local result dict no longer shadows the function's own
    name, and the unused enumerate index was dropped.
    """
    fm_counts = {'Offspring': {}, 'Parent': {}, 'Egg': {}, 'Sperm': {}}
    for key in fm_counts:
        fm_counts[key] = {'F_cnt': [], 'M_cnt': [], 'FM_tot': []}
    for b in banklist:
        # per-bank count for each "posted by" group; Sex==1/Sex==2 are
        # tallied as female/male (presumed coding -- confirm), records with
        # unknown donors excluded
        for key in fm_counts:
            fm_counts[key]['F_cnt'].append(len([x for x in bf.find(AllBanks[b]['Sex'], 1)
                                                if x not in Counts[b]['Unknown_Inds'] and
                                                AllBanks[b]['PostedBy'][x] == key]))
            fm_counts[key]['M_cnt'].append(len([x for x in bf.find(AllBanks[b]['Sex'], 2)
                                                if x not in Counts[b]['Unknown_Inds'] and
                                                AllBanks[b]['PostedBy'][x] == key]))
            fm_counts[key]['FM_tot'].append(fm_counts[key]['F_cnt'][-1] +
                                            fm_counts[key]['M_cnt'][-1])
    # append an all-banks sum as the final entry of every list
    for key in fm_counts:
        for cnt in fm_counts[key]:
            fm_counts[key][cnt].append(np.sum(fm_counts[key][cnt]))
    # proportions/ratios only for records posted by offspring or parent
    for key in ['Parent', 'Offspring']:
        fm_counts[key]['F_prop'] = [f/t if t > 0 else np.nan
                                    for f, t in zip(fm_counts[key]['F_cnt'],
                                                    fm_counts[key]['FM_tot'])]
        fm_counts[key]['M_prop'] = [m/t if t > 0 else np.nan
                                    for m, t in zip(fm_counts[key]['M_cnt'],
                                                    fm_counts[key]['FM_tot'])]
        # deviation of the male/female ratio from one
        fm_counts[key]['MF_ratio'] = [m/f-1 if f > 0 else np.nan
                                      for m, f in zip(fm_counts[key]['M_cnt'],
                                                      fm_counts[key]['F_cnt'])]
    return fm_counts
# -----------
# Calculate offspring counts & features per donor
# -----------
def offsp_cnts(AllBanks, banklist):
    """Build per-bank offspring counts and per-donor bookkeeping.

    Returns Counts[bank] holding unique donor ids, unknown-donor indices,
    donor -> offspring index lists, offspring counts, mean offspring birth
    years (nan when unknown) and raw description lists.

    BUG FIX: the original pruned donors with `del list[i]` while enumerating
    the same list, which skips the element following every deletion; the
    prune is now a list rebuild.
    """
    Counts = {}
    for b in banklist:
        Counts[b] = {
            'Unq_Donors': [],
            'Unknown_Inds': [],
            'Sp_kids': [],
            'Egg_kids': [],
            'Self_inds': [],
            'Parent_inds': [],
            'Offsp_Cnt': [],
            'Donor_Inds': {},
            'Donor_Desc': {},
            'Offsp_Year': []
        }
        # list of unique donors
        Counts[b]['Unq_Donors'] = list(set(AllBanks[b]['DonorID']))
        # indices for records with unknown donors
        Counts[b]['Unknown_Inds'] = bf.find(AllBanks[b]['DonorID'], 'unknown')
        # remove the 'unknown' pseudo-donor from the unique-donor list
        if Counts[b]['Unknown_Inds']:
            Counts[b]['Unq_Donors'].remove('unknown')
        # records posted by the sperm/egg donors themselves (mostly ignored)
        Counts[b]['Sp_kids'] = bf.find(AllBanks[b]['PostedBy'], 'Sperm')
        Counts[b]['Egg_kids'] = bf.find(AllBanks[b]['PostedBy'], 'Egg')
        # keep only donors with at least one NON-donor-posted offspring
        # (otherwise there is no bank offspring group posted)
        Counts[b]['Unq_Donors'] = [
            d for d in Counts[b]['Unq_Donors']
            if list(np.setdiff1d(bf.find(AllBanks[b]['DonorID'], d),
                                 Counts[b]['Sp_kids'] + Counts[b]['Egg_kids']))
        ]
        # offspring indices per donor, excluding donor-posted records
        for d in Counts[b]['Unq_Donors']:
            Counts[b]['Donor_Inds'][d] = [x for x in bf.find(AllBanks[b]['DonorID'], d)
                                          if x not in Counts[b]['Sp_kids'] +
                                          Counts[b]['Egg_kids']]
        # offspring count per (known) donor
        Counts[b]['Offsp_Cnt'] = [len(Counts[b]['Donor_Inds'][d])
                                  for d in Counts[b]['Unq_Donors']]
        # mean offspring birth year per donor (nan when no years recorded)
        Counts[b]['Offsp_Year'] = []
        for d in Counts[b]['Unq_Donors']:
            temp_yr_list = [AllBanks[b]['Birthyear'][k]
                            for k in Counts[b]['Donor_Inds'][d]]
            if temp_yr_list:
                avg_yr = np.nanmean(temp_yr_list)
            else:
                avg_yr = np.nan
            Counts[b]['Offsp_Year'].append(avg_yr)
        # raw description strings per donor
        for d in Counts[b]['Unq_Donors']:
            Counts[b]['Donor_Desc'][d] = [AllBanks[b]['DonorDesc'][x] for x in
                                          Counts[b]['Donor_Inds'][d]]
    return Counts
# -----------
# Reformat description strings organized by donor,
# split all entries on periods, take only unique entries
# -----------
def desc_split(Counts, banklist):
    """Split each donor's description strings on '. ' into unique snippets,
    then bucket the snippets into crude keyword-matched fields (weight,
    height, blood type, eyes, ancestry hints, possible donor pairs).

    Returns DescList[bank][donor][field] -> list of matching text snippets.
    """
    DescList = {}
    for b in banklist:
        DescList[b] = {}
        for d in Counts[b]['Unq_Donors']:
            DescList[b][d] = {
                'AllText': [],
                'Weight': [],
                'Height': [],
                'BloodType': [],
                'Eyes': [],
                'Jewish': [],
                'AA': [],
                'Latino': [],
                'Pairs': []
            }
            # split every string description on sentence boundaries
            for lst in Counts[b]['Donor_Desc'][d]:
                if type(lst) is str:
                    DescList[b][d]['AllText'].extend(lst.split('. '))
            # strip leading space
            DescList[b][d]['AllText'] = [t.strip() for t in DescList[b][d]['AllText']]
            # keep only unique entries (note there will still be overlap)
            DescList[b][d]['AllText'] = list(set(DescList[b][d]['AllText']))
            # remove empty strings
            if '' in DescList[b][d]['AllText']:
                DescList[b][d]['AllText'].remove('')
            # Most entries contain weight and height, potentially many times,
            # put these in their own fields
            DescList[b][d]['Weight'] = [DescList[b][d]['AllText'][f] for f in
                                        bf.findincludes_list
                                        (DescList[b][d]['AllText'],
                                         ['Weight: '])]
            DescList[b][d]['Height'] = [DescList[b][d]['AllText'][f] for f in
                                        bf.findincludes_list
                                        (DescList[b][d]['AllText'],
                                         ['Height', 'height'])]
            # Blood type
            DescList[b][d]['BloodType'] = [DescList[b][d]['AllText'][f]
                                           for f in bf.findincludes_list
                                           (DescList[b][d]['AllText'],
                                            ['Blood type '])]
            # Eyes
            DescList[b][d]['Eyes'] = [DescList[b][d]['AllText'][f]
                                      for f in bf.findincludes_list
                                      (DescList[b][d]['AllText'],
                                       ['eyes', 'Eyes'])]
            # Jewish ancestry keywords
            DescList[b][d]['Jewish'] = [DescList[b][d]['AllText'][f]
                                        for f in bf.findincludes_list
                                        (DescList[b][d]['AllText'],
                                         ['Jewish', 'jewish',
                                          'Jew', 'jew', 'ashkenazi',
                                          'Ashkenazi'])]
            # African/black (using black doesn't work, gives you karate black
            # belts and black hair), most list "african american" or specific
            # african ancestry
            DescList[b][d]['AA'] = [DescList[b][d]['AllText'][f]
                                    for f in bf.findincludes_list
                                    (DescList[b][d]['AllText'],
                                     ['African', 'african'])]
            # Latino & Hispanic, this list of descriptors will need expanding
            DescList[b][d]['Latino'] = [DescList[b][d]['AllText'][f]
                                        for f in bf.findincludes_list
                                        (DescList[b][d]['AllText'],
                                         ['Mexican', 'mexican', 'Latino',
                                          'latino', 'Hispanic', 'hispanic',
                                          'Cuban', 'cuban', 'Latin-american',
                                          'Peru', 'peru', 'Puerto', 'Dominican',
                                          'dominican', 'Brazil', 'brazil',
                                          'venez', 'Venez', 'Salvador',
                                          'salvador', 'Guatemal', 'guatemal',
                                          'Colombia', 'colombia', 'Hondura',
                                          'hondura', 'Equador', 'equador',
                                          'Bolivia', 'bolivia'])]
            # drop false Latino hits (food/movie mentions etc.)
            # NOTE(review): deleting by index while iterating the index list
            # shifts later indices -- confirm bf.findincludes_list ordering
            # makes this safe.
            for f in bf.findincludes_list(DescList[b][d]['Latino'],
                                          ['food', 'Food', 'movie', 'nut']):
                del DescList[b][d]['Latino'][f]
            # Find text indicating possible pairs of donors
            DescList[b][d]['Pairs'] = [DescList[b][d]['AllText'][f]
                                       for f in bf.findincludes_list
                                       (DescList[b][d]['AllText'],
                                        ['Same', 'same', 'Donor', 'donor', 'CCB', 'NECC',
                                         'Fairfax', 'Xytex', 'TSBC', 'Cryogenic'])]
    return DescList
# ---------
# Organize text information into categorical lists for plots
# ----------
# cats1 are fields that should be divided into a simple yes/no
# based on an entry existing or not in that field in DescList
# cats2 are fields that are multiple yes/no categories pulled from a
# single field
def desc_cat(DescList, Counts, banklist, cats1, cats2):
    """Turn text fields into 0/1 category vectors per bank.

    cats1: fields scored 1 iff the donor's DescList field is non-empty.
    cats2: eye-colour categories, all read from the 'Eyes' field and also
    matched against their lower-case spellings.

    Cleanup: the paired `if`/`if not` tests were collapsed into single
    conditional appends, and the per-bank dict is built with a comprehension.
    """
    # lower-case partners for cats2 (cats2 is specific to eyes here; the
    # original noted this pairing needs fixing to be general)
    eye_cats_lwr = ['blue', 'green', 'hazel', 'brown']
    DescCat = {}
    for b in banklist:
        DescCat[b] = {c: [] for c in cats1 + cats2}
        # loop through individual donors in the same order as Counts
        for d in Counts[b]['Unq_Donors']:
            # 1 if the category has any matching text, else 0
            for categ in cats1:
                DescCat[b][categ].append(1 if DescList[b][d][categ] else 0)
            # cats2 all read from the same 'Eyes' field in DescList
            for i, eye in enumerate(cats2):
                hit = bf.findincludes_list(DescList[b][d]['Eyes'],
                                           [eye, eye_cats_lwr[i]])
                DescCat[b][eye].append(1 if hit else 0)
    return DescCat
# function for formatting text categories that take on a multiple values
# here written for bloodtype, but can be made to be more general
def cont_cat(DescList, Counts, banklist, contcats):
    """Collect multi-valued text categories (currently only blood type).

    Returns ContCat[bank][cat]: one list per donor (in
    Counts[bank]['Unq_Donors'] order) of extracted values; empty list when
    the donor has no entries.

    BUG FIX: the inner loop appended `BloodType[0]` for every entry rather
    than the loop variable, so a donor with several blood-type strings got
    the first value duplicated; each entry is now parsed individually.
    """
    ContCat = {}
    for b in banklist:
        ContCat[b] = {}
        for c in contcats:
            ContCat[b].update({c: []})
        # loop through individual donors in the same order as Counts
        for d in Counts[b]['Unq_Donors']:
            for categ in contcats:
                if categ == 'BloodType':
                    if DescList[b][d]['BloodType']:
                        # drop the 'Blood type ' prefix (11 chars) and any
                        # trailing period from every entry
                        templist = [ent[11:].strip('.')
                                    for ent in DescList[b][d]['BloodType']]
                        ContCat[b][categ].append(templist)
                    else:
                        ContCat[b][categ].append([])
    return ContCat
# function for formatting text categories that take on a multiple values
# But no filtering or reshaping of list
# convert categorical list of lists to 0/1 valued vectors depending on category
def convert_cat(List_vals, list_cat, banklist, categories):
    """Convert a categorical list-of-lists into per-category 0/1 vectors.

    For each bank, every value in List_vals[bank][list_cat] contributes a 1
    to each category it matches (via bf.findincludes) and a 0 otherwise.

    Cleanup: the local result dict no longer shadows the function's own name.
    """
    converted = {}
    for b in banklist:
        converted[b] = {c: [] for c in categories}
        for val in List_vals[b][list_cat]:
            for c in categories:
                converted[b][c].append(1 if bf.findincludes(val, c) else 0)
    return converted
def parse_weight(wt_list):
    """Normalise free-text 'Weight:' strings into a mean weight (lbs) per donor.

    wt_list: list (one per donor) of lists of raw weight strings.
    Returns one mean weight per donor, restricted to plausible adult values
    (120-300 lbs); donors with no usable value yield nan (np.mean of []).

    BUG FIX: the final filter compared strings with `is not ''` (identity,
    not equality -- a SyntaxWarning on modern Python and unreliable for
    non-interned strings); it now uses `!= ''`.  Commented-out code removed.
    """
    wt_list_out = wt_list.copy()
    units = []
    pound_list = []
    for i, wt in enumerate(wt_list):
        units.append([])
        # strip the 'Weight:' prefix from each raw entry
        wt_list_out[i] = [st.split('Weight:')[-1].strip() for st in wt]
        for k, w in enumerate(wt_list_out[i]):
            # height-like or purely descriptive entries carry no usable weight
            if 'ft' in w or "\'" in w or '\"' in w or '/' in w or 'Normal' in w or 'Medium Build' in w or 'Thin' in w or 'Slim' in w:
                wt_list_out[i][k] = ''
                units[i].append('')
                continue
            # one-off, hand-curated fixups for oddly formatted entries
            if w == '58 kg (128 lbs)':
                wt_list_out[i][k] = 128
                units[i].append('')
                continue
            if w == '175lbs (80kg)':
                wt_list_out[i][k] = 175
                units[i].append('')
                continue
            if w == '161 (73 kg)':
                wt_list_out[i][k] = 161
                units[i].append('')
                continue
            if w == '76 (168 lbs)':
                wt_list_out[i][k] = 168
                units[i].append('')
                continue
            if w == '? maybe 185':
                wt_list_out[i][k] = 185
                units[i].append('')
                continue
            # split any trailing unit text ('lbs', 'kg', ...) off the number
            spl = bf.last_digit_loc(wt_list_out[i][k])
            if spl and spl < len(w) - 1:
                units[i].append(w[spl + 1:].strip())
                wt_list_out[i][k] = w[:spl + 1]
            else:
                units[i].append('')
            # ranges like '150-160' become their midpoint
            if '--' in wt_list_out[i][k]:
                wt_list_out[i][k] = np.mean([float(w) for w in wt_list_out[i][k].strip(' ').split('--')])
            elif '-' in wt_list_out[i][k]:
                wt_list_out[i][k] = np.mean([float(w) for w in wt_list_out[i][k].strip(' ').split('-')])
            else:
                try:
                    float(wt_list_out[i][k])
                except ValueError:
                    wt_list_out[i][k] = ''
                else:
                    wt_list_out[i][k] = float(wt_list_out[i][k])
                    # convert metric entries to pounds
                    if units[i][k] == 'kg' or units[i][k] == 'kgs':
                        wt_list_out[i][k] = 2.20462 * wt_list_out[i][k]
    for i, wt in enumerate(wt_list_out):
        # keep plausible adult weights only, then average per donor
        mn_w = [w for w in wt if w != '' and w > 120 and w < 300]
        pound_list.extend([np.mean(mn_w).tolist()])
    return (pound_list)
# -------------
# Arrange count-per donor data, bank id, individual donor ids, offpring birth
# year into a list of lists for plotting/reference.
# lists are appended in order of banklist
# --------------
def cnts_list(Counts, banklist):
    """Flatten per-bank donor data into parallel lists for plotting.

    Returns (allbanks_cnts, allbanks_bkind, list_allcnts, donorid_list,
    allbanks_offspyr), appended in banklist order; list_allcnts gets the
    concatenation of all banks as its final entry.
    """
    list_allcnts = []
    allbanks_cnts = []
    donorid_list = []
    allbanks_bkind = []
    allbanks_offspyr = []
    for bank_idx, bank in enumerate(banklist):
        bank_cnts = Counts[bank]['Offsp_Cnt']
        list_allcnts.append(bank_cnts)
        allbanks_cnts = allbanks_cnts + bank_cnts
        donorid_list = donorid_list + Counts[bank]['Unq_Donors']
        allbanks_bkind = allbanks_bkind + [bank_idx] * len(bank_cnts)
        allbanks_offspyr = allbanks_offspyr + Counts[bank]['Offsp_Year']
    # final entry: all banks combined
    list_allcnts.append(allbanks_cnts)
    return (allbanks_cnts, allbanks_bkind, list_allcnts, donorid_list, allbanks_offspyr)
# -------------
# Arrange feature data into list for all banks
# for plotting, lists are appended in order of banklist
# --------------
def feat_list(DescCat, banklist, cats):
    """Concatenate per-bank feature vectors into one list per category,
    appended in banklist order."""
    allbanks_cat = {c: [] for c in cats}
    for bank in banklist:
        for c in cats:
            allbanks_cat[c] = allbanks_cat[c] + DescCat[bank][c]
    return allbanks_cat
# Function to compile multiple categories into a single categorical list
# with integer entries. This function generates a unique integer for
# every possible combination of categories, use for "like" features
# (like eye color), label is a shorter reference to each category (use for
# generating shorter labels per integer automatically).
# ------------------
def compile_cat(allbanks_cat, cats, lab):
    """Collapse several 0/1 category vectors into one integer-coded vector.

    Every non-empty combination of categories receives its own integer code
    (assigned in combination order, smaller groups first); 0 means none.
    `lab` supplies a short label per category; returns (cat_out, cat_lab)
    where cat_lab[k] is the '/'-joined label for code k.
    """
    comp_cat = []
    # one row per category: that category's 0/1 vector across all donors
    for c in cats:
        comp_cat.append(allbanks_cat[c])
    # loop through groups of all possible sizes
    k = 1
    cat_out = [0]*len(allbanks_cat[cats[0]])
    cat_lab = ['None']
    for n in range(1, len(cats)+1):
        for i in it.combinations(range(0, len(cats)), n):
            temp = []
            lab_temp = ''
            for _ in i:
                # make temporary list of lists for this grouping
                temp.append(comp_cat[_])
                # make label entry
                lab_temp += lab[_]
                lab_temp += '/'
            lab_temp = lab_temp[:-1]  # drop the trailing '/'
            # rows whose element-wise product is 1 have every category in
            # this combination set
            new_inds = bf.find(list(np.prod(np.array(temp), 0)), 1)
            for b in new_inds:
                # Enter value k for all with this combo.
                # Note that higher values (more categories) overwrite lower.
                cat_out[b] = k
            cat_lab.append(lab_temp)
            k = k+1
    return (cat_out, cat_lab)
# Recategorize data into fewer categories
# red_cats is a list of lists, each entry is input categories to group.
# Entries in comp_cat entries that match red_cats[i] will be changed to i.
# Values not included red_cats are [] in output
# -------------------
def reduce_cat(comp_cat, red_cats):
    """Remap integer category codes into fewer groups.

    red_cats is a list of lists; entries of comp_cat matching red_cats[i]
    become i in the output.  Codes not covered by red_cats stay as [].
    """
    red_out = [[] for _ in range(len(comp_cat))]
    for new_code, group in enumerate(red_cats):
        for idx in bf.find_list(comp_cat, group):
            red_out[idx] = new_code
    return red_out
# -----------------
# Function to concatenate all categories into lists over all donors that
# that include text
# -----------------
def desc_text_list(DescList, Counts, banklist, cats):
    """Concatenate each category's per-donor text lists across all banks,
    donors taken in Counts[bank]['Unq_Donors'] order."""
    collected = {c: [] for c in cats}
    for bank in banklist:
        for donor in Counts[bank]['Unq_Donors']:
            for categ in cats:
                collected[categ].append(DescList[bank][donor][categ])
    return collected
# -----------------
# Input list of lists of strings
# Join strings in each sub-list with input string in beetween
# Output list of concatenated strings
# -----------------
def single_desc_list(InputList, joinstr):
    """Join each sub-list of strings with *joinstr*; return the joined strings."""
    return [joinstr.join(parts) for parts in InputList]
# ----------
# NLP functions
# ----------
# function to tokenize, stem & remove stop words from a language string
# ----------
def clean_split_stem(rawstring):
    """Tokenise *rawstring* on whitespace, Porter-stem each token, then drop
    English stop words.

    NOTE(review): stop words are removed *after* stemming, so stemmed forms
    that no longer match the stop list (e.g. 'onli' from 'only') survive --
    confirm this ordering is intended.
    """
    stop = stopwords.words('english')
    out_str = rawstring.split()
    porter = PorterStemmer()
    out_str = [porter.stem(word) for word in out_str]
    out_str = [word for word in out_str if word not in stop]
    return out_str
# ----------
# function to calculate euclidean distance over all possible pairs of vectors
# ----------
def dist_all(vect_in):
    """Euclidean distance for every unordered pair of vectors in *vect_in*.

    Returns (DistAll, Coords): distances and the matching (i, j) index
    pairs, in itertools.combinations order.

    FIX: import scipy.spatial.distance explicitly -- the module only does
    `import scipy`, which does not guarantee the `scipy.spatial` subpackage
    is loaded, so `scipy.spatial.distance.euclidean` could fail at runtime.
    """
    from scipy.spatial.distance import euclidean
    DistAll = []
    Coords = []
    for j in it.combinations(range(0, len(vect_in)), 2):
        DistAll.append(euclidean(vect_in[j[0]], vect_in[j[1]]))
        Coords.append(j)
    return (DistAll, Coords)
|
996,907 | 4c19960bdf9437c6745358b6058793a0d61c1660 | ๏ปฟ# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 18:56:32 2018
@author: nnir
"""
import numpy as np
import os
#import six.moves.urllib as urllib
import sys
#import tarfile
import tensorflow as tf
#import zipfile
#from collections import defaultdict
#from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from xml.dom.minidom import Document
import datetime #่ทๅๆถ้ด๏ผ่ฎก็ฎ็จๅบๆง่กๆถ้ด
import shutil #ๅ ้ค้็ฉบๆไปถๅคน
import XmlRectFusion
import thumbGeneration as tG
import cv2
import WindowDetection as WD
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
from distutils.version import StrictVersion
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# This is needed to display the images.
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def cut(inputFilename,vx,vy,stepRatio,useMaskFlag):
    """Split one large image into overlapping vx-by-vy tiles saved to disk.

    Returns the list of saved tile paths.  When useMaskFlag is 1 and a
    companion mask image exists (filename with "org" replaced by "mask"),
    a tile is kept only if any of a coarse grid of sample pixels around its
    centre hits a white (255) mask region.

    NOTE(review): relies on a module-level `workPath` that is not defined
    in this function -- confirm it is set before calling.
    """
    # look for the mask alongside the source image ("org" -> "mask")
    inputMaskName = inputFilename.replace("org","mask")
    if useMaskFlag == 1:
        if os.path.exists(inputMaskName)==True :
            mask = Image.open(inputMaskName)
        else:
            useMaskFlag = 0
    # sample the mask on a num_grid x num_grid lattice near the tile centre
    num_grid = 2
    ng_step_x = vx//(num_grid*2)
    ng_step_y = vy//(num_grid*2)
    outputPath = os.path.join(workPath,"Temp","cutimages")
    if os.path.exists(outputPath)!=True:
        os.mkdir(outputPath)
    im =Image.open(inputFilename)
    # window step sizes (overlap fraction = 1 - stepRatio)
    dx = int(vx*stepRatio)
    dy = int(vy*stepRatio)
    xindex = 0
    yindex = 0
    index = 0
    # start the window at the top-left corner
    x1 = 0
    y1 = 0
    x2 = vx
    y2 = vy
    print ("ๅพๅๅคงๅฐ๏ผ",im.size)  # image size; im.size is (width, height)
    w = im.size[0]  # width
    h = im.size[1]  # height
    TEST_IMAGE_PATHS = []
    # vertical scan over rows of tiles
    while y2 <= h:
        # horizontal scan within the row
        xindex = 0
        while x2 <= w:
            outputFilename = os.path.join(outputPath, "image_" + str(yindex) + "_" + str(xindex) + ".jpg")
            center_x = (x1+x2)//2
            center_y = (y1+y2)//2
            if useMaskFlag == 1:
                shootMaskFlag = 0
                for r in range(num_grid):
                    if shootMaskFlag == 1:
                        break
                    for c in range(num_grid):
                        # keep the tile if any sampled mask pixel is white (255)
                        if mask.getpixel((center_x+ng_step_x*r,center_y+ng_step_y*c))==255:
                            shootMaskFlag = 1
                            break
                if shootMaskFlag == 1:
                    im2 = im.crop((x1, y1, x2, y2))
                    im2.save(outputFilename)
                    TEST_IMAGE_PATHS.append(outputFilename)
            else:
                # no mask filtering: save every tile
                im2 = im.crop((x1, y1, x2, y2))
                im2.save(outputFilename)
                TEST_IMAGE_PATHS.append(outputFilename)
            x1 = x1 + dx
            x2 = x1 + vx
            xindex = xindex + 1
            index = index + 1
        # reset to the left edge and move the window down one step
        x1 = 0
        x2 = vx
        y1 = y1 + dy
        y2 = y1 + vy
        yindex = yindex + 1
    # report how many sub-images the split produced
    print ("ๅพ็ๅๅฒๆๅ๏ผๅๅฒๅพๅฐ็ๅญๅพ็ๆฐไธบ%d"%(len(TEST_IMAGE_PATHS)))
    return TEST_IMAGE_PATHS
def run_inference_for_single_image(image,sess,graph):
    """Run one detection forward pass on a single image array.

    Feeds the image (expanded to a batch of one) through the graph's
    'image_tensor' input and collects whichever detection outputs the graph
    exposes.  Returns output_dict with the batch dimension stripped and
    types converted (counts -> int, classes -> uint8).
    """
    # Get handles to input and output tensors
    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        # only request tensors the graph actually provides
        for key in ['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks' ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
        # Run inference on a batch of one: [1, H, W, 3]
        output_dict = sess.run(tensor_dict,
                               feed_dict={image_tensor: np.expand_dims(image, 0)})
        # all outputs are float32 numpy arrays, so convert types as appropriate
        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict[
            'detection_classes'][0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def load_image_into_numpy_array(image):
    """Convert a PIL-style image (exposing .size and .getdata()) into an
    (height, width, 3) uint8 numpy array."""
    im_width, im_height = image.size
    flat = np.array(image.getdata())
    return flat.reshape((im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_images(workPath, model_name, path_to_labels, inputPath,
                             outputXMLpath, DetectionWindow=300, stepRatio=0.5,
                             scoreThreshold=0.5, useMaskFlag=1):
    """Detect defects in every JPG under ``inputPath`` with a sliding window.

    Each large image is cut into ``DetectionWindow``-sized tiles (stride
    ``DetectionWindow * stepRatio``), every tile is run through the frozen
    TensorFlow detection graph, and boxes scoring above ``scoreThreshold``
    are mapped back to full-image coordinates and written as one
    Pascal-VOC-style XML file per image into ``outputXMLpath``.

    Parameters
    ----------
    workPath : str -- working directory (temp tiles, FinishFlag.txt)
    model_name : str -- folder containing frozen_inference_graph.pb
    path_to_labels : str -- folder containing label_map.pbtxt
    inputPath : str -- folder of input JPGs (the "org" folder)
    outputXMLpath : str -- destination folder for the result XMLs
    DetectionWindow : int -- tile edge length in pixels
    stepRatio : float -- stride as a fraction of the tile size
    scoreThreshold : float -- minimum detection score to keep a box
    useMaskFlag : int -- 1 to filter boxes by the tower-region mask

    NOTE(review): also reads the module globals ``rootPath``,
    ``outputMASKpath``, ``outputERRpath`` and ``outputORGERRpath`` and the
    helpers ``cut``, ``run_inference_for_single_image``, ``XmlRectFusion``
    and ``tG`` -- confirm all are defined before calling.
    """
    print("Detection running; please keep this window open or the run will abort.")
    # Record the overall start time.
    start = datetime.datetime.now()
    startTime = start.strftime('%Y-%m-%d %H:%M:%S.%f')
    print("Detection task started at: " + startTime)

    # Paths to the frozen graph and the label map.
    MODEL_NAME = model_name
    PATH_TO_CKPT = os.path.join(MODEL_NAME, 'frozen_inference_graph.pb')
    PATH_TO_LABELS = os.path.join(path_to_labels, 'label_map.pbtxt')
    NUM_CLASSES = 10

    # Load the frozen detection graph (TensorFlow 1.x API).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)

    # Re-create the temporary folder used for the image tiles.
    TempPath = os.path.join(workPath, "Temp")
    if os.path.exists(TempPath):
        shutil.rmtree(TempPath)
    os.mkdir(TempPath)

    # Count the JPG files awaiting detection (progress reporting only).
    num_files = 0
    for file in os.listdir(inputPath):
        fname, ftype = os.path.splitext(file)
        if ftype == ".JPG" or ftype == ".jpg":
            num_files = num_files + 1
    print("Number of images to detect: " + str(num_files))

    with tf.Session(graph=detection_graph) as sess:
        file_index = 0
        for file in os.listdir(inputPath):
            fname, ftype = os.path.splitext(file)
            if ftype != ".JPG" and ftype != ".jpg":
                continue
            file_index = file_index + 1
            print("Detecting image %d: %s" % (file_index, file))
            sPDstart = datetime.datetime.now()  # single-picture timer
            sPDstartTime = sPDstart.strftime('%Y-%m-%d %H:%M:%S.%f')
            print("Start: " + sPDstartTime)
            filepath = os.path.join(inputPath, file)

            # Probe the file first so a truncated image cannot abort the run.
            try:
                Image.open(filepath)
            except (OSError, NameError):
                print('OSError, Path:', filepath)
                continue

            # Load the tower-region mask paired with this image, if any.
            inputMaskpath = filepath.replace("org", "mask")
            uMF_onePic = useMaskFlag
            if uMF_onePic == 1:
                if os.path.exists(inputMaskpath):
                    mask = cv2.imread(inputMaskpath, cv2.IMREAD_GRAYSCALE)
                    # Dilate the white tower region so boxes near its rim survive.
                    kernel = np.ones((50, 50), np.uint8)
                    mask = cv2.dilate(mask, kernel)
                else:
                    uMF_onePic = 0

            # 1. Cut the large image into overlapping tiles.
            subWidth = DetectionWindow
            WinStep = int(subWidth * stepRatio)
            TEST_IMAGE_PATHS = cut(filepath, subWidth, subWidth, stepRatio, useMaskFlag)
            numCuts = len(TEST_IMAGE_PATHS)

            # 2. Build the Pascal-VOC XML header for this image.
            doc = Document()
            annotation = doc.createElement("annotation")
            doc.appendChild(annotation)
            folder = doc.createElement("folder")
            annotation.appendChild(folder)
            filename = doc.createElement("filename")
            annotation.appendChild(filename)
            path = doc.createElement("path")
            annotation.appendChild(path)
            source = doc.createElement("source")
            annotation.appendChild(source)
            database = doc.createElement("database")
            source.appendChild(database)
            size = doc.createElement("size")
            annotation.appendChild(size)
            width = doc.createElement("width")
            size.appendChild(width)
            height = doc.createElement("height")
            size.appendChild(height)
            depth = doc.createElement("depth")
            size.appendChild(depth)
            segmented = doc.createElement("segmented")
            annotation.appendChild(segmented)
            img = Image.open(filepath)
            folder.appendChild(doc.createTextNode(os.path.split(inputPath)[-1]))
            filename.appendChild(doc.createTextNode(file))
            path.appendChild(doc.createTextNode(filepath))
            database.appendChild(doc.createTextNode("Unknown"))
            width.appendChild(doc.createTextNode(str(img.size[0])))
            height.appendChild(doc.createTextNode(str(img.size[1])))
            depth.appendChild(doc.createTextNode("3"))
            segmented.appendChild(doc.createTextNode("0"))
            cutstart = datetime.datetime.now()
            cutstartTime = cutstart.strftime('%Y-%m-%d %H:%M:%S.%f')
            print(cutstartTime)

            # 3. Detect every tile and append surviving boxes to the XML.
            objectNum = 0
            # Per-class box counters (every category except "normal").
            objectNumByCates = [0] * (len(categories) - 1)
            for cutIndex, imgCut_path in enumerate(TEST_IMAGE_PATHS):
                # Tile file names end in ..._<y>_<x>; recover the grid indices.
                imgCut_pre, ext = os.path.splitext(imgCut_path)
                xCutIndex = int(imgCut_pre.split("_")[-1])
                yCutIndex = int(imgCut_pre.split("_")[-2])
                image = Image.open(imgCut_path)
                output_dict = run_inference_for_single_image(image, sess, detection_graph)
                for index, boxScore in enumerate(output_dict['detection_scores']):
                    if boxScore > scoreThreshold:
                        objectNameIndex = output_dict['detection_classes'][index]
                        objectName = categories[objectNameIndex - 1]['name']
                        if objectName == "normal":
                            continue
                        passedBox = output_dict['detection_boxes'][index]
                        # Translate normalized tile coordinates back into the
                        # big image. Detector box order is (ymin, xmin, ymax, xmax).
                        x_bais = xCutIndex * WinStep
                        y_bais = yCutIndex * WinStep
                        xmin_value = int(passedBox[1] * subWidth + x_bais)
                        ymin_value = int(passedBox[0] * subWidth + y_bais)
                        xmax_value = int(passedBox[3] * subWidth + x_bais)
                        ymax_value = int(passedBox[2] * subWidth + y_bais)
                        # Mask filter: drop a box only when ALL four corners lie
                        # outside the (dilated) tower region.
                        # FIX: the original mixed "or"/"and"; by precedence a box
                        # was dropped whenever its top-left corner alone was
                        # outside, contradicting the stated "4 corners" intent.
                        if uMF_onePic == 1:
                            if (mask[ymin_value, xmin_value] != 255
                                    and mask[ymin_value, xmax_value] != 255
                                    and mask[ymax_value, xmin_value] != 255
                                    and mask[ymax_value, xmax_value] != 255):
                                continue
                        objectNum = objectNum + 1
                        objectNumByCates[objectNameIndex - 1] = objectNumByCates[objectNameIndex - 1] + 1
                        object = doc.createElement("object")
                        annotation.appendChild(object)
                        name = doc.createElement("name")
                        object.appendChild(name)
                        pose = doc.createElement("pose")
                        object.appendChild(pose)
                        truncated = doc.createElement("truncated")
                        object.appendChild(truncated)
                        difficult = doc.createElement("difficult")
                        object.appendChild(difficult)
                        bndbox = doc.createElement("bndbox")
                        object.appendChild(bndbox)
                        xmin = doc.createElement("xmin")
                        bndbox.appendChild(xmin)
                        ymin = doc.createElement("ymin")
                        bndbox.appendChild(ymin)
                        xmax = doc.createElement("xmax")
                        bndbox.appendChild(xmax)
                        ymax = doc.createElement("ymax")
                        bndbox.appendChild(ymax)
                        score = doc.createElement("score")
                        object.appendChild(score)
                        name.appendChild(doc.createTextNode(objectName))
                        pose.appendChild(doc.createTextNode("Unspecified"))
                        truncated.appendChild(doc.createTextNode("0"))
                        difficult.appendChild(doc.createTextNode("0"))
                        xmin.appendChild(doc.createTextNode(str(xmin_value)))
                        ymin.appendChild(doc.createTextNode(str(ymin_value)))
                        xmax.appendChild(doc.createTextNode(str(xmax_value)))
                        ymax.appendChild(doc.createTextNode(str(ymax_value)))
                        score.appendChild(doc.createTextNode(str(boxScore)))
                # Progress file for external monitors, rewritten per tile.
                logfile = open(os.path.join(TempPath, "temp.txt"), 'w')
                logfile.write("(" + str(num_files) + "/" + str(file_index) + ")" + file + ":" + str(numCuts) + "/" + str(cutIndex + 1))
                logfile.close()
                print("(" + str(num_files) + "/" + str(file_index) + ")" + file + ": " + str(numCuts) + "/" + str(cutIndex + 1))

            # 4. Persist the results for this image.
            # (1) Save the XML, time-stamped so reruns never collide.
            OnePicEnd = datetime.datetime.now()
            OPEtime = OnePicEnd.strftime('%Y%m%d%H%M%S')
            xmlfilename = fname + "_" + OPEtime + ".XML"
            xmlfullname = os.path.join(outputXMLpath, xmlfilename)
            XMLfile = open(xmlfullname, "w")
            XMLfile.write(doc.toprettyxml(indent=" "))
            XMLfile.close()
            # (optional) merge overlapping rectangles inside the XML.
            XmlRectFusion.SingleXmlRectFusion(xmlfullname, inputMaskpath, useMaskFlag)
            # (2) Generate the thumbnail and the annotated error thumbnail.
            tG.GenerationThumbAndErrThumb(filepath, xmlfullname, rootPath, quality=10)
            # Per-image timing.
            sPDend = datetime.datetime.now()
            sPDendTime = sPDend.strftime('%Y-%m-%d %H:%M:%S.%f')
            print(sPDendTime)
            print('Single image detection time: %s Seconds' % (sPDend - sPDstart))
            print()
            # Append per-image statistics to dTemp.txt.
            detailsLogfile = open(os.path.join(outputMASKpath, 'dTemp.txt'), 'a')
            detailsLogfile.write(fname + "\n")
            detailsLogfile.write("start:" + sPDstartTime + "\n")
            detailsLogfile.write("end:" + sPDendTime + "\n")
            detailsLogfile.write("detection useTime:%s" % (sPDend - sPDstart) + "\n")
            detailsLogfile.write("abnormNum:%s" % (str(objectNum)) + "\n")
            detailsLogfile.close()

    # Back up the error thumbnails (CJY 2019.6.20).
    shutil.copytree(outputERRpath, outputORGERRpath)
    # Drop the temporary tile folder.
    if os.path.exists(TempPath):
        shutil.rmtree(TempPath)
    # Flag that the whole batch finished, for external watchers.
    finishfile = open(os.path.join(workPath, 'FinishFlag.txt'), 'w')
    finishfile.write("1")
    finishfile.close()
    end = datetime.datetime.now()
    endTime = end.strftime('%Y-%m-%d %H:%M:%S.%f')
    print("Detection task finished at: " + endTime)
    print('Total detection run time: %s Seconds' % (end - start))
# 1. Default input parameters.
workPath = ""        # working directory
rootPath = ""        # root folder holding the images to detect ("org")
model_name = ""      # detection model folder
path_to_labels = ""  # label map folder
DetectionWindow = 640
stepRatio = 0.5
scoreThreshold = 0.5
W_model_name = ""        # window-detection model folder
W_path_to_labels = ""    # window-detection label map folder
W_resizeRatio = 0.125
W_scoreThreshold = 0.5

# 2. Command-line arguments. Example:
# python Detection.py E:/myWork/clear/20181115412/20181115/1019/1 D:/ADoWS/DetectionAbnormity/ModelInUse D:/ADoWS/DetectionAbnormity/Data 300 0.5 0.5 D:/ADoWS/DetectionWindow/ModelInUse D:/ADoWS/DetectionWindow/Data 0.25 0.5
# Accepted argv lengths are 4-7 and 9-11 (8 is rejected: the two
# window-detection paths must be supplied together).
argc = len(sys.argv)
if argc not in (4, 5, 6, 7, 9, 10, 11):
    print('Usage: python Detection.py rootPath model_name path_to_labels DetectionWindow stepRatio scoreThreshold')
    exit(1)
workPath = os.path.dirname(sys.argv[0])
rootPath = sys.argv[1]
model_name = sys.argv[2]
path_to_labels = sys.argv[3]
# Optional parameters fall back to safe defaults when out of range.
if argc >= 5:
    DetectionWindow = int(sys.argv[4]) if int(sys.argv[4]) > 0 else 300
if argc >= 6:
    stepRatio = float(sys.argv[5]) if float(sys.argv[5]) > 0 else 0.5
if argc >= 7:
    scoreThreshold = float(sys.argv[6]) if (float(sys.argv[6]) >= 0 and float(sys.argv[6]) <= 1) else 0.5
# Window-detection parameters (only present with 9+ arguments).
if argc >= 9:
    W_model_name = sys.argv[7]
    W_path_to_labels = sys.argv[8]
if argc >= 10:
    W_resizeRatio = float(sys.argv[9]) if float(sys.argv[9]) > 0 else 0.25
if argc >= 11:
    W_scoreThreshold = float(sys.argv[10]) if (float(sys.argv[10]) >= 0 and float(sys.argv[10]) <= 1) else 0.5

# 3. Derived sub-paths under the root folder.
inputPath = rootPath + "/org"
outputXMLpath = rootPath + "/xml"
outputTHpath = rootPath + "/th"
outputERRpath = rootPath + "/err"
outputORGERRpath = rootPath + "/orgerr"
outputXMLwithoutSpath = rootPath + "/xmlWs"
outputMASKpath = rootPath + "/mask"
outputXMLMpath = rootPath + "/xml_m"
outputERRMpath = rootPath + "/err_m"


def _recreate_dir(dirpath):
    # Remove stale results from a previous run, then make a fresh folder.
    if os.path.exists(dirpath):
        shutil.rmtree(dirpath)
    os.mkdir(dirpath)


if not os.path.exists(rootPath):
    exit(1)
_recreate_dir(outputXMLpath)
_recreate_dir(outputTHpath)
_recreate_dir(outputERRpath)
if not os.path.exists(outputMASKpath):
    os.mkdir(outputMASKpath)
# The org-err backup is re-created by copytree, so only remove it here.
if os.path.exists(outputORGERRpath):
    shutil.rmtree(outputORGERRpath)
if __name__ == "__main__":
    # Clear the per-image stats log before the window-detection pass.
    dtemp_file = os.path.join(outputMASKpath, 'dTemp.txt')
    if os.path.exists(dtemp_file):
        os.remove(dtemp_file)
    WD.run_inference_for_images(workPath, W_model_name, W_path_to_labels,
                                inputPath, outputMASKpath, W_resizeRatio,
                                W_scoreThreshold)
    # Clear the image/xml name mapping from any previous run.
    namedict_file = os.path.join(rootPath, "img_xml_namedict.txt")
    if os.path.exists(namedict_file):
        os.remove(namedict_file)
    print("scoreThreshold:", scoreThreshold)
    run_inference_for_images(workPath, model_name, path_to_labels, inputPath,
                             outputXMLpath, DetectionWindow, stepRatio,
                             scoreThreshold, 1)
996,908 | 430257b0b41e9f8bcbcb34199ab78ffb2e8eb3ed | #! /usr/bin/env python3
import sys
input_file = "Baby_Shark.txt"
print("Output: Baby_Shark.txt")
# Echo the file line by line, stripping surrounding whitespace.
with open(input_file, 'r', newline='') as song:
    for line in song:
        print(line.strip())
|
996,909 | c854d1a8b23e9c1d5ca26c3222ef60f02076cc3d | """Locally Selective Combination of Parallel Outlier Ensembles (LSCP).
Adapted from the original implementation.
"""
# Author: Zain Nasrullah <zain.nasrullah.zn@gmail.com>
# License: BSD 2 clause
# system imports
import collections
import warnings
# numpy
import numpy as np
# sklearn imports
from sklearn.neighbors import KDTree
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.validation import check_random_state
# PyOD imports
from pyod.models.base import BaseDetector
from pyod.utils.stat_models import pearsonr
from pyod.utils.utility import argmaxn
from pyod.utils.utility import generate_bagging_indices
from pyod.utils.utility import standardizer
from pyod.utils.utility import check_detector
# TODO: find random state that is causing runtime warning in pearson
class LSCP(BaseDetector):
    """ Locally Selection Combination in Parallel Outlier Ensembles

    LSCP is an unsupervised parallel outlier detection ensemble which selects
    competent detectors in the local region of a test instance. This
    implementation uses an Average of Maximum strategy. First, a heterogeneous
    list of base detectors is fit to the training data and then generates a
    pseudo ground truth for each train instance is generated by
    taking the maximum outlier score.

    For each test instance:
    1) The local region is defined to be the set of nearest training points in
       randomly sampled feature subspaces which occur more frequently than
       a defined threshold over multiple iterations.

    2) Using the local region, a local pseudo ground truth is defined and the
       pearson correlation is calculated between each base detector's training
       outlier scores and the pseudo ground truth.

    3) A histogram is built out of pearson correlation scores; detectors in
       the largest bin are selected as competent base detectors for the given
       test instance.

    4) The average outlier score of the selected competent detectors is taken
       to be the final score.

    See :cite:`zhao2019lscp` for details.

    Parameters
    ----------
    detector_list : List, length must be greater than 1
        Base unsupervised outlier detectors from PyOD. (Note: requires fit and
        decision_function methods)

    local_region_size : int, optional (default=30)
        Number of training points to consider in each iteration of the local
        region generation process (30 by default).

    local_max_features : float in (0.5, 1.), optional (default=1.0)
        Maximum proportion of number of features to consider when defining the
        local region (1.0 by default).

    n_bins : int, optional (default=10)
        Number of bins to use when selecting the local region

    random_state : RandomState, optional (default=None)
        A random number generator instance to define the state of the random
        permutations generator.

    contamination : float in (0., 0.5), optional (default=0.1)
        The amount of contamination of the data set, i.e.
        the proportion of outliers in the data set. Used when fitting to
        define the threshold on the decision function (0.1 by default).

    Attributes
    ----------
    decision_scores_ : numpy array of shape (n_samples,)
        The outlier scores of the training data.
        The higher, the more abnormal. Outliers tend to have higher
        scores. This value is available once the detector is fitted.

    threshold_ : float
        The threshold is based on ``contamination``. It is the
        ``n_samples * contamination`` most abnormal samples in
        ``decision_scores_``. The threshold is calculated for generating
        binary outlier labels.

    labels_ : int, either 0 or 1
        The binary labels of the training data. 0 stands for inliers
        and 1 for outliers/anomalies. It is generated by applying
        ``threshold_`` on ``decision_scores_``.

    Examples
    --------
    >>> from pyod.utils.data import generate_data
    >>> from pyod.utils.utility import standardizer
    >>> from pyod.models.lscp import LSCP
    >>> from pyod.models.lof import LOF
    >>> X_train, y_train, X_test, y_test = generate_data(
    ...     n_train=50, n_test=50,
    ...     contamination=0.1, random_state=42)
    >>> X_train, X_test = standardizer(X_train, X_test)
    >>> detector_list = [LOF(), LOF()]
    >>> clf = LSCP(detector_list)
    >>> clf.fit(X_train)
    LSCP(...)
    """

    def __init__(self, detector_list, local_region_size=30,
                 local_max_features=1.0, n_bins=10,
                 random_state=None, contamination=0.1):
        super(LSCP, self).__init__(contamination=contamination)
        self.detector_list = detector_list
        self.n_clf = len(self.detector_list)
        self.local_region_size = local_region_size
        # Bounds used only to warn when local_region_size looks unreasonable.
        self.local_region_min = 30
        self.local_region_max = 200
        self.local_max_features = local_max_features
        self.local_min_features = 0.5
        # Number of random-subspace iterations; a neighbor must appear in more
        # than half of them to enter the final local region.
        self.local_region_iterations = 20
        self.local_region_threshold = int(self.local_region_iterations / 2)
        self.n_bins = n_bins
        # Number of histogram bins (not detectors) selected as competent.
        self.n_selected = 1
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit detector. y is ignored in unsupervised methods.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """

        # check detector_list
        if len(self.detector_list) < 2:
            raise ValueError("The detector list has less than 2 detectors.")

        for detector in self.detector_list:
            check_detector(detector)

        # check random state and input
        self.random_state = check_random_state(self.random_state)
        X = check_array(X)
        self._set_n_classes(y)
        self.n_features_ = X.shape[1]

        # normalize input data
        self.X_train_norm_ = X
        train_scores = np.zeros([self.X_train_norm_.shape[0], self.n_clf])

        # fit each base detector and calculate standardized train scores
        for k, detector in enumerate(self.detector_list):
            detector.fit(self.X_train_norm_)
            train_scores[:, k] = detector.decision_scores_
        self.train_scores_ = train_scores

        # set decision scores and threshold
        self.decision_scores_ = self._get_decision_scores(X)
        self._process_decision_scores()

        return self

    def decision_function(self, X):
        """Predict raw anomaly score of X using the fitted detector.

        The anomaly score of an input sample is computed based on different
        detector algorithms. For consistency, outliers are assigned with
        larger anomaly scores.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        anomaly_scores : numpy array of shape (n_samples,)
            The anomaly score of the input samples.
        """

        # check whether model has been fit
        check_is_fitted(self, ['training_pseudo_label_', 'train_scores_',
                               'X_train_norm_', 'n_features_'])

        # check input array
        X = check_array(X)
        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # get decision scores and return
        decision_scores = self._get_decision_scores(X)
        return decision_scores

    def _get_decision_scores(self, X):
        """ Helper function for getting outlier scores on test data X (note:
        model must already be fit)

        Parameters
        ----------
        X : numpy array, shape (n_samples, n_features)
            Test data

        Returns
        -------
        pred_scores_ens : numpy array, shape (n_samples,)
            Outlier scores for test samples
        """

        # raise warning if local region size is outside acceptable limits
        if (self.local_region_size < self.local_region_min) or (
                self.local_region_size > self.local_region_max):
            warnings.warn("Local region size of {} is outside "
                          "recommended range [{}, {}]".format(
                self.local_region_size, self.local_region_min,
                self.local_region_max))

        # standardize test data and get local region for each test instance
        X_test_norm = X
        test_local_regions = self._get_local_region(X_test_norm)

        # calculate test scores
        test_scores = np.zeros([X_test_norm.shape[0], self.n_clf])
        for k, detector in enumerate(self.detector_list):
            test_scores[:, k] = detector.decision_function(X_test_norm)

        # generate standardized scores
        train_scores_norm, test_scores_norm = standardizer(self.train_scores_,
                                                           test_scores)

        # generate pseudo target for training --> for calculating weights
        self.training_pseudo_label_ = np.max(train_scores_norm,
                                             axis=1).reshape(-1, 1)

        # placeholder for ensemble predictions
        pred_scores_ens = np.zeros([X_test_norm.shape[0], ])

        # iterate through test instances (test_local_regions
        # indices correspond to x_test)
        for i, test_local_region in enumerate(test_local_regions):

            # get pseudo target and training scores in local region of
            # test instance
            local_pseudo_ground_truth = self.training_pseudo_label_[
                test_local_region,].ravel()
            local_train_scores = train_scores_norm[test_local_region, :]

            # calculate pearson correlation between local pseudo ground truth
            # and local train scores
            pearson_corr_scores = np.zeros([self.n_clf, ])
            for d in range(self.n_clf):
                pearson_corr_scores[d,] = pearsonr(
                    local_pseudo_ground_truth, local_train_scores[:, d])[0]

            # return best score
            pred_scores_ens[i,] = np.mean(
                test_scores_norm[
                    i, self._get_competent_detectors(pearson_corr_scores)])

        return pred_scores_ens

    def _get_local_region(self, X_test_norm):
        """ Get local region for each test instance

        Parameters
        ----------
        X_test_norm : numpy array, shape (n_samples, n_features)
            Normalized test data

        Returns
        -------
        final_local_region_list : List of lists, shape of [n_samples, [local_region]]
            Indices of training samples in the local region of each test sample
        """

        # Initialize the local region list
        # NOTE: [[]] * n aliases one list, but every slot is rebound (not
        # mutated in place) in the loop below, so the aliasing is harmless.
        local_region_list = [[]] * X_test_norm.shape[0]

        if self.local_max_features > 1.0:
            warnings.warn(
                "Local max features greater than 1.0, reducing to 1.0")
            self.local_max_features = 1.0

        if self.X_train_norm_.shape[1] * self.local_min_features < 1:
            warnings.warn(
                "Local min features smaller than 1, increasing to 1.0")
            self.local_min_features = 1.0

        # perform multiple iterations
        for _ in range(self.local_region_iterations):

            # if min and max are the same, then use all features
            if self.local_max_features == self.local_min_features:
                features = range(0, self.X_train_norm_.shape[1])
                warnings.warn("Local min features equals local max features; "
                              "use all features instead.")

            else:
                # randomly generate feature subspaces
                features = generate_bagging_indices(
                    self.random_state,
                    bootstrap_features=False,
                    n_features=self.X_train_norm_.shape[1],
                    min_features=int(
                        self.X_train_norm_.shape[1] * self.local_min_features),
                    max_features=int(
                        self.X_train_norm_.shape[1] * self.local_max_features))

            # build KDTree out of training subspace
            tree = KDTree(self.X_train_norm_[:, features])

            # Find neighbors of each test instance
            _, ind_arr = tree.query(X_test_norm[:, features],
                                    k=self.local_region_size)

            # add neighbors to local region list
            for j in range(X_test_norm.shape[0]):
                local_region_list[j] = local_region_list[j] + \
                                       ind_arr[j, :].tolist()

        # keep nearby points which occur at least local_region_threshold times
        final_local_region_list = [[]] * X_test_norm.shape[0]
        for j in range(X_test_norm.shape[0]):
            final_local_region_list[j] = [item for item, count in
                                          collections.Counter(
                                              local_region_list[j]).items() if
                                          count > self.local_region_threshold]

        return final_local_region_list

    def _get_competent_detectors(self, scores):
        """ Identifies competent base detectors based on correlation scores

        Parameters
        ----------
        scores : numpy array, shape (n_clf,)
            Correlation scores for each classifier (for a specific
            test instance)

        Returns
        -------
        candidates : List
            Indices for competent detectors (for given test instance)
        """

        # create histogram of correlation scores
        scores = scores.reshape(-1, 1)

        # TODO: handle when Pearson score is 0
        # if scores contain nan, change it to 0
        if np.isnan(scores).any():
            scores = np.nan_to_num(scores)

        if self.n_bins > self.n_clf:
            warnings.warn(
                "The number of histogram bins is greater than the number of "
                "classifiers, reducing n_bins to n_clf.")
            self.n_bins = self.n_clf
        hist, bin_edges = np.histogram(scores, bins=self.n_bins)

        # find n_selected largest bins
        max_bins = argmaxn(hist, n=self.n_selected)
        candidates = []

        # iterate through bins
        for max_bin in max_bins:
            # determine which detectors are inside this bin
            selected = np.where((scores >= bin_edges[max_bin])
                                & (scores <= bin_edges[max_bin + 1]))
            # add to list of candidates
            candidates = candidates + selected[0].tolist()

        return candidates

    def __len__(self):
        # Number of base detectors in the ensemble.
        return len(self.detector_list)

    def __getitem__(self, index):
        # Direct access to a base detector by position.
        return self.detector_list[index]

    def __iter__(self):
        # Iterate over the base detectors.
        return iter(self.detector_list)
|
996,910 | 983096de8c3532c77e36392f27fff8f1c6fb1d3c | class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
from functools import reduce
from operator import mul
dic = {}
s = 1
# ans = []
if 0 in nums:
# ans.append(0)
ans = 0
else:
ans = nums[0]
for i in nums:
if not i:
s += 1
continue
try:
dic[s].append(i)
except:
dic[s] = [i]
for v in dic.values():
ret = reduce(mul, v)
# ans.append(ret)
if ret > ans:
ans = ret
if ret < 0:
for j in range(len(v)):
if v[j] < 0 and j < len(v) - 1:
ret = reduce(mul, v[j + 1:])
if ret > ans:
ans = ret
break
for k in range(len(v) - 1, 0, -1):
if v[k] < 0:
ret = reduce(mul, v[:k])
if ret > ans:
ans = ret
break
return ans
if __name__ == '__main__':
    # Quick manual check on a sample containing zeros and negatives.
    solver = Solution()
    sample = [0, 1, 43, 5, 0, 0, 4, 77, 2, 1, 0, -3, -23, -44, 0, 8]
    print(solver.maxProduct(sample))
|
996,911 | 114f3282b9f14931eca27afb03101192269e423b | import random
class Card(object):
    """A playing card with a rank, a suit, and a face-up/face-down state."""

    RANKS = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"]
    SUITS = ["C", "S", "H", "D"]

    def __init__(self, rank, suit, isFaceUp=True):
        self.rank = rank
        self.suit = suit
        self.isFaceUp = isFaceUp

    def __str__(self):
        # A face-down card hides its identity.
        return str(self.rank) + str(self.suit) if self.isFaceUp else "XX"

    def flipCard(self):
        # Toggle between face-up and face-down.
        self.isFaceUp = not self.isFaceUp
class Hand(object):
    """An ordered collection of cards."""

    def __init__(self):
        self.cards = []

    def __str__(self):
        if not self.cards:
            return "<Empty Hand>"
        # Tab-separated card list, with a trailing tab (original format).
        return "".join(str(card) + "\t" for card in self.cards)

    def addCard(self, card):
        self.cards.append(card)

    def giveCard(self, card, otherHand):
        # Transfer a card only if this hand actually holds it.
        if card in self.cards:
            self.cards.remove(card)
            otherHand.addCard(card)

    def clearCards(self):
        self.cards = []
class Deck(Hand):
    """A full 52-card hand that starts populated and shuffled."""

    def __init__(self):
        super(Deck, self).__init__()
        self.populate()
        self.shuffle()

    def populate(self):
        # One card for every rank/suit combination.
        for rank in Card.RANKS:
            for suit in Card.SUITS:
                self.addCard(Card(rank, suit))

    def shuffle(self):
        random.shuffle(self.cards)

    def dealCard(self, hands, numberOfCards):
        # Deal round-robin, one card per hand per pass, until done or empty.
        for _ in range(numberOfCards):
            for hand in hands:
                if not self.cards:
                    break
                self.giveCard(self.cards[0], hand)
|
996,912 | b0b065d767b7c4c4022d54fad49336fa4382429e | class Solution:
def rob(self, nums: List[int]) -> int:
"""
้ฆๅ
๏ผ้ฆๅฐพๆฟ้ดไธ่ฝๅๆถ่ขซๆข๏ผ้ฃไนๅชๅฏ่ฝๆไธ็งไธๅๆ
ๅต๏ผ
่ฆไน้ฝไธ่ขซๆข๏ผ่ฆไน็ฌฌไธ้ดๆฟๅญ่ขซๆขๆๅไธ้ดไธๆข๏ผ่ฆไนๆๅไธ้ดๆฟๅญ่ขซๆข็ฌฌไธ้ดไธๆขใ
"""
n = len(nums)
if n == 1: return nums[0]
return max(self.rob_range(nums, 0, n - 2), self.rob_range(nums, 1, n - 1))
def rob_range(self, nums: List[int], start, end) -> int:
nums = nums[start:end + 1]
if not nums:
return 0
size = len(nums)
if size == 1:
return nums[0]
first, second = nums[0], max(nums[0], nums[1])
for i in range(2, size):
first, second = second, max(first + nums[i], second)
return second
class Solution:
    """House Robber II solved as two linear passes.

    Reference:
    https://leetcode-cn.com/problems/house-robber-ii/solution/213-da-jia-jie-she-iidong-tai-gui-hua-jie-gou-hua-/
    """

    def rob(self, nums: [int]) -> int:
        # A single house is trivially the answer.
        if len(nums) == 1:
            return nums[0]

        def linear(houses):
            # Rolling DP: best including / excluding the current house.
            take, skip = 0, 0
            for value in houses:
                take, skip = max(skip + value, take), take
            return take

        # Either drop the last house or drop the first one.
        return max(linear(nums[:-1]), linear(nums[1:]))
class Solution:
    def rob(self, nums: List[int]) -> int:
        # Houses form a ring: robbing the first house rules out the last
        # (they are adjacent). So solve the linear problem twice -- once
        # without the last house, once without the first -- and return the
        # larger of the two results.
        def robOne(self, numsOne: List[int]) -> int:
            # LeetCode 198 (linear street): dp[i] is the best haul over
            # [0, i]. For house i, either skip it (dp[i-1], i.e. drop
            # nums[i]) or rob it on top of dp[i-2] (dropping nums[i-1]);
            # dp[i] is the larger of the two choices.
            dp = [0 for _ in numsOne]
            if len(numsOne) == 1:
                return numsOne[0]
            dp[0] = numsOne[0]
            dp[1] = max(numsOne[0], numsOne[1])
            for i in range(2, len(numsOne), 1):
                dp[i] = max(dp[i - 1], dp[i - 2] + numsOne[i])
            return dp[-1]
        if not nums:  # empty array
            return 0
        if len(nums) == 1:
            return nums[0]
        nums_0 = nums[1:]
        nums_n = nums[:len(nums) - 1]
        return max(robOne(self, nums_n), robOne(self, nums_0))
|
996,913 | efd2a8d8299907d5dfebd31e7697e5e203aa0941 | class Solution(object):
def maximumUniqueSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
ans = 0
l = s = 0
mem = {}
n = len(nums)
for r in range(n):
s += nums[r]
while l < r and nums[r] in mem:
s -= nums[l]
del mem[nums[l]]
l += 1
mem[nums[r]] = r
ans = max(ans, s)
return ans
# Approach: sliding window.
996,914 | 27ff80c4c6f90ba3bf36ad514095afae3956875a | """Creates a dictionary given fastalign's outputs
Author: Antonios Anastasopoulos <aanastas@andrew.cmu.edu>
"""
import argparse
from collections import Counter
import string
def align(textfile, alignmentfile, l1, l2, N):
    """Build a bilingual dictionary from fast_align output.

    Reads a parallel corpus (`textfile`, lines formatted "src ||| tgt")
    and the corresponding alignments (`alignmentfile`, lines of "i-j"
    pairs), counts aligned word pairs over the first N lines, and writes
    pairs whose alignment probability clears a count-dependent threshold
    to dict.{l1}-{l2}.txt in the current directory.

    Fixes vs. the original: the bare `except:` (which also swallowed
    KeyboardInterrupt and real bugs) is narrowed to the errors malformed
    lines actually raise, and the parameter N is no longer shadowed by a
    local of the same name.
    """
    outputfile = f"dict.{l1}-{l2}"
    # Read the text and the alignment files
    with open(textfile, mode='r', encoding='utf-8') as inp:
        lines = inp.readlines()
    with open(alignmentfile, mode='r') as inp:
        allines = inp.readlines()
    assert(len(lines) == len(allines))
    # Get counts over aligned word pairs
    d = {}          # d[src_word][tgt_word] -> co-occurrence count
    allwords = {}   # allwords[src_word]    -> total occurrence count
    for line, al in zip(lines[:N], allines[:N]):
        try:
            sents = line.strip().lower().split('|||')
            leftside = sents[0].strip().split()
            rightside = sents[1].strip().split()
            als = [(int(k.split('-')[0]), int(k.split('-')[1])) for k in al.strip().split()]
            for i, j in als:
                d.setdefault(leftside[i], {})
                d[leftside[i]][rightside[j]] = d[leftside[i]].get(rightside[j], 0) + 1
            for w in leftside:
                allwords[w] = allwords.get(w, 0) + 1
        except (IndexError, ValueError):
            # Skip malformed lines: a missing "|||" half, an alignment
            # index out of range, or a non-numeric alignment token.
            pass
    # Allow different alignment probability thresholds for different
    # word-pair occurrence counts.
    # TODO(): This should probably be tuned to the smaller amount of data we have
    count_thresholds = [20, 5, 2]
    prob_thresholds = [0.5, 0.6, 0.9]
    # Write out the word pairs with probabilities above the thresholds
    with open(f"{outputfile}.txt", 'w') as outall:
        n_types = len(allwords)  # renamed: the original reused parameter N here
        print(n_types)
        counter = Counter(allwords)
        for word, count in counter.most_common(n_types):
            if word in d and (not any(c in string.punctuation for c in word)):
                for trans in d[word]:
                    if trans and (not any(c in string.punctuation for c in trans)):
                        # The first (highest-count) bucket the pair falls in
                        # decides acceptance; then stop scanning buckets.
                        for c_t, p_t in zip(count_thresholds, prob_thresholds):
                            if count > c_t:
                                if d[word][trans] >= p_t * count:
                                    outall.write(f"{word}\t{trans}\n")
                                break
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): the second option string of each argument uses a single
    # dash ("-input_dir" etc.).  argparse accepts this, and the attribute
    # names (args.i, args.a, ...) are derived from the first flag, so
    # converting these to "--long" form would silently change the attribute
    # names used below.
    parser.add_argument("-i", "-input_dir", help="input text", type=str)
    parser.add_argument("-a", "-alignment", help="input alignment", type=str)
    parser.add_argument("-l1", "-l1", help="l1", type=str)
    parser.add_argument("-l2", "-l2", help="l2 file", type=str)
    parser.add_argument("-n", "-number", default=1000000, help="number of lines to use", type=int)
    args = parser.parse_args()
    align(args.i, args.a, args.l1, args.l2, args.n)
|
996,915 | 4e2f9a63a9bfa9f61fa28a5bf0952acb76dd32a3 | from .base import *
ALLOWED_HOSTS = ['101.101.219.148'] |
996,916 | 8cbb595886461c6bb2b47f4330eeacc426a014d6 | from typing import List
class Solution:
    def findShortestSubArray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray with the
        same degree (maximum element frequency) as `nums`.

        Single pass recording, per value: count, first index, last index.
        The answer is min(last - first + 1) over values whose count equals
        the degree.

        Improvements vs. the original: O(n) instead of re-scanning the
        whole array for every most-frequent value (O(k * n)), and an empty
        input returns 0 instead of raising ValueError from max() on an
        empty sequence.
        """
        if not nums:
            return 0
        count = {}   # value -> occurrences
        first = {}   # value -> index of first occurrence
        last = {}    # value -> index of last occurrence
        for idx, value in enumerate(nums):
            count[value] = count.get(value, 0) + 1
            if value not in first:
                first[value] = idx
            last[value] = idx
        degree = max(count.values())
        return min(last[v] - first[v] + 1 for v in count if count[v] == degree)
if __name__ == "__main__":
    # Smoke tests; expected answers in the trailing comments.
    print(Solution().findShortestSubArray([1, 2, 2, 3, 1]))  # 2
    print(Solution().findShortestSubArray([1, 2, 2, 3, 1, 4, 2]))  # 6
|
996,917 | 4b34b685512657cf9be2b555d82eca60c6ba0d2a | #!/usr/bin/env python
# table auto-generator for zling.
# author: Zhang Li <zhangli10@baidu.com>
kBucketItemSize = 4096

# Extra-bit length of each match-index "group": four 0-bit groups, then
# pairs of 1..7-bit groups, then 8-bit groups for the remainder.
matchidx_blen = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7] + [8] * 1024

matchidx_code = []  # matchidx_code[idx] -> group number of match index idx
matchidx_bits = []  # unused here; kept for parity with the original layout
matchidx_base = []  # matchidx_base[group] -> first match index of that group

# Fill one group per iteration: group g covers 2**matchidx_blen[g] indices.
# NOTE: len(matchidx_base) is the number of the group currently being
# built, so matchidx_base must only be appended to after the group's
# codes have been emitted.
while len(matchidx_code) < kBucketItemSize:
    for bits in range(2 ** matchidx_blen[len(matchidx_base)]):
        matchidx_code.append(len(matchidx_base))
    matchidx_base.append(len(matchidx_code) - 2 ** matchidx_blen[len(matchidx_base)])

# Emit the C include files, 16 values per line ("%4u," each).
# Fix vs. the original: the files were opened and never closed, leaving
# flushing to interpreter shutdown; `with` guarantees close-on-error too.
with open("ztable_matchidx_blen.inc", "w") as f_blen, \
     open("ztable_matchidx_base.inc", "w") as f_base:
    for i in range(0, matchidx_base.__len__()):
        f_blen.write("%4u," % matchidx_blen[i] + "\n\x20" [int(i % 16 != 15)])
        f_base.write("%4u," % matchidx_base[i] + "\n\x20" [int(i % 16 != 15)])
with open("ztable_matchidx_code.inc", "w") as f_code:
    for i in range(0, matchidx_code.__len__()):
        f_code.write("%4u," % matchidx_code[i] + "\n\x20" [int(i % 16 != 15)])
|
996,918 | cee551fc70f7c5e1388692cf9227f4bd17640f05 | from collections import deque
import sys
def bfs(s, trail):
    """Breadth-first search over the module-level adjacency list `edge`,
    writing the distance from `s` into `trail` (callers pre-fill it with
    a large sentinel so unvisited nodes compare greater)."""
    frontier = deque()
    frontier.append((s, 0))
    trail[s] = 0
    while frontier:
        node, dist = frontier.popleft()
        for neighbour in edge[node]:
            if trail[neighbour] > dist + 1:
                trail[neighbour] = dist + 1
                frontier.append((neighbour, dist + 1))
# Read the tree: N vertices, then N-1 undirected edges (1-based input,
# stored 0-based in an adjacency list).
N = int(input())
edge = [[] for _ in range(N)]
for s in sys.stdin.readlines():
    a, b = map(int, s.split())
    edge[a - 1].append(b - 1)
    edge[b - 1].append(a - 1)
INF = 10 ** 9
# Distances of every vertex from Fennec's start (vertex 0) and from
# Snuke's start (vertex N-1).
path0 = [INF] * N
pathN = [INF] * N
bfs(0, path0)
bfs(N - 1, pathN)
fennec = 0
snuke = 0
for i in range(N):
    # A vertex is claimed by whoever reaches it first; ties go to Fennec.
    if path0[i] <= pathN[i]:
        fennec += 1
    else:
        snuke += 1
ans = 'Fennec' if fennec > snuke else 'Snuke'
print(ans)
|
996,919 | bc30dc5c2b523bbb7a02e58ad5850e6a987a6a7a | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
import uuid
import os
# Create your models here.
class Course(models.Model):
    """A course that students enrol in."""
    coursename= models.CharField(max_length=100, blank=True, null=True)
    description=models.CharField(max_length=500, blank=True, null=True)
    # NOTE(review): the default starts with 'media/' although ImageField
    # paths are normally relative to MEDIA_ROOT -- confirm against settings.
    course_ka_photo = models.ImageField(default='media/lol.jpg', blank=True, null=True)
    def __str__(self):
        return self.coursename
class Student(models.Model):
    """A student account, linked to one Course."""
    def get_upload_to(instance, filename):
        """Build the avatar upload path user_avatars/<pk-or-uuid>.<ext>:
        the pk is used when the row already exists, else a random uuid."""
        upload_to = 'user_avatars'
        ext = filename.split('.')[-1]
        if instance.pk:
            filename = '{}.{}'.format(instance.pk, ext)
        else:
            filename = '{}.{}'.format(uuid.uuid4().hex, ext)
        return upload_to + '/' + filename
    # NOTE(review): username is a unique *integer* (nullable) rather than
    # text -- presumably a student/roll number; confirm intent.
    username=models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(1000000000000)], blank=True, null=True,unique=True)
    name = models.CharField(max_length=50)
    email = models.EmailField()
    course=models.ForeignKey(Course,on_delete=models.CASCADE, null=True)
    avatar = models.ImageField(upload_to=get_upload_to, default='user_avatars/default-avatar.jpg', blank=False, null=False)
    def __str__(self):
        return str(self.username)
class Subject(models.Model):
    """A subject taught within one Course."""
    subjectname=models.CharField(max_length=50,blank=True,null=True)
    course=models.ForeignKey(Course,on_delete=models.CASCADE)
    def __str__(self):
        return self.subjectname
class Student_attendance(models.Model):
    """Attendance percentage (0-100) for one student."""
    student=models.ForeignKey(Student, on_delete=models.CASCADE)
    attendance=models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)], blank=True, null=True)
    def __str__(self):
        return str(self.attendance)
class Follow(models.Model):
    """Directed follow relationship between two students."""
    # Fix: on_delete added -- it is mandatory from Django 2.0 on, and every
    # other ForeignKey in this file already passes models.CASCADE.
    follower = models.ForeignKey(Student, on_delete=models.CASCADE, related_name="person_following")
    followed = models.ForeignKey(Student, on_delete=models.CASCADE, related_name="person_followed")
    time = models.DateTimeField(auto_now=True)
    def __str__(self):
        return str(self.follower) + "-" + str(self.followed)
|
996,920 | c3a3391a849fb40a225415a1778a96af61c45f50 | # coding: utf-8
from boost_collections.zskiplist.zskiplist_level import ZskiplistLevel
class ZskiplistNode(object):
    """One node of a zskiplist: member `ele` sorted by `score`, with a
    backward pointer for reverse traversal and one ZskiplistLevel slot per
    level the node participates in."""

    def __init__(self, level, score, ele=None):
        super(ZskiplistNode, self).__init__()
        self.backward = None   # previous node on the bottom level
        self.score = score     # sort key
        self.ele = ele         # stored member
        # One forward/span slot per level of this node.
        self.level = [ZskiplistLevel() for _ in range(level)]
|
996,921 | fd2701ae304e6c2b046ef3ae3c00c9807d816882 | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.views import View
from .models import *
from django.core.exceptions import ValidationError
from django.contrib import messages
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from django.contrib.auth import logout as site_logout
from django.utils import timezone
from django.template.loader import render_to_string
from .CCF_Posts import CCFilterPosts
from .CCF_Ads import CCFilterAds
try:
from django.utils import simplejson as json
except ImportError:
import json
# Create your views here.
class welcome(View):
    """Landing page: a single form handling both login and registration."""
    template_name = 'PNapp/welcome.html'

    def get(self,request):
        return render(request, self.template_name)

    @method_decorator(csrf_protect)
    def post(self,request):
        # post request for logging in an existing user
        if request.POST['button'] == "Login":
            # process post request
            email = request.POST['email']
            password = request.POST['password']
            # query for user by email
            try:
                user = User.objects.get(email=email)
            except User.DoesNotExist:
                messages.info(request, "User with email: "+email+" does not exist")
                return render(request, self.template_name)
            # NOTE(review): `autheniticate` (sic) is a custom model method;
            # the password appears to be handled as plain text here and in
            # settings.post -- confirm, and consider Django's auth hashing.
            if User.autheniticate(user,password):
                # create session for this user
                request.session['user_pk'] = user.id
                return redirect('index/')
            else:
                messages.info(request, "Wrong Password")
                return render(request, self.template_name)
        # post request for registering a new user
        elif request.POST['button'] == "Register":
            # process request
            name = request.POST['name']
            surname = request.POST['surname']
            email = request.POST['email']
            password = request.POST['password']
            confirm = request.POST['confirm']
            if password == confirm:
                # create user
                u = User(name=name, surname=surname, email=email, password=password)
                # validate the model before saving
                try:
                    u.full_clean()
                except ValidationError as v:
                    messages.info(request, "ValidationError:"+str(v.message_dict))
                    return render(request, self.template_name)
                # save, then log the new user in via the session
                u.save()
                request.session['user_pk'] = u.id
                return redirect('index/')
            else:
                messages.info(request, "Passwords don't match")
                return render(request, self.template_name)
class index(View):
    """Newsfeed: filtered posts plus a sample of the user's network."""
    template_name = 'PNapp/index.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        # posts for this user's newsfeed, ordered by the
        # collaborative-cluster filter
        posts_filtered = CCFilterPosts(user)
        # up to 9 accepted connections, to display a portion of the network
        connections = Connection.objects.filter(receiver=user,accepted=True) | Connection.objects.all().filter(creator=user,accepted=True)
        friends = []
        for conn in connections[:9]:
            # each Connection stores the pair as creator/receiver; keep
            # whichever side is not the current user
            if conn.creator == user:
                friends.append(conn.receiver)
            else:
                friends.append(conn.creator)
        context = {'user':user,'friends':friends, 'posts_list':posts_filtered,'template_name':"index",}
        return render(request, self.template_name, context=context)

    # Since we use jquery/ajax this is deprecated; kept in case JS is disabled.
    def post(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'user':user,}
        if request.POST.get("button", False):
            # the user posted a new status
            if request.POST["button"] == "Submit status":
                status = request.POST['status']
                p = Post(creator=user, creation_date=timezone.now(), text=status)
                # validate the model before saving
                try:
                    p.full_clean()
                except ValidationError as v:
                    messages.info(request, "ValidationError:"+str(v.message_dict))
                    return render(request, self.template_name)
                # save and redirect
                p.save()
                return redirect('/index/')
        # the user posted a new comment (its form submits "comment-button")
        if request.POST.get("comment-button", False):
            post_id = request.POST["comment-button"]
            post = Post.objects.get(pk=post_id)
            text = request.POST['comment']
            c= Comment(creator=user, post_id=post, text=text, creation_date=timezone.now())
            try:
                c.full_clean()
            except ValidationError as v:
                messages.info(request, "ValidationError:"+str(v.message_dict))
                return render(request, self.template_name)
            # save and redirect
            c.save()
            return redirect('/index/')
        return render(request, self.template_name)
def logout(request):
    """Log the user out (clears session and cookies) and return to the
    welcome page."""
    # delete any sessions and cookies
    site_logout(request)
    # return to welcome page
    return redirect('/')
class profile(View):
    """View/edit the logged-in user's details, privacy flags, skills and
    avatar."""
    template_name = 'PNapp/profile.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'user':user,'template_name':"profile",}
        return render(request, self.template_name, context=context)

    def post(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        # If user pressed save for his new details
        if request.POST["button"] == "Save Changes":
            # Apply the submitted changes
            user.name = request.POST['name']
            user.surname = request.POST['surname']
            user.phone = request.POST['phone']
            user.university = request.POST['university']
            user.degree_subject = request.POST['degree_subject']
            user.company = request.POST['company']
            user.position = request.POST['position']
            # update skills: get-or-create each submitted skill name
            for skill_name in request.POST.getlist('skill'):
                if (not skill_name.isspace()) and (skill_name): # whitespace-only not allowed
                    skill_name = skill_name.strip().lower() # trim and lowercase for de-duplication
                    try:
                        skill = Skill.objects.get(name=skill_name)
                    except Skill.DoesNotExist:
                        skill = Skill.objects.create(name=skill_name)
                    user.skills.add(skill)
            # apply privacy checkbox changes
            self.UpdatePrivacy(request,user)
            # apply profile photo change, if any
            self.UpdateProfilePhoto(request,user)
            try:
                user.full_clean()
            except ValidationError as v:
                messages.info(request, "ValidationError:"+str(v.message_dict))
                return render(request, self.template_name)
            user.save()
            messages.success(request, "Info updated successfully.")
            return redirect('/profile/')
        return render(request, self.template_name)

    def UpdatePrivacy(self,request,user):
        """Mirror the posted privacy checkboxes onto the *_public flags
        (an unchecked box is simply absent from the POST data)."""
        if request.POST.get("phone_privacy", False):
            user.phone_public = True
        else:
            user.phone_public = False
        if request.POST.get("university_privacy", False):
            user.university_public = True
        else:
            user.university_public = False
        if request.POST.get("degree_subject_privacy", False):
            user.degree_subject_public = True
        else:
            user.degree_subject_public = False
        if request.POST.get("company_privacy", False):
            user.company_public = True
        else:
            user.company_public = False
        if request.POST.get("position_privacy", False):
            user.position_public = True
        else:
            user.position_public = False
        if request.POST.get("skills_privacy", False):
            user.skills_public = True
        else:
            user.skills_public = False

    def UpdateProfilePhoto(self,request,user):
        """Store an uploaded avatar under profpics/YYYY/MM/DD/ and point
        user.profile_photo at it (the caller saves the model)."""
        if request.FILES.get('image-file',False):
            from django.conf import settings
            from django.core.files.storage import FileSystemStorage
            from django.utils import timezone
            import datetime
            # get image
            myfile = request.FILES['image-file']
            # save image
            fs = FileSystemStorage()
            now = datetime.datetime.now()
            # NOTE(review): strftime pattern "%Y/%m/%d//" yields a doubled
            # slash -- looks unintentional; confirm the storage layout.
            filename = fs.save('profpics/'+now.strftime("%Y/%m/%d//")+str(myfile.name), myfile)
            # change image url in db
            user.profile_photo = fs.url(filename).replace('media/','')
class network(View):
    """List all of the logged-in user's accepted connections."""
    template_name = 'PNapp/network.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'user':user,'friends':user.get_friends(),'template_name':"network",}
        return render(request, self.template_name, context=context)
class mymessages(View):
    """Conversation list plus the messages of one selected conversation."""
    template_name = 'PNapp/messages.html'

    def get(self, request, conversation_pk=-1):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        # get conversations
        conversations = user.get_conversations()
        if conversations is not None:
            # get target conversation
            if conversation_pk == -1: # default to the first conversation
                target_conversation = conversations.first()
            else:
                # NOTE(review): raises DoesNotExist for an unknown pk;
                # get_object_or_404 would be the conventional guard here.
                target_conversation = Conversation.objects.get(id=conversation_pk)
            if target_conversation is not None:
                context = { 'user':user,
                            'conversations':conversations,
                            'target_conversation':target_conversation,
                            'messages':target_conversation.get_messages(),
                            'template_name':"messages",}
                return render(request, self.template_name, context=context)
        # no conversations yet: render the empty messages page
        context = {'template_name':"messages",}
        return render(request, self.template_name, context=context)

    # deprecated since we use jquery/ajax; kept in case JS is disabled.
    def post(self, request, conversation_pk=-1):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        # new message in an open chat
        if 'message'in request.POST:
            text=request.POST['message']
            Message.objects.create(text=text,creator=user,conversation=get_object_or_404(Conversation, pk=conversation_pk))
            return redirect('/messages/'+str(conversation_pk))
        # new message from the overview page (the conversation might not exist yet)
        if 'send message' in request.POST:
            target_user=User.objects.get(id=request.POST['send message'])
            # find the conversation between these two users (either direction)
            conversation=Conversation.objects.filter(creator=user,receiver=target_user)\
                | Conversation.objects.filter(creator=target_user,receiver=user)
            if not conversation:
                # conversation doesn't exist yet: create it
                conversation=Conversation.objects.create(creator=user,receiver=target_user)
                return redirect('/messages/'+str(conversation.id))
            return redirect('/messages/'+str(conversation.first().id))
class search(View):
    """Search users by name or surname (case-insensitive, word by word)."""
    template_name = 'PNapp/search.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        query = request.GET["search_text"]
        # If any word of the query matches a name or a surname, add the
        # user to the result set (a set de-duplicates users matched by
        # several words).
        users = set()
        # Fix: the loop variable was named `str`, shadowing the builtin.
        for word in query.split():
            result = User.objects.filter(name__icontains=word) | User.objects.filter(surname__icontains=word)
            users.update(set(result))
        context = {'users':users,}
        return render(request, self.template_name, context=context)
class overview(View):
    """Another user's public profile, with add-connection button state."""
    template_name = 'PNapp/overview.html'

    def get(self, request, pk):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        target_user = User.objects.get(id=pk)
        # friendship status (none / connected / request pending) decides
        # what the add button shows
        connected_users = Connection.objects.filter(creator=user,receiver=target_user,accepted=True).exists() | Connection.objects.filter(creator=target_user,receiver=user,accepted=True).exists()
        request_exists = Connection.objects.filter(creator=user,receiver=target_user).exists() | Connection.objects.filter(creator=target_user,receiver=user).exists()
        context = { 'user':user,
                    'target_user':target_user,
                    'friends': target_user.get_friends(),
                    'connected_users':connected_users,
                    'request_exists':request_exists}
        return render(request, self.template_name, context)

    # deprecated because of jquery/ajax; kept in case JS is disabled.
    def post(self, request, pk):
        userid = request.POST['add user']
        receiver = User.objects.get(id=userid)
        try:
            creator = User.objects.get(id=request.session['user_pk'])
        except KeyError: # user not logged in
            return redirect('/')
        #if 'add user' in request.POST:
        conn = Connection.objects.create(creator=creator,receiver=receiver,accepted=False)
        friends = creator.get_friends() # the requesting user's friends
        # rebuild the context to reflect the pending request
        context = {'target_user':receiver,'friends':friends, 'connected_users':False,'request_exists':True}
        return render(request, self.template_name, context=context)
class settings(View):
    """Account credentials page: change e-mail, its privacy, and password."""
    template_name = 'PNapp/settings.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'user':user,'template_name':"settings",}
        return render(request, self.template_name, context=context)

    def post(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'user':user,'template_name':"settings",}
        # If the user chose to save his new credentials
        new_email = request.POST['email']
        if request.POST["button"] == "Save Changes":
            # If the submitted email is not the one that user had until now
            if user.email != new_email:
                # If the new email is already used
                if User.objects.filter(email=new_email).exists():
                    # Then show message that user with that email already exists
                    messages.info(request, "User with email: " + new_email + " already exists.")
                    return render(request, self.template_name)
            # If password is different from the password's confirmation
            if request.POST['password'] != request.POST['cpassword']:
                messages.info(request, "Passwords should be the same.")
                return render(request, self.template_name)
            # Apply the changes
            user.email = request.POST['email']
            if request.POST.get("email_privacy", False):
                user.email_public = True
            else:
                user.email_public = False
            # NOTE(review): the password appears to be stored as-is on the
            # custom User model (no hashing visible) -- security concern.
            user.password = request.POST['password']
            try:
                user.full_clean()
            except ValidationError as v:
                messages.info(request, "ValidationError:"+str(v.message_dict))
                return render(request, self.template_name)
            user.save()
            messages.success(request, "Changes made successfully.")
            return redirect('/settings/')
        return render(request, self.template_name, context=context)
class advertisments(View):
    """Advertisement feed, ordered by the collaborative-cluster filter."""
    template_name = 'PNapp/advertisments.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        ads = CCFilterAds(user) # CCF sorts the ads for this user
        context = { 'template_name':"advertisments",
                    'ads': ads,
                    'user': user,}
        return render(request, self.template_name, context=context)
class notifications(View):
    """Pending friend requests and other notifications for the user."""
    template_name ='PNapp/notifications.html'

    def get(self, request):
        user = UserSessionCheck(request)
        if not user:
            return redirect('/')
        context = {'template_name': "notifications",
                   'friend_requests': user.get_friend_requests(),
                   'notifications': user.get_notifications(),
                   'user':user,
                   }
        return render(request, self.template_name, context=context)
##################AJAX VIEWS#############################################
from django.views.decorators.csrf import csrf_exempt
def interest(request):
    """AJAX: register the logged-in user's interest in a post and return
    the post's updated interest count."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    # look up the target post
    post = get_object_or_404(Post, id=request.POST['postid'])
    # only one interest per user per post
    if Interest.objects.filter(creator=user, post=post).exists():
        return JsonResponse({"error":"User already interested."})
    Interest.objects.create(creator=user, post=post, creation_date=timezone.now())
    return JsonResponse({'total_interests': post.total_interests()})
def friend_request(request):
    """AJAX: accept or reject a pending connection request."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    pending = Connection.objects.get(id=request.POST['fr_id'])
    action = request.POST["action"]
    if action == "Accept":
        pending.accepted = True
        pending.save()
    elif action == "Reject":
        pending.delete()
    return JsonResponse({})
def new_message(request):
    """AJAX: append a message to an existing conversation; returns the
    sender's id and avatar URL for client-side rendering."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    # Create the new message
    conversation = get_object_or_404(Conversation, id=request.POST["convo_id"])
    Message.objects.create(text=request.POST["message"],creator=user,creation_date=timezone.now(),conversation=conversation)
    return JsonResponse({"user_id":user.id, "profile_photo_url":user.profile_photo.url})
def new_ad(request):
    """AJAX: create a new advertisement for the logged-in user and return
    its rendered HTML fragment."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    # create a new ad
    ad = Advertisment.objects.create(title=request.POST['title'], creator=user, details=request.POST['details'], creation_date=timezone.now())
    for skill_name in json.loads(request.POST['skills']):
        if (not skill_name.isspace()) and (skill_name): # whitespace-only not allowed
            skill_name = skill_name.strip().lower() # trim and lowercase for de-duplication
            # Fix: fetch-or-create the Skill *object* and add it to the
            # M2M.  The original passed the raw name string to .add()
            # (which Django treats as a primary key) and discarded the
            # Skill row it had just created -- cf. profile.post, which
            # already does it this way.
            skill, _created = Skill.objects.get_or_create(name=skill_name)
            ad.skills.add(skill)
    return render(request,"PNapp/ad.html",context={"ad":ad,"user":user})
def ad_apply(request):
    """AJAX: apply the logged-in user to an advertisement."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    try:
        # KeyError when 'ad_id' is missing from the POST data
        advert = Advertisment.objects.get(id=request.POST['ad_id'])
    except KeyError:
        return JsonResponse({"message":"couldnt find ad"})
    if user in advert.applicants.all():
        return JsonResponse({"message":"already applied"})
    advert.applicants.add(user)
    return JsonResponse({"message":"successfully applied"})
def post_submit(request):
    """AJAX: create a status post; returns the rendered post fragment, or
    an empty response for whitespace-only input."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    status = request.POST['status']
    if status.isspace():
        return HttpResponse("")
    new_post = Post.objects.create(creator=user, creation_date=timezone.now(), text=status)
    return render(request,"PNapp/post.html",context={"post":new_post})
def comment_submit(request):
    """AJAX: add a comment to a post; returns a hand-built HTML fragment
    for the new comment (empty response for whitespace-only text)."""
    user = UserSessionCheck(request)
    if not user:
        return redirect('/')
    post = get_object_or_404(Post,id=request.POST["post_id"])
    text = request.POST['comment']
    if text.isspace():
        return HttpResponse("")
    c = Comment.objects.create(creator=user, post_id=post, text=text, creation_date=timezone.now())
    # NOTE(review): `text` is interpolated into HTML without escaping --
    # XSS risk; prefer render_to_string with a template.
    data = '<div class="comment"><a class="comment-avatar pull-left" href="/overview/'+str(user.id)+\
    '"><img src="'+str(user.profile_photo.url)+'"></a><div class="comment-text">'+\
    text+'</div></div>'
    return HttpResponse(data)
################ MICS ########################################################
def UserSessionCheck(request):
    """Return the logged-in User for this session, or None when the
    session carries no 'user_pk' (i.e. nobody is logged in)."""
    if 'user_pk' not in request.session:
        return None
    return User.objects.get(id=request.session['user_pk'])
|
996,922 | c5505e4a67d3dbf6ea2b6524ffbfd7fba179ca8a |
import SimpleITK as sitk
def create_composite(dim, transformations):
    """
    Creates a composite sitk transform based on a list of sitk transforms.
    :param dim: The dimension of the transformation.
    :param transformations: A list of sitk transforms.
    :return: The composite sitk transform.
    """
    composite = sitk.Transform(dim, sitk.sitkIdentity)
    for t in transformations:
        composite.AddTransform(t)
    return composite
def flipped_dimensions(transformation, size):
    """
    Heuristically detect flipped axes: axis i counts as flipped when the
    transform maps the origin and the point size[i] along that axis to
    coordinates whose order along axis i is reversed.
    :param transformation: The sitk transformation.
    :param size: The size to check, usually the image size.
    :return: List of booleans, True where the dimension is flipped.
    """
    dim = len(size)
    # image of the origin under the transform
    origin_mapped = transformation.TransformPoint([0.0] * dim)
    flips = []
    for axis in range(dim):
        # image of the far corner along this single axis
        corner = [0.0] * dim
        corner[axis] = size[axis]
        corner_mapped = transformation.TransformPoint(corner)
        # flipped when start and end swapped order along this axis
        flips.append(origin_mapped[axis] > corner_mapped[axis])
    return flips
|
996,923 | 8c6309f7e150d0ded0d1c06031b0c65158e87df6 | #!/usr/bin/env python3
#coding:utf-8
class LNode(object):
    """Singly linked list node: payload `val` plus `next` pointer."""
    def __init__(self, x=None):
        self.val = x
        self.next = None
def isLoop(head):
    """Detect whether a singly linked list contains a cycle.

    Walks the list recording every visited node in a set; the first node
    seen twice is the cycle entry.

    :param head: head node of the list (may be None)
    :return: (True, entry_val) when a cycle exists, else (False, None)

    Fixes vs. the original: the walk starts at `head` instead of
    `head.next` (the old version never recorded `head`, so a cycle passing
    through it reported the wrong entry node), and the empty/one-node
    degenerate cases return (False, None) instead of a bare None, making
    the return type consistent.
    """
    seen = set()
    cur = head
    while cur:
        if cur in seen:
            return (True, cur.val)
        seen.add(cur)
        cur = cur.next
    return (False, None)
def constructLinkedList(n):
    """Build a linear list: a dummy head followed by n nodes valued
    0..n-1; return the dummy head."""
    head = LNode()
    tail = head
    for value in range(n):
        node = LNode()
        node.val = value
        tail.next = node
        tail = node
    return head
def constructLinkedListHasRing(n):
    """Build a list with a dummy head and n nodes valued 0..n-1, where
    the tail links back to the middle node (index n // 2), forming a ring.

    Fix vs. the original: for n == 0 the ring-entry variable was never
    assigned, so `cur.next = m` raised NameError; now the dummy head is
    returned with no ring.
    """
    head = LNode()
    cur = head
    ring_entry = None
    mid = n // 2
    for i in range(n):
        tmp = LNode()
        tmp.val = i
        cur.next = tmp
        cur = tmp
        if i == mid:
            ring_entry = cur  # remember the middle node as the ring entry
    cur.next = ring_entry
    return head
if __name__ == "__main__":
    # Demo: an acyclic 8-node list and one whose tail loops back to the
    # middle node.
    head1 = constructLinkedList(8)
    head2 = constructLinkedListHasRing(8)
    print(isLoop(head1))
    print(isLoop(head2))
|
996,924 | 1f632bfa31612f6d0512f2159e23350d4b7a728a | from django.shortcuts import render
# Create your views here.
import django_filters
from rest_framework import viewsets, filters
from rest_framework import status
from rest_framework.response import Response
from .models import Condition, Entry
from .serializer import ConditionSerializer, EntrySerializer
class ConditionViewSet(viewsets.ModelViewSet):
    """CRUD API for Condition; POST accepts a single object or a list."""
    queryset = Condition.objects.all()
    serializer_class = ConditionSerializer

    def create(self, request, *args, **kwargs):
        """Create one Condition (delegates to DRF) or, when the request
        body is a JSON list, create each element in order.

        Bug fix: the original returned only the *last* element's
        serialized data (and headers computed from the raw input dict)
        for list input; the response body is now the full list of
        created elements.
        """
        conditions = request.data
        if not isinstance(conditions, list):
            # single object: DRF's CreateModelMixin handles it
            return super(ConditionViewSet, self).create(request, *args, **kwargs)
        # cf. site-packages\rest_framework\mixins.py CreateModelMixin
        created = []
        for condition in conditions:
            serializer = self.get_serializer(data=condition)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            created.append(serializer.data)
        return Response(created, status=status.HTTP_201_CREATED)
class EntryViewSet(viewsets.ModelViewSet):
    """CRUD API for Entry; POST accepts a single object or a list.

    References kept from the original:
    https://stackoverflow.com/questions/33866396/django-rest-framework-json-array-post
    https://stackoverflow.com/questions/19253363/named-json-array-in-django-rest-framework
    https://stackoverflow.com/questions/45917656/bulk-create-using-listserializer-of-django-rest-framework
    """
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer

    def create(self, request, *args, **kwargs):
        """Create one Entry (DRF default) or each element of a JSON list.

        Bug fix: list input used to return only the last element's data;
        the response now contains every created element.
        """
        entries = request.data
        if not isinstance(entries, list):
            return super(EntryViewSet, self).create(request, *args, **kwargs)
        created = []
        for entry in entries:
            serializer = self.get_serializer(data=entry)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            created.append(serializer.data)
        return Response(created, status=status.HTTP_201_CREATED)
from django.views import generic
class ConditionListView(generic.ListView):
    # NOTE(review): this class is redefined later in this module; the later
    # definition (with get_queryset ordering) is the one that wins at
    # import time, making this one dead code.
    model = Condition
    paginate_by = 20
# Django tutorial, part 3:
# https://docs.djangoproject.com/ja/1.11/intro/tutorial03/
from django.http import HttpResponse
def index(request):
    """Plain-text listing of all condition descriptions, newest first."""
    latest_condition_list = Condition.objects.order_by('-created_at')
    output = ', '.join([c.description for c in latest_condition_list])
    return HttpResponse(output)
# Django tutorial, part 4:
# https://docs.djangoproject.com/ja/1.11/intro/tutorial04/
# A generic ListView renders the default template <app>/<model>_list.html;
# override it via template_name.
# The auto-generated context variable for a ListView is <model>_list;
# override it via the context_object_name attribute.
from django.views import generic
class ConditionListView(generic.ListView):
    """All conditions, newest first (default template:
    meas/condition_list.html, context variable condition_list)."""
    # template_name = 'app_name/index.html'
    # context_object_name = 'latest_question_list'
    model = Condition
    def get_queryset(self):
        return Condition.objects.order_by('-created_at')
        # return Condition.objects.order_by('-created_at')[:5]
class ConditionDetailView(generic.DetailView):
    """Detail page for one Condition (default template:
    meas/condition_detail.html)."""
    model = Condition
    # def get_queryset(self):
    #     return Condition.objects.filter(condition.id=pk)
class SerialListView(generic.ListView):
    """Distinct serial numbers, rendered with a custom template."""
    model = Condition
    template_name = 'meas/serial_list.html'
    def get_queryset(self):
        return Condition.objects.values('serial').distinct()
class SeriesListView(generic.ListView):
    """Distinct (series, description) pairs, custom template."""
    model = Condition
    template_name = 'meas/series_list.html'
    def get_queryset(self):
        #import pdb; pdb.set_trace()
        return Condition.objects.values('series', 'description').distinct()
class UlidListView(generic.ListView):
    """All conditions (one row per ULID), custom template."""
    model = Condition
    template_name = 'meas/ulid_list.html'
    #def get_queryset(self):
    #    return Condition.objects.values('ulid').distinct()
# get 1 object, filter multi objects
# from django.shortcuts import get_object_or_404
def SeriesDetailView(request, series_id):
    """Render every Condition row belonging to one measurement series."""
    series_conditions = Condition.objects.filter(series=series_id)
    return render(request, 'meas/series_detail.html', {'condition': series_conditions})
def SerialDetailView(request, pk):
    """Render every Condition row recorded for one serial number."""
    condition = Condition.objects.filter(serial=pk)
    #condition = get_object_or_404(Condition, serial = pk)
    return render(request, 'meas/serial_detail.html', {'condition': condition})
    #return render(request, 'meas/series_list.html', {'condition': condition})
# one ULID has one condition
def UlidDetailView(request, ulid):
    """Condition plus measurement entries for a single ULID (one ULID
    maps to exactly one condition)."""
    # Object output
    #condition = Condition.objects.get(ulid=ulid)
    # NOTE: get_object_or_404 is imported further down this module; the
    # import has executed by the time this view is called, so the name
    # resolves at runtime.
    condition = get_object_or_404(Condition, ulid=ulid)
    # Queryset output
    #condition = Condition.objects.filter(ulid=ulid)
    entry = Entry.objects.filter(ulid=ulid)
    power = Entry.objects.filter(ulid=ulid, item='OpticalPower')
    ber = Entry.objects.filter(ulid=ulid, item='Pre-FEC_ber')
    #return render(request, 'meas/ulid_detail.html', {'condition': condition})
    return render(request, 'meas/ulid_detail.html', {'condition': condition, 'entry': entry, 'power': power, 'ber': ber})
from django.http import HttpResponse
from django.template import Context, loader
def Serial01Index(request):
    """Manual-template variant of the serial list (tutorial experiment)."""
    #return HttpResponse("Serial 01 Index")
    object_list = Condition.objects.values('serial').distinct()
    template = loader.get_template('meas/serial_list.html')
    context = ({'condition_list': object_list,})
    return HttpResponse(template.render(context))
def Serial01Detail(request, sid):
    # Placeholder endpoint; not yet implemented.
    return HttpResponse("Serial 01 Detail")
from django.shortcuts import get_object_or_404, render_to_response
def Serial02Index(request):
    """render_to_response variant of the serial list (tutorial experiment).
    NOTE(review): render_to_response was removed in Django 3.0."""
    #return HttpResponse("Serial 02 Index")
    object_list = Condition.objects.values('serial').distinct()
    return render_to_response('meas/serial_list.html', {'condition_list': object_list})
def Serial02Detail(request, serial_id):
    """Detail page for one serial.

    Fix: render_to_response() was deprecated in Django 2.0 and removed in
    3.0; use render(), which is already in scope in this module.
    """
    condition = Condition.objects.filter(serial=serial_id)
    return render(request, 'meas/serial_detail.html', {'condition': condition})
class EntryListView(generic.ListView):
    """Paginated list of Entry objects (default template/context names)."""
    model = Entry
    paginate_by = 20
class EntryDetailView(generic.DetailView):
    """Detail page for a single Entry (default template/context names)."""
    model = Entry
996,925 | 360866ae533bc88c058dbf80e6e59605e5d6d7b5 | import datetime
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
class AddForm(forms.Form):
    """Single search box styled as a Bootstrap form-control."""
    search = forms.CharField(max_length = 50, widget=forms.TextInput(attrs={'class': 'form-control'}))
    def clean_search(self):
        # NOTE(review): CharField already rejects input longer than
        # max_length=50 before clean_search runs, so this check looks
        # unreachable -- confirm before relying on it.
        clean_search = self.cleaned_data['search']
        if len(clean_search) > 50:
            raise ValidationError('The input is too big', code = 'invalid')
        return clean_search
class SubmitForm(forms.Form):
    """Item submission form: code / description / quantity."""
    code = forms.CharField(max_length = 10, widget=forms.TextInput(attrs={'class': 'form-control'}))
    description = forms.CharField(max_length = 50, widget=forms.TextInput(attrs={'class': 'form-control'}))
    quantity = forms.CharField(max_length = 3, widget=forms.TextInput(attrs={'class': 'form-control'}))
    # Fix: the old clean_search() was copied from AddForm and read
    # self.cleaned_data['search'], a field this form does not declare.
    # Django only calls clean_<field> for declared fields, so the method
    # was dead code and has been removed; each CharField's max_length
    # already enforces the length limits.
|
996,926 | 26dd4af69534d57c9f1138e8c9c1fe19ad46974b | # -*- coding: utf-8 -*-
from __future__ import division
import math
#COMECE SEU CODIGO AQUI
# NOTE(review): the original snippet is not valid Python -- the if/else
# headers lack colons, py2-style "print x" is used, and the file breaks
# off mid-expression at "else (g".  The rewrite below only repairs the
# syntax while keeping the visible intent (decompose the input against
# the note values 20/10/5/2/1, under true division from the __future__
# import); the missing tail is marked TODO rather than guessed.
a = int(input('digite um numero: '))
b = 20
c = 10
d = 5
e = 2
f = 1
g = a % b
if g == 0:
    print(a / b)
else:
    if (g % c) / 10 == 0:
        print(g / b)
    # TODO: the source is truncated here ("else (g"); complete the
    # remaining 10/5/2/1 decomposition once the intended logic is known.
|
996,927 | f85aee2b1ea851122ddf645ff8ca8c0f38c1f3fe | #!/usr/bin/python
# Example use:
# ~ $ percent 26,943,452,560 27,089,972,296
# +146519736
# +0.543804605864%
import sys
def remove_commas(text):
    """Return *text* with every thousands-separator comma removed.

    Fix: the parameter was named ``str``, shadowing the builtin.
    """
    return text.replace(",", "")
# Parse the two command-line amounts; commas are allowed ("26,943,452,560").
before = float(remove_commas(sys.argv[1]))
after = float(remove_commas(sys.argv[2]))
diff = after - before
# Prefix positive changes with "+"; negatives already carry their "-".
if diff > 0:
    sign = "+"
else:
    sign = ""
# NOTE(review): diff is a float, so this prints e.g. "+146519736.0",
# not the integer form shown in the example at the top of the file.
print(sign + str(diff))
print(sign + str((float(diff) / float(before)) * 100.0) + "%")
|
996,928 | f2e0d70e41381839039f9280b3a09905cb85f7c1 | import datetime
from project.server import db
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Answer(db.Model):
    """One submitted answer: free-text comments plus uploaded attachments."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    comments = db.Column(db.String(255))
    # NOTE(review): plain integer columns, not ForeignKeys -- confirm whether
    # referential integrity to questions/sections is intended.
    question_id = db.Column(db.Integer)
    section_id = db.Column(db.Integer)
    attachments = db.relationship('Attachments', backref='answer', lazy=True)
996,929 | 609f6ecb4194c6118f4eb1bfc30812a1fc2987bd | import mysql.connector
import csv
import time
#---------------------------
# COUNTRY
#---------------------------
def uploadCountryData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
country = row['country']
try:
query = "INSERT INTO Country (country) VALUES (\"%s\")"%(country)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# METALBAND
#---------------------------
def uploadMetalBandData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
bandName = row['bandName'].replace('\"', '\'')
fans = row['fans']
formed = row['formed']
origin = row['origin']
split = row['split']
try:
query = "INSERT INTO MetalBand (bandName, fans, formed, origin, split) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(bandName, fans, formed, origin, split)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# METALSTYLE
#---------------------------
def uploadMetalStyleData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
SID = row['SID']
bandName = row['bandName'].replace('\"', '\'')
style = row['style']
try:
query = "INSERT INTO MetalStyle (SID, bandName, style) VALUES (\"%s\",\"%s\",\"%s\")"%(SID, bandName, style)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# POPULATION
#---------------------------
def uploadPopulationData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
PID = row['PID']
country = row['country']
year = row['year']
population = row['population']
try:
query = "INSERT INTO Population (PID, country, year, population) VALUES (\"%s\",\"%s\",\"%s\",\"%s\")"%(PID, country, year, population)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# TERRORATTACK
#---------------------------
def uploadTerrorAttackData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
AID = row['AID']
EID = row['EID']
attackTypeID = row['attackTypeID']
attackType = row['attackType']
try:
query = "INSERT INTO TerrorAttack (AID, EID, attackTypeID, attackType) VALUES (\"%s\",\"%s\",\"%s\",\"%s\")"%(AID, EID, attackTypeID, attackType)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# TERROREVENT
#---------------------------
def uploadTerrorEventData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
EID = row['EID']
eventDate = row['eventDate']
approxDate = row['approxDate'].replace('\"', '\'')
extended = row['extended']
resolution = row['resolution']
LID = row['LID']
summary = row['summary'].replace('\"', '\'').replace('\\','\\\\')
crit1 = row['crit1']
crit2 = row['crit2']
crit3 = row['crit3']
doubtterr = row['doubtterr']
alternativeID = row['alternativeID']
alternative = row['alternative'].replace('\"', '\'')
multiple = row['multiple']
success = row['success']
suicide = row['suicide']
nkill = row['nkill']
nkillus = row['nkillus']
nkillter = row['nkillter']
nwound = row['nwound']
nwoundus = row['nwoundus']
nwoundte = row['nwoundte']
property = row['property']
propextentID = row['propextentID']
propextent = row['propextent'].replace('\"', '\'').replace('\\','\\\\')
propvalue = row['propvalue']
propcomment = row['propcomment'].replace('\"', '\'').replace('\\','\\\\')
addnotes = row['addnotes'].replace('\"', '\'')
weapdetail = row['weapdetail'].replace('\"', '\'')
gname = row['gname'].replace('\"', '\'')
gsubname = row['gsubname'].replace('\"', '\'')
gname2 = row['gname2'].replace('\"', '\'')
gsubname2 = row['gsubname2'].replace('\"', '\'')
gname3 = row['gname3'].replace('\"', '\'')
gsubname3 = row['gsubname3'].replace('\"', '\'')
motive = row['motive'].replace('\"', '\'')
guncertain1 = row['guncertain1']
guncertain2 = row['guncertain2']
guncertain3 = row['guncertain3']
individual = row['individual']
nperps = row['nperps']
nperpcap = row['nperpcap']
claimed = row['claimed']
claimmodeID = row['claimmodeID']
claimmode = row['claimmode'].replace('\"', '\'')
claim2 = row['claim2']
claimmode2ID = row['claimmode2ID']
claimmode2 = row['claimmode2'].replace('\"', '\'')
claim3 = row['claim3']
claimmode3ID = row['claimmode3ID']
claimmode3 = row['claimmode3'].replace('\"', '\'')
compclaim = row['compclaim']
ishostkid = row['ishostkid']
nhostkid = row['nhostkid']
nhostkidus = row['nhostkidus']
nhours = row['nhours']
ndays = row['ndays']
divert = row['divert'].replace('\"', '\'')
country = row['country'].replace('\"', '\'')
ransom = row['ransom']
ransomamt = row['ransomamt']
ransomamtus = row['ransomamtus']
ransompaid = row['ransompaid']
ransompaidus = row['ransompaidus']
ransomnote = row['ransomnote'].replace('\"', '\'')
hostkidoutcomeID = row['hostkidoutcomeID']
hostkidoutcome = row['hostkidoutcome'].replace('\"', '\'')
nreleased = row['nreleased']
scite1 = row['scite1'].replace('\"', '\'').replace('\\','\\\\')
scite2 = row['scite2'].replace('\"', '\'').replace('\\','\\\\')
scite3 = row['scite3'].replace('\"', '\'').replace('\\','\\\\')
dbsource = row['dbsource'].replace('\"', '\'')
INT_LOG = row['INT_LOG']
INT_IDEO = row['INT_IDEO']
INT_MISC = row['INT_MISC']
INT_ANY = row['INT_ANY']
try:
query = "INSERT INTO TerrorEvent (EID, eventDate, approxDate, extended, resolution, LID, summary, crit1, crit2, crit3, doubtterr, alternativeID, alternative, multiple, success, suicide, nkill, nkillus, nkillter, nwound, nwoundus, nwoundte, property, propextentID, propextent, propvalue, propcomment, addnotes, weapdetail, gname, gsubname, gname2, gsubname2, gname3, gsubname3, motive, guncertain1, guncertain2, guncertain3, individual, nperps, nperpcap, claimed, claimmodeID, claimmode, claim2, claimmode2ID, claimmode2, claim3, claimmode3ID, claimmode3, compclaim, ishostkid, nhostkid, nhostkidus, nhours, ndays, divert, country, ransom, ransomamt, ransomamtus, ransompaid, ransompaidus, ransomnote, hostkidoutcomeID, hostkidoutcome, nreleased, scite1, scite2, scite3, dbsource, INT_LOG, INT_IDEO, INT_MISC, INT_ANY) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(EID, eventDate, approxDate, extended, resolution, LID, summary, crit1, crit2, crit3, doubtterr, alternativeID, alternative, multiple, success, suicide, nkill, nkillus, nkillter, nwound, nwoundus, nwoundte, property, propextentID, propextent, propvalue, propcomment, addnotes, weapdetail, gname, gsubname, gname2, gsubname2, gname3, gsubname3, motive, guncertain1, guncertain2, guncertain3, individual, nperps, nperpcap, claimed, claimmodeID, claimmode, claim2, claimmode2ID, claimmode2, claim3, claimmode3ID, claimmode3, compclaim, ishostkid, nhostkid, nhostkidus, nhours, ndays, divert, country, ransom, ransomamt, ransomamtus, 
ransompaid, ransompaidus, ransomnote, hostkidoutcomeID, hostkidoutcome, nreleased, scite1, scite2, scite3, dbsource, INT_LOG, INT_IDEO, INT_MISC, INT_ANY)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + " " + EID + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("FILE " + str(e) + '\n')
#---------------------------
# TERRORLOCATION
#---------------------------
def uploadTerrorLocationData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
LID = row['LID']
countryID = row['countryID']
country = row['country']
regionID = row['regionID']
region = row['region']
provstate = row['provstate']
city = row['city']
latitude = row['latitude']
longitude = row['longitude']
specificity = row['specificity']
vicinity = row['vicinity']
location = row['location'].replace('\"', '\'').replace('\\','\\\\')
try:
query = "INSERT INTO TerrorLocation (LID, countryID, country, regionID, region, provstate, city, latitude, longitude, specificity, vicinity, location) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(LID, countryID, country, regionID, region, provstate, city, latitude, longitude, specificity, vicinity, location)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# TERRORRELATION
#---------------------------
def uploadTerrorRelationData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
RID = row['RID']
EID = row['EID']
related = row['related']
try:
query = "INSERT INTO TerrorRelation (RID, EID, related) VALUES (\"%s\",\"%s\",\"%s\")"%(RID, EID, related)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# TERRORTARGET
#---------------------------
def uploadTerrorTargetData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
TID = row['TID']
EID = row['EID']
targTypeID = row['targTypeID']
targType = row['targType'].replace('\"', '\'')
targSubtypeID = row['targSubtypeID']
targSubtype = row['targSubtype'].replace('\"', '\'')
corp = row['corp'].replace('\"', '\'')
target = row['target'].replace('\"', '\'')
nationalityID = row['nationalityID']
nationality = row['nationality'].replace('\"', '\'')
try:
query = "INSERT INTO TerrorTarget (TID, EID, targTypeID, targType, targSubtypeID, targSubtype, corp, target, nationalityID, nationality) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(TID, EID, targTypeID, targType, targSubtypeID, targSubtype, corp, target, nationalityID, nationality)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# TERRORWEAPON
#---------------------------
def uploadTerrorWeaponData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
WID = row['WID']
EID = row['EID']
weapTypeID = row['weapTypeID']
weapType = row['weapType']
weapSubtypeID = row['weapSubtypeID']
weapSubtype = row['weapSubtype']
try:
query = "INSERT INTO TerrorWeapon (WID, EID, weapTypeID, weapType, weapSubtypeID, weapSubtype) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(WID, EID, weapTypeID, weapType, weapSubtypeID, weapSubtype)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#---------------------------
# WEATHER
#---------------------------
def uploadWeatherData(fileName, db):
try:
with open(fileName, 'r') as dataFile:
print '[+] Importing \'%s\''%fileName
dataReader = csv.DictReader(dataFile, delimiter=',', quotechar='"')
counter = 0
for row in dataReader:
LID = row['LID']
weatherDate = row['weatherDate']
rain = row['rain']
temperature = row['temperature']
station = row['station']
try:
query = "INSERT INTO Weather (LID, weatherDate, rain, temperature, station) VALUES (\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")"%(LID, weatherDate, rain, temperature, station)
db.cursor().execute(query)
db.commit()
counter += 1
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("INSERT " + str(e) + '\n')
print '[+] successfully imported %d entries'%counter
except Exception, e:
print '[-] Failed to open \'%s\''%fileName
#-------------------------------------------------------
# MAIN
#-------------------------------------------------------
def main():
startTime = time.time()
try:
# db setup
db = mysql.connector.connect(user='dbProject',
password='db2018',
host='127.0.0.1',
database='dbProject')
cursor = db.cursor()
except Exception, e:
print '[-] failed to connect to db'
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("CONNECT " + str(e) + '\n')
return
try:
db.cursor().execute("TRUNCATE TABLE Country;")
db.cursor().execute("TRUNCATE TABLE MetalBand;")
db.cursor().execute("TRUNCATE TABLE MetalStyle;")
db.cursor().execute("TRUNCATE TABLE Population;")
db.cursor().execute("TRUNCATE TABLE TerrorAttack;")
db.cursor().execute("TRUNCATE TABLE TerrorEvent;")
db.cursor().execute("TRUNCATE TABLE TerrorLocation;")
db.cursor().execute("TRUNCATE TABLE TerrorRelation;")
db.cursor().execute("TRUNCATE TABLE TerrorTarget;")
db.cursor().execute("TRUNCATE TABLE TerrorWeapon;")
db.cursor().execute("TRUNCATE TABLE Weather;")
db.commit()
print '[+] Truncated db entries'
except Exception, e:
with open('../logs/error.log', 'a') as errorLog:
errorLog.write("TRUNCATE " + str(e) + '\n')
uploadCountryData("../data/frames/country.csv", db)
uploadMetalBandData("../data/frames/metalBand.csv", db)
uploadMetalStyleData("../data/frames/metalStyle.csv", db)
uploadPopulationData("../data/frames/population.csv", db)
uploadTerrorAttackData("../data/frames/terrorAttack.csv", db)
uploadTerrorEventData("../data/frames/terrorEvent.csv", db)
uploadTerrorLocationData("../data/frames/terrorLocation.csv", db)
uploadTerrorRelationData("../data/frames/terrorRelation.csv", db)
uploadTerrorTargetData("../data/frames/terrorTarget.csv", db)
uploadTerrorWeaponData("../data/frames/terrorWeapon.csv", db)
uploadWeatherData("../data/frames/weather.csv", db)
endTime = time.time()
elapsedTime = endTime - startTime
print '[+] Finished data import in %.2f s'%elapsedTime
if __name__ == '__main__':
main()
|
996,930 | 52bab96e3ed0e7a980c640973726d8aed7301dc2 | LINKS_REPR = [
(
"Link(link_id='d7dd01d6-9577-4076-b7f2-911b231044f8', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes=["
"{'adapter_number': 0, 'label': {'rotation': 0, 'style': 'font-family:"
" TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0"
";', 'text': 'e0/0', 'x': 69, 'y': 27}, 'node_id':"
" 'de23a89a-aa1f-446a-a950-31d4bf98653c', 'port_number': 0}, {'adapter_number':"
" 0, 'label': {'rotation': 0, 'style': 'font-family: TypeWriter;font-size: 10.0"
";font-weight: bold;fill: #000000;fill-opacity: 1.0;', 'text': 'e1', 'x': -4,"
" 'y': 18}, 'node_id': 'da28e1c0-9465-4f7c-b42c-49b2f4e1c64d', 'port_number': 1"
"}], filters={}, capturing=False, capture_file_path=None,"
" capture_file_name=None, capture_compute_id=None)"
),
(
"Link(link_id='cda8707a-79e2-4088-a5f8-c1664928453b', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes="
"[{'adapter_number': 1, 'label': {'rotation': 0, 'style': 'font-family:"
" TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0"
";', 'text': 'e1/0', 'x': 17, 'y': 67}, 'node_id':"
" 'de23a89a-aa1f-446a-a950-31d4bf98653c', 'port_number': 0}, {'adapter_number':"
" 1, 'label': {'rotation': 0, 'style': 'font-family: TypeWriter;font-size: 10.0"
";font-weight: bold;fill: #000000;fill-opacity: 1.0;', 'text': 'e1/0', 'x': 42,"
" 'y': -7}, 'node_id': '0d10d697-ef8d-40af-a4f3-fafe71f5458b', 'port_number': 0"
"}], filters={}, capturing=False, capture_file_path=None,"
" capture_file_name=None, capture_compute_id=None)"
),
(
"Link(link_id='d1f77e00-29d9-483b-988e-1dad66aa0e5f', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=True, nodes=[],"
" filters={}, capturing=False, capture_file_path=None, capture_file_name=None,"
" capture_compute_id=None)"
),
(
"Link(link_id='374b409d-90f2-44e8-b70d-a9d0b2844fd5', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes=[],"
" filters={}, capturing=False, capture_file_path=None, capture_file_name=None,"
" capture_compute_id=None)"
),
(
"Link(link_id='fb27704f-7be5-4152-8ecd-1db6633b2bd9', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes="
"[{'adapter_number': 0, 'label': {'rotation': 0, 'style': 'font-family:"
" TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0"
";', 'text': 'Management1', 'x': 37, 'y': -9}, 'node_id':"
" '8283b923-df0e-4bc1-8199-be6fea40f500', 'port_number': 0}, {'adapter_number':"
" 0, 'label': {'rotation': 0, 'style': 'font-family: TypeWriter;font-size: 10.0"
";font-weight: bold;fill: #000000;fill-opacity: 1.0;', 'text': 'e0', 'x': 27,"
" 'y': 55}, 'node_id': 'da28e1c0-9465-4f7c-b42c-49b2f4e1c64d', 'port_number': 0"
"}], filters={}, capturing=False, capture_file_path=None,"
" capture_file_name=None, capture_compute_id=None)"
),
(
"Link(link_id='4d9f1235-7fd1-466b-ad26-0b4b08beb778', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes="
"[{'adapter_number': 2, 'label': {'rotation': 0, 'style': 'font-family:"
" TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0"
";', 'text': 'e1', 'x': 69, 'y': 31}, 'node_id':"
" '8283b923-df0e-4bc1-8199-be6fea40f500', 'port_number': 0}, {'adapter_number':"
" 0, 'label': {'rotation': 0, 'style': 'font-family: TypeWriter;font-size: 10.0"
";font-weight: bold;fill: #000000;fill-opacity: 1.0;', 'text': 'eth0', 'x': -9,"
" 'y': 28}, 'node_id': 'ef503c45-e998-499d-88fc-2765614b313e', 'port_number': 0"
"}], filters={}, capturing=False, capture_file_path=None,"
" capture_file_name=None, capture_compute_id=None)"
),
(
"Link(link_id='52cdd27d-fa97-47e7-ab99-ea810c20e614', link_type='ethernet',"
" project_id='4b21dfb3-675a-4efa-8613-2f7fb32e76fe', suspend=False, nodes="
"[{'adapter_number': 0, 'label': {'rotation': 0, 'style': 'font-family:"
" TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0"
";', 'text': 'eth1', 'x': 8, 'y': 70}, 'node_id':"
" 'cde85a31-c97f-4551-9596-a3ed12c08498', 'port_number': 1}, {'adapter_number':"
" 0, 'label': {'rotation': 0, 'style': 'font-family: TypeWriter;font-size: 10.0"
";font-weight: bold;fill: #000000;fill-opacity: 1.0;', 'text': 'e7', 'x': 71,"
" 'y': -1}, 'node_id': 'da28e1c0-9465-4f7c-b42c-49b2f4e1c64d', 'port_number': 7"
"}], filters={}, capturing=False, capture_file_path=None,"
" capture_file_name=None, capture_compute_id=None)"
),
]
|
996,931 | 14113bd0021b849cc3d59818b8c7ad6af7447013 | # Creates html files with a page source string.
def createHtmlFile(fileName, html):
    """Write *html* to the file "<fileName>.html".

    Fix: the explicit file.close() was redundant -- the with-statement
    already closes the file when the block exits.
    """
    with open(f"{fileName}.html", "w") as outfile:
        outfile.write(html)
996,932 | e42bfe25d52e045b0d058561eb1f23d79c92ab5f | from django.db import models
from django.urls import reverse
# import datetime
import django
class Saloon(models.Model):
    """A salon account: address, credentials and profile image."""
    name=models.CharField(max_length=250)
    ad_first=models.CharField(max_length=250)   # address line 1
    ad_second=models.CharField(max_length=250)  # address line 2
    city=models.CharField(max_length=250)
    country=models.CharField(max_length=250)
    pincode=models.CharField(max_length=250)
    image=models.ImageField(blank=True,upload_to="profile_pics")
    # NOTE(review): password is a plain CharField -- confirm it is hashed
    # before save; storing raw passwords would be a security problem.
    password=models.CharField(max_length=250)
    email=models.EmailField()
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse("admin")
class Post(models.Model):
    """A post published by a Saloon."""
    saloon=models.ForeignKey(Saloon,related_name="saloon_post",on_delete=models.PROTECT)
    title=models.CharField(max_length=250)
    # NOTE(review): bare integer discriminator -- document the meaning of
    # each type_post value (an enum/choices would make this explicit).
    type_post=models.IntegerField()
    image=models.ImageField(blank=True,upload_to="profile_pics")
    description=models.TextField(blank=True)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        return reverse("admin")
class UserSaloon(models.Model):
    """An end-user account (customer side of the salon app)."""
    name=models.CharField(max_length=250)
    city=models.CharField(max_length=250)
    country=models.CharField(max_length=250)
    image=models.ImageField(blank=True,upload_to="profile_pics")
    # NOTE(review): plain-text password field -- confirm hashing, as for Saloon.
    password=models.CharField(max_length=250)
    email=models.EmailField()
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse("admin")
class Like(models.Model):
    """A user's like on a post, timestamped at creation."""
    post=models.ForeignKey(Post,related_name="post_like",on_delete=models.PROTECT)
    user=models.ForeignKey(UserSaloon,related_name="user_like",on_delete=models.PROTECT)
    time=models.DateTimeField(default=django.utils.timezone.now)
    def __str__(self):
        # Fix: self.post is a Post instance, not a string; __str__ must
        # return str or Django raises TypeError when rendering the object.
        return str(self.post)
    def get_absolute_url(self):
        return reverse("admin")
class Comment(models.Model):
    """A user's comment on a post, timestamped at creation."""
    post=models.ForeignKey(Post,related_name="post_comment",on_delete=models.PROTECT)
    user=models.ForeignKey(UserSaloon,related_name="user_comment",on_delete=models.PROTECT)
    time=models.DateTimeField(default=django.utils.timezone.now)
    comment=models.TextField()
    def __str__(self):
        # Fix: self.post is a Post instance, not a string; __str__ must
        # return str or Django raises TypeError when rendering the object.
        return str(self.post)
    def get_absolute_url(self):
        return reverse("admin")
class Subscribed(models.Model):
    """A user's subscription to a saloon, timestamped at creation."""
    saloon=models.ForeignKey(Saloon,related_name="saloon_subscribe",on_delete=models.PROTECT)
    user=models.ForeignKey(UserSaloon,related_name="user_subscribe",on_delete=models.PROTECT)
    time=models.DateTimeField(default=django.utils.timezone.now)
    # comment=models.TextField()
    def __str__(self):
        # Fix: self.saloon is a Saloon instance, not a string; __str__ must
        # return str or Django raises TypeError when rendering the object.
        return str(self.saloon)
    def get_absolute_url(self):
        return reverse("admin")
class Files(models.Model):
    """Stand-alone uploaded image (no owning relation)."""
    # file=models.FileField(blank=True,upload_to="saloon")
    image=models.ImageField(blank=True,upload_to="profile_pics")
    def __str__(self):
        return self.image.name
996,933 | 57ae0e8367bb6ec4116785ce6803810bbc7a2673 | """
=============
Multipage PDF
=============
This is a demo of creating a pdf file with several pages,
as well as adding metadata and annotations to pdf files.
If you want to create a multipage PDF rendered with LaTeX, you need
to use `from matplotlib.backends.backend_pgf import PdfPages` instead.
That version, however, does not support `attach_note`.
"""
import datetime
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import sqlite3
import matplotlib.ticker as ticker
# Scale/offset constants applied to the raw acquisition columns below.
DEPTH_SCALE = 98.0
TENSION_SCALE = 19.53125
CCL_FACTOR = 51.1  # NOTE(review): defined but unused in this script -- confirm.
CCL_OFFSET = 0 # -511
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
with PdfPages('logPlot.pdf') as pdf:
    page = 0
    # One shared A4 page layout: narrow left track (CCL) and wide right
    # track (Tension), sharing the y (depth) axis.
    fig, axis = plt.subplots(
        1, 2, # 1 row, 2 cols
        gridspec_kw={'width_ratios':[1, 2]},
        sharey=True,
        figsize=(8.27, 11.69)
    )
    con = sqlite3.connect('../data/LLan-123.db')
    rows = con.execute("SELECT COUNT(*) FROM 'acq_20190917_134352' \
        order by id_seq asc").fetchall()[0][0]
    # 100 samples per page; the trailing partial page is dropped.
    totPages = int(rows / 100) - 1
    while (page < totPages):
        cur = con.cursor()
        result = cur.execute(
            """SELECT * FROM {} where id_seq > {} and id_seq < {}"""
            .format('acq_20190917_134352', page*100,
                    (page+1)*100)
        ).fetchall()
        y = []
        x1 = []
        x2 = []
        # Assumes row layout (.., depth_raw, ccl_raw, tension_raw, ..) at
        # indices 1..3 -- TODO confirm against the acquisition schema.
        for var in result:
            y.append(var[1] / DEPTH_SCALE)
            x1.append(var[2] + CCL_OFFSET)
            x2.append(var[3] * TENSION_SCALE)
        plt.rc('text', usetex=False)
        [ax.clear() for ax in axis] # takes ~ 47.0 ms!!
        axis[0].plot(x1, y, label="CCL", linewidth=1.0)
        axis[1].plot(x2, y, label="Tension", linewidth=1.0)
        # plt.title('Tension Plot')
        axis_xmin = [-10, 0]
        axis_xmax = [10, 20000]
        # NOTE(review): fig.legend() is called once per page on the same
        # figure, so legends accumulate -- confirm intended.
        fig.legend(ncol=2, loc='upper center',
                   mode="expand",
                   borderaxespad=0.)
        # To plot/log down. TODO: Evaluate movement
        # NOTE(review): invert_yaxis() toggles the direction on every call,
        # so inside this loop every other page is flipped -- confirm.
        fig.gca().invert_yaxis()
        axis_major_step = []
        axis_minor_step = []
        # Per-track tick spacing derived from each track's x-range.
        for i, _ in enumerate(axis_xmin):
            axis_major_step.append(int((axis_xmax[i] - axis_xmin[i]) \
                / (2 * (1 + i)))
            )
            axis_minor_step.append(axis_major_step[i] / 5)
            axis[i].set_xlim(axis_xmin[i],axis_xmax[i])
        axis[0].tick_params(axis='y', which='major', pad=7)
        axis[0].yaxis.tick_right()
        # Common cosmetic setup for both tracks: ticks on top, hide most
        # spines, grid on both axes, and the computed locators.
        for i, ax in enumerate(axis):
            ax.xaxis.tick_top()
            ax.xaxis.set_ticks_position('top')
            ax.spines['right'].set_color('none')
            ax.spines['left'].set_color('none')
            ax.spines['bottom'].set_color('none')
            ax.tick_params(which='major', width=1.00)
            ax.tick_params(which='major', length=10)
            ax.tick_params(which='minor', width=0.75)
            ax.tick_params(which='minor', length=5)
            ax.grid(b=True, which='minor', axis='both')
            ax.grid(which='major', axis='both', linewidth=2)
            ax.xaxis.set_major_locator(
                ticker.MultipleLocator(axis_major_step[i]))
            ax.xaxis.set_minor_locator(
                ticker.MultipleLocator(axis_minor_step[i]))
            ax.yaxis.set_major_locator(ticker.MultipleLocator(10))
            ax.yaxis.set_minor_locator(ticker.MultipleLocator(2))
        print("Printed page :", page)
        page += 1
        # or you can pass a Figure object to pdf.savefig
        pdf.savefig(fig)
    plt.close()
    con.close()
|
996,934 | 77e5162b142059fa64f6d4f68eca73769bf313f1 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ArticleItem(scrapy.Item):
    """Fields scraped for one article/product listing."""
    description = scrapy.Field()
    description2 = scrapy.Field()
    titre = scrapy.Field()      # title (French field names kept -- spider relies on them)
    soustitre = scrapy.Field()  # subtitle
    similaire = scrapy.Field()  # similar items
    price = scrapy.Field()
    position = scrapy.Field()
    # image_urls/image_name: presumably consumed by an images pipeline --
    # TODO confirm against the project's pipeline settings.
    image_urls = scrapy.Field()
    image_name = scrapy.Field()
    # images = scrapy.Field()
996,935 | 8cb460003da2132b7d8f360d5de030ec3d7b9204 | import sublime
import sublime_plugin
import os
class ProjectFolderListener(sublime_plugin.EventListener):
    """Adds the focused file's directory to the window's project folders.

    The matching removal-on-close logic exists but is disabled (see
    on_close below).
    """
    def on_activated_async(self, view):
        # When a view gains focus, ensure its directory is a project folder.
        dir_name = self.get_dir_name(view)
        if dir_name == None:
            return
        self.add_folder_to_project(dir_name)
    def on_close(self, view):
        # NOTE(review): this early return disables everything below --
        # presumably the folder-removal behaviour was switched off on
        # purpose; confirm before deleting the dead code.
        return
        project_data = sublime.active_window().project_data();
        try:
            folders = project_data['folders']
            new_folders = [f for f in folders if f['path'] != self.get_dir_name(view)]
            project_data['folders'] = new_folders
            sublime.active_window().set_project_data(project_data)
        except:
            pass
    def get_dir_name(self, view):
        # Directory of the view's file, or None for unsaved buffers
        # (view.file_name() returns None there, so dirname raises).
        # NOTE(review): local name shadows the builtin `dir`.
        dir = None
        try:
            dir = os.path.dirname(view.file_name())
        except:
            pass
        return dir
    def add_folder_to_project(self, dir_name):
        # Folder entry as stored in the window's project data.
        folder = {
            'follow_symlinks': True,
            'path': dir_name,
            'folder_exclude_patterns': ['.*'],
            # maybe, we need to edit .gitignore,
            # so do not exclude files that it's name begin with dot
            # 'file_exclude_patterns': ['.*'],
        }
        project_data = sublime.active_window().project_data();
        try:
            # Skip if the folder is already present; otherwise append it.
            folders = project_data['folders']
            for f in folders:
                if f['path'] == dir_name:
                    return
            folders.append(folder)
        except:
            # No usable project data yet -- start a fresh folder list.
            folders = [folder]
        if project_data is None:
            project_data = {}
        project_data['folders'] = folders
        sublime.active_window().set_project_data(project_data)
996,936 | dbfcfc7755e3833e1c85e8021efc09cdb0391f63 | import json
from copy import copy
from functools import reduce
class ComputeGraph(object):
    """Class for calculations with tables.
    Table is a list of dict-like objects without omissions.
    Operations are specified in format of Computing Graph.
    Computing(including reading) occurs separately from specifications.
    Supported operations: Map, Sort, Fold, Reduce, Join.
    Public methods:
    __init__(docs, save=None),
    Map(mapper),
    Sort(*args),
    Fold(folder, begin_state),
    Reduce(reducer, *columns),
    Join(on, key, strategy)
    Compute().
    """

    def __init__(self, docs, save=None):
        """Initializing new ComputeGraph.
        :param docs: file or another ComputeGraph.
        If file - table will be read from there,
        if ComputeGraph - table will be taken as the result of computing it.
        :param save: file, where result will be written,
        if None - result is not written anywhere.
        """
        self.table = []
        self.docs = docs
        self.save = save
        # A graph used as input must itself be computed before this one.
        if isinstance(docs, ComputeGraph):
            self.dependencies = [docs]
        else:
            self.dependencies = []
        # Queued operations: {'operation': name, 'args': [...]} entries,
        # executed in order by Compute().
        self.operations = []
        self.is_computed = False

    def Map(self, mapper):
        """Add Map operation
        :param mapper: generator, which will be called from every table line.
        """
        self.operations.append({'operation': 'map',
                                'args': [mapper]})

    def Sort(self, *args):
        """Add Sort operation
        :param args: columns, by which table will be sorted lexicographically.
        """
        self.operations.append({'operation': 'sort',
                                'args': args})

    def Fold(self, folder, begin_state):
        """Add Fold operation.
        :param folder: combining function.
        :param begin_state: state to begin.
        """
        self.operations.append({'operation': 'fold',
                                'args': [folder, begin_state]})

    def Reduce(self, reducer, *columns):
        """Add Reduce operation.
        :param reducer: generator, which will be called for lines
        with same value of columns.
        :param columns: columns to group by.
        """
        self.operations.append({'operation': 'reduce',
                                'args': [reducer, *columns]})

    def Join(self, on, key, strategy):
        """Add Join operation.
        :param on: another ComputeGraph to join with.
        It should by computed before begin of execution this join.
        :param key: columns on which tables are joined.
        Both tables should contain them.
        :param strategy: one of these:
        ['inner', 'left outer', 'right outer', 'full outer', 'cross'].
        Behavior is similar to SQL Join operation.
        """
        self.dependencies.append(on)
        self.operations.append({'operation': 'join',
                                'args': [on, key, strategy]})

    def Compute(self, verbose=False):
        """Execute all of operations declared earlier(including reading).
        After this, status of graph switches from 'not computed' to 'computed'.
        If calculation this graph requires calculation of other graphs first,
        they will be calculated.
        :param verbose: if True, performed operations will be displayed.
        """
        # Compute upstream graphs first (inputs and join partners).
        for graph in self.dependencies:
            if not graph.is_computed:
                graph.Compute(verbose)
        # Input: a path to a JSON-lines file, or a parent graph's table.
        if isinstance(self.docs, str):
            with open(self.docs, 'r') as file:
                for line in file.readlines():
                    self.table.append(json.loads(line))
        elif isinstance(self.docs, ComputeGraph):
            # Shallow copy: rows are shared with the parent graph.
            self.table = copy(self.docs.table)
        for command in self.operations:
            if verbose:
                print(command['operation'])
            # Dispatch to the matching private implementation (_map, ...).
            getattr(self, '_' + command['operation'])(*command['args'])
        self.is_computed = True
        if self.save:
            with open(self.save, 'w') as file:
                for line in self.table:
                    file.write(json.dumps(line) + '\n')

    def _map(self, mapper):
        # Apply mapper to every row; a mapper may return a single dict or
        # an iterable of dicts (which is flattened into the new table).
        new_table = []
        for line in self.table:
            new_lines = mapper(line)
            if isinstance(new_lines, dict):
                new_table.append(new_lines)
            else:
                for new_line in new_lines:
                    new_table.append(new_line)
        self.table = new_table

    def _sort(self, *args):
        # Decorate-sort-undecorate: pair each row with the list of its
        # sort-column values, sort by that key, then strip the key.
        for i, line in enumerate(self.table):
            # Rows may arrive wrapped in a list/tuple (e.g. produced by a
            # reducer that yielded lists) -- unwrap the first element.
            if isinstance(line, list) or isinstance(line, tuple):
                line = line[0]
            # NOTE: the comprehension's `i` shadows the enumerate index
            # (harmless: the outer `i` is re-read from enumerate next turn).
            sort_args = [line[i] for i in args]
            self.table[i] = (sort_args, line)
        self.table = sorted(self.table, key=lambda item: item[0])
        self.table = [i[1] for i in self.table]

    def _fold(self, folder, begin_state):
        # Left fold of the whole table into a single row, seeded with
        # begin_state; the result becomes the only row of the table.
        result = reduce(lambda x, y: folder(x, y), [begin_state] + self.table)
        self.table = [result]

    def _reduce(self, reducer, *columns):
        # Group consecutive rows with equal values in `columns` (after
        # sorting by them) and feed each group to the reducer.
        self._sort(*columns)
        new_table = []

        def columns_equal(i, j):
            # NOTE(review): `flag` is never used; the loop returns False on
            # the first mismatch and the function defaults to True.
            flag = True
            for column in columns:
                if self.table[i][column] != self.table[j][column]:
                    return False
            return True
        index = begin = end = 0
        while index < len(self.table):
            begin = index
            index += 1
            while index < len(self.table) and columns_equal(begin, index):
                index += 1
            end = index
            lines = list(reducer(self.table[begin:end]))
            # A reducer may yield dicts or single-element lists of dicts.
            for line in lines:
                if isinstance(line, list):
                    new_table.append(line[0])
                elif isinstance(line, dict):
                    new_table.append(line)
        self.table = new_table

    def _join(self, on, key, strategy='inner'):
        # Implemented as: tag every row with its source table, prefix the
        # join-key columns with '__', concatenate both tables, sort by the
        # prefixed keys, then reduce each key group with a per-group cross
        # join (padding with None-rows for the outer strategies).
        def cross_join(self_table, on_table):
            # Cartesian product of the two row lists; right-hand columns
            # overwrite left-hand ones on name clashes (dict(a, **b)).
            if not self_table or not on_table:
                return []
            table = list(map(lambda self_line:
                             list(map(lambda on_line:
                                      dict(self_line, **on_line),
                                      on_table)),
                             self_table))
            table = [item for items in table for item in items]
            return table
        if strategy == 'cross':
            self.table = cross_join(self.table, on.table)
            return
        # Tag own rows in place; the 'on' rows are copied before tagging
        # so the joined-with graph's table is left untouched.
        for line in self.table:
            line['__parent_table'] = 'self'
            for tmp_key in key:
                line['__' + tmp_key] = line[tmp_key]
                line.pop(tmp_key, None)
        for line in on.table:
            new_line = copy(line)
            new_line['__parent_table'] = 'on'
            for tmp_key in key:
                new_line['__' + tmp_key] = new_line[tmp_key]
                new_line.pop(tmp_key, None)
            self.table.append(new_line)
        new_keys = ['__' + tmp_key for tmp_key in key]
        self._sort(*new_keys)

        def join_reducer(columns):
            # Split the key group back into its two sides, restoring the
            # original key column names, then cross-join the sides.
            self_lines = []
            on_lines = []
            for tmp_line in columns:
                parent = tmp_line['__parent_table']
                tmp_line.pop('__parent_table', None)
                for tmp_key in key:
                    arg = tmp_line['__' + tmp_key]
                    tmp_line.pop('__' + tmp_key, None)
                    tmp_line[tmp_key] = arg
                if parent == 'self':
                    self_lines.append(tmp_line)
                elif parent == 'on':
                    on_lines.append(tmp_line)
            # Outer joins: pad the empty side with a row of Nones so the
            # cross join still emits the unmatched rows.
            if strategy in ['full outer', 'right outer'] and not self_lines:
                self_lines.append({self_key: None
                                   for self_key in self.table[0].keys()})
            if strategy in ['full outer', 'left outer'] and not on_lines:
                on_lines.append({on_key: None
                                 for on_key in on.table[0].keys()})
            new_columns = cross_join(self_lines, on_lines)
            for column in new_columns:
                yield column
        self._reduce(join_reducer, *new_keys)
|
996,937 | 7314719bfbdc85b0d6041ce5a518231bc76816e1 | from django.contrib import admin
from .models import Movie, Director
# Expose both models in the Django admin with the default ModelAdmin.
admin.site.register(Movie)
admin.site.register(Director)
996,938 | 71465e59a3182ccfeb7bf5b7ebb479985e88196c | from products import product, Product
from summations import summation, Sum
|
class Solution:
    def numTrees(self, n: int) -> int:
        """Count the structurally unique BSTs that store values 1..n.

        The answer is the n-th Catalan number. The original top-down
        recursion recomputed identical subproblems exponentially often;
        this bottom-up DP runs in O(n^2) time and O(n) space.

        Returns 1 for n == 0 or n == 1 and 0 for negative n, matching
        the previous behavior.
        """
        if n < 0:
            return 0
        # counts[k] = number of unique BSTs containing exactly k nodes.
        counts = [0] * (n + 1)
        counts[0] = 1  # the empty tree
        for nodes in range(1, n + 1):
            # Choose each value as the root: the left subtree then holds
            # root-1 nodes and the right subtree holds nodes-root nodes.
            for root in range(1, nodes + 1):
                counts[nodes] += counts[root - 1] * counts[nodes - root]
        return counts[n]
996,940 | ab443d5e0fbad5bef22a701ebc3cd029c9a2f50a | import random
import numpy as np
from Player import Player
import matplotlib.pyplot as plt
NB_GAME = 10 # number of games each player will be playing
class PublicGoodsGame:
    """Simulation of a repeated N-player public goods game.

    Parameter meanings are inferred from usage -- confirm: n = group
    size, m = population size, p = probability of an initial 'C' state,
    r = multiplication factor, c = contribution cost, mu = mutation
    probability, s = selection strength of the Fermi update rule.

    NOTE(review): the source arrived with indentation stripped; the loop
    nesting in run_two_person_game was reconstructed from context and
    should be verified against the original file.
    """

    def __init__(self, n, m, p, runs, generations, r, c, mu, s):
        self.n = n
        self.m = m
        self.p = p
        self.runs = runs
        self.generations = generations
        self.r = r
        self.c = c
        self.mu = mu
        self.s = s
        # One Player per slot, each with a random strategy in 0..n.
        self.players = [Player(random.randint(0, n), i) for i in range(self.m)]
        self.average_payoffs = [0 for i in range(self.m)]
        self.state = []  # per-player 'C'/'D' intention for the current game

    def run_two_person_game(self):
        yy = [[] for i in range(self.n+1)]
        time_average_frequency = [0 for i in range(self.n+1)]
        for i in range(self.runs):
            print("Run {}".format(i))
            for j in range(self.generations):
                self.average_payoffs = [0 for i in range(self.m)]
                for game_number in range(NB_GAME):
                    # create pairs
                    random.shuffle(self.players)
                    self.state = [np.random.choice(['C', 'D'], p=[self.p, 1-self.p]) for i in range(self.m)]
                    for group in range(self.m//self.n):
                        players = self.players[group*self.n:(group+1)*self.n]
                        # negotiations stage ------------------------------------------------
                        self.negotiations(players)
                        # play the game --------------------------------------------------
                        self.play_game(players)
                # update process ------------------------------------------------
                self.update_process()
                """ count = [0 for i in range(self.n+1)]
                for player in self.players:
                count[player.strat] += 1
                for j in range(self.n+1):
                yy[j].append(count[j])
                xx = np.arange(0,self.generations,1)
                plt.title("Strategies frequencies over generations")
                plt.xlabel("generations")
                plt.ylabel("Strategy frequency")
                for i in range(self.n+1):
                plt.plot(xx, yy[i], label=r"$C_{}$".format(i))
                plt.legend()
                plt.show()"""
            # check if all players have the same strategy
            strats = [player.strat for player in self.players]
            if len(set(strats)) == 1:
                time_average_frequency[strats[0]] += 1
        # time_average_frequency = [(time/self.generations) for time in time_average_frequency]
        x = [i+1 for i in range(self.n+1)]
        labels = [r"$C_{}$".format(i) for i in range(self.n+1)]
        plt.title("Time-averaged-frequencies")
        plt.xlabel("Strategies")
        plt.ylabel("Frequency")
        plt.bar(x, time_average_frequency, color="royalblue")
        plt.xticks(x, labels)
        plt.show()

    def negotiations(self, players):
        # Let randomly picked players flip their announced intention until
        # nobody wants to change any more (a stationary state).
        while not self.is_stationary_state(players):
            player = random.choice(players)
            if player.change_thought(self.state, players):
                player.update_thought(self.state)

    def play_game(self, players):
        k = 0  # number of cooperators
        for player in players:
            if self.state[player.id] == 'C':
                k += 1
        # Pot multiplied by r and shared evenly; cooperators also pay c.
        gain = (self.r*self.c*k)/self.n
        for player in players:
            if self.state[player.id] == "C":
                self.average_payoffs[player.id] += (gain-self.c)/NB_GAME
            else:
                self.average_payoffs[player.id] += (gain)/NB_GAME

    def update_process(self):
        # With prob. mu, player 1 mutates to a random other strategy;
        # otherwise it imitates player 2 with a Fermi probability based on
        # the payoff difference (selection strength s).
        player_1, player_2 = self.get_random_players()
        if random.random() < self.mu:
            new_strat = random.choice([i for i in range(self.n+1) if i != player_1.strat])
            player_1.strat = new_strat
        else:
            delta =self. average_payoffs[player_2.id] - self.average_payoffs[player_1.id]
            if random.random() < 1/(1+np.exp(-self.s*delta)):
                player_1.strat = player_2.strat

    def get_random_players(self):
        # Two distinct players, sampled without replacement.
        return random.sample(self.players, 2)

    def is_stationary_state(self, players):
        # True when no player in the group wants to change its thought.
        for player in players:
            if player.change_thought(self.state, players):
                return False
        return True
996,941 | a1098c6f2ca46a1d8d269be49876caab295dce20 | import scrapy.cmdline as cmdline
# Launch the 'job51' spider as if running `scrapy crawl job51` on the CLI.
cmdline.execute(['scrapy','crawl','job51'])
996,942 | e539783358f49461d765f378bb0474b4da7ea9fd | import numpy as np
import numpy.linalg as npla
import matplotlib.pyplot as plt
from problem_2a import QR_fact_iter
from problem_6b import lancoiz
def main():
    """Build a random symmetric matrix with known spectrum 1..100 and
    plot how the Ritz values of the Lanczos iteration evolve with the
    iteration count; the figure is saved to problem_6c.png."""
    # Orthonormal basis from the QR factorization of a random matrix.
    random_mat = np.random.random((100, 100))
    Q, _ = QR_fact_iter(random_mat)
    # A = Q D Q^T is symmetric with eigenvalues exactly 1..100.
    D = np.diag(np.arange(1, Q.shape[1] + 1))
    A = Q @ D @ Q.T
    Q, H, rvals = lancoiz(A)
    plt.xlabel("Ritz Values")
    plt.ylabel("Number of iterations")
    plt.title("Ritz Values Vs. No. of iterations")
    plt.scatter(rvals[:, 1], rvals[:, 0])
    plt.savefig("problem_6c.png")
    plt.show()


if __name__ == '__main__':
    main()
996,943 | d3af1c9da6ac0e4a3a9f512a4179b23bc2676b73 | from django.conf.urls import patterns, url
from Conferencia import views
# Route table for the Conferencia app (legacy Django `patterns()` API).
# Named regex groups (vista_sigue, articulo_id, evento_tipo) are passed
# to the views as keyword arguments.
urlpatterns = patterns('',
    url(r'^$', views.indice, name='indice'),
    url(r'^mostrarFormConferencia/$',views.mostrarFormConferencia, name='mostrarFormConferencia'),
    url(r'^editarDatosConferencia/$', views.editarDatosConferencia, name='editarDatosConferencia'),
    url(r'^mostrarTiposDeArticulos/$', views.mostrarTiposDeArticulos, name='mostrarTiposDeArticulos'),
    url(r'^aceptablesNota/$', views.aceptablesNota, name='aceptablesNota'),
    url(r'^comprobarPresidente/(?P<vista_sigue>\w+)/$', views.comprobarPresidente, name='comprobarPresidente'),
    url(r'^desempatar/$', views.desempatar, name='desempatar'),
    url(r'^reiniciarSeleccion/$', views.reiniciarSeleccion, name='reiniciarSeleccion'),
    url(r'^mostrarFormComprobar/(?P<vista_sigue>\w+)/$', views.mostrarFormComprobar, name='mostrarFormComprobar'),
    url(r'^agregarAceptado/(?P<articulo_id>\d+)/$', views.agregarAceptado, name='agregarAceptado'),
    url(r'^mostrarEstadoArticulos/$', views.mostrarEstadoArticulos, name='mostrarEstadoArticulos'),
    url(r'^elegirEspeciales/$', views.elegirEspeciales, name='elegirEspeciales'),
    url(r'^agregarEspecial/(?P<articulo_id>\d+)/$', views.agregarEspecial, name='agregarEspecial'),
    url(r'^llenarDiccionarioTopicos/$', views.llenarDiccionarioTopicos, name='llenarDiccionarioTopicos'),
    url(r'^pedirTipoDeEvento/$',views.pedirTipoDeEvento, name = 'pedirTipoDeEvento'),
    url(r'^generarListaArticulosSesion/(?P<evento_tipo>\w+)/$',views.generarListaArticulosSesion, name = 'generarListaArticulosSesion'),
    url(r'^desempatarPorPaises/$', views.desempatarPorPaises, name='desempatarPorPaises'),
    #url(r'^comprobarEmailComite/$',views.comprobarEmailComite, name='comprobarEmailComite'),
)
996,944 | 074859e90d8560d64674a6f2169ebe697dd47b9e | # -*- coding: utf-8 -*-
__virtualname__ = 'priv'


def call(hub, ctx):
    """Invoke the function carried on *ctx* with its stored positional
    and keyword arguments; *hub* is accepted but unused."""
    target = ctx.func
    return target(*ctx.args, **ctx.kwargs)
|
996,945 | 24a7c23b807372ba418850c3e8df23774f4c532b | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
import time
from PIL import Image
import mss
import cv2
import pyautogui as gui
# This is the path to the Tensorflow object detection API
sys.path.append("/home/malachi/.local/lib/python3.6/site-packages/tensorflow/models/research/object_detection/")
# Object detection imports
# Here are the imports from the object detection module.
from utils import label_map_util
from utils import visualization_utils as vis_util

# Variables
MODEL_NAME = 'ball_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'object-detection.pbtxt')
NUM_CLASSES = 1  # single detectable class (the ball)

# ## Load a (frozen) Tensorflow model into memory.
# NOTE(review): uses the TF1 graph API (tf.GraphDef / tf.gfile), so this
# requires TensorFlow 1.x (or tf.compat.v1 shims).
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def finder(show_image):
    """Grab the screen in a loop, run ball detection on each frame and
    steer with the 'a'/'d' keys toward the detected ball.

    :param show_image: 1 to display the annotated frames via OpenCV.

    NOTE(review): the source arrived with indentation stripped; the
    nesting below was reconstructed from context -- verify, especially
    the placement of the keyboard-steering statements.
    """
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            with mss.mss() as sct:
                # Part of the screen to capture
                monitor = {'top': 20, 'left': 0, 'width': 800, 'height': 600}
                while True:
                    last_time = time.time()
                    image = sct.grab(monitor)
                    image_np = np.array(image)
                    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
                    image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
                    #print('np array shape: {}'.format(np.shape(image_np)))
                    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                    image_np_expanded = np.expand_dims(image_np, axis=0)
                    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                    # Each box represents a part of the image where a particular object was detected.
                    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                    # Each score represent how level of confidence for each of the objects.
                    # Score is shown on the result image, together with the class label.
                    scores = detection_graph.get_tensor_by_name('detection_scores:0')
                    classes = detection_graph.get_tensor_by_name('detection_classes:0')
                    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                    # Actual detection.
                    (boxes, scores, classes, num_detections) = sess.run(
                        [boxes, scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})
                    # Visualization of the results of a detection.
                    vis_util.visualize_boxes_and_labels_on_image_array(
                        image_np,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        use_normalized_coordinates=True,
                        line_thickness=8)
                    # apx_distance -> [mid_x, mid_y, score] for confident detections
                    ball_dict = {}
                    for i, b in enumerate(boxes[0]):
                        if classes[0][i] == 1:
                            if scores[0][i] > 0.5:
                                # Box center in normalized [0, 1] coordinates.
                                mid_x = (boxes[0][i][3] + boxes[0][i][1]) / 2
                                mid_y = (boxes[0][i][2] + boxes[0][i][0]) / 2
                                # Heuristic distance: narrower box => farther away.
                                apx_distance = round( (1-(boxes[0][i][3] - boxes[0][i][1]))**4, 3)
                                ball_dict[apx_distance] = [mid_x, mid_y, scores[0][i]]
                                cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*800), int(mid_y*600)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,255), 2)
                                # Horizontal offset from screen center, scaled to [-1, 1].
                                x_move = mid_x - 0.5
                                y_move = mid_y - 0.5
                                get_to_x = x_move/0.5
                                time.sleep(0.05)
                                # Tap 'd'/'a' to steer right/left with a small dead zone.
                                if get_to_x > 0.15:
                                    gui.keyDown('d')
                                    time.sleep(0.01)
                                    gui.keyUp('d')
                                elif get_to_x < -0.15:
                                    gui.keyDown('a')
                                    time.sleep(0.01)
                                    gui.keyUp('a')
                                print(get_to_x)
                                """
                                if apx_distance <= 0.5:
                                if mid_x > 0.3 and mid_x < 0.7:
                                cv2.putText(image_np, 'Hitting ball', (int(mid_x*800)-50, int(mid_y*600)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 3)
                                """
                    if len(ball_dict) > 0:
                        # Pick the nearest detection (smallest heuristic distance).
                        closest = sorted(ball_dict.keys())[0]
                        ball_choice = ball_dict[closest]
                        #print('fps: {0}'.format(1 / (time.time()-last_time)))
                    if show_image == 1:
                        cv2.imshow('object detection', image_np)
                        #print('fps: {0}'.format(1 / (time.time()-last_time)))
                        if cv2.waitKey(25) & 0xFF == ord('q'):
                            cv2.destroyAllWindows()
                            break


# Give the user a few seconds to focus the game window, hold 'w' to move
# forward, then start the detection loop with display enabled.
# NOTE(review): keyDown('w') assumed outside the sleep loop -- confirm.
for i in range(4):
    time.sleep(1)
gui.keyDown('w')
finder(1)
996,946 | f6df1b9dcb9e672406df3885cebf22de3a26aeaa | import random
from random import shuffle
from django.db import models
from django.db.models import Model, TextField, JSONField, IntegerField, CharField
from django.db.models.signals import post_init
from django.dispatch import receiver
from cities.card import Card
class Game(Model):
    """Persistent state of one two-player card game."""

    name = CharField(max_length=255)
    # Full game state (hands, boards, deck, discard) as lists of 2-char
    # card codes such as 'g5' or 'r*'; materialized into Card objects by
    # the post_init receiver below.
    state = JSONField()
    last_card = CharField(max_length=2, null=True)  # most recent card code
    active_player = IntegerField()  # 1 or 2 (see new_game)
    # Per-player access codes and display names; null until players join.
    p1_code = CharField(max_length=255, null=True)
    p2_code = CharField(max_length=255, null=True)
    p1_name = CharField(max_length=255, null=True)
    p2_name = CharField(max_length=255, null=True)
@receiver(post_init, sender=Game)
def post_init(sender, instance, **kwargs):
    """After a Game is instantiated, turn the card codes stored in the
    JSON ``state`` into Card objects attached to the instance.

    NOTE: the function name shadows the imported post_init signal after
    definition; the decorator is evaluated first, so it still binds to
    the Django signal.
    """
    state = instance.state
    for zone in ('p1_hand', 'p2_hand', 'p1_board', 'p2_board', 'discard', 'deck'):
        setattr(instance, zone, [Card(code) for code in state[zone]])
def new_game(name: str, active_player: int = None) -> Game:
    """Create and persist a fresh game.

    Builds the 60-card deck (5 colors x [three '*' wager cards plus the
    values 2..10]), shuffles it, deals 8 cards alternately to each player
    and picks a random starting player unless one is given.
    """
    faces = ["*"] * 3 + [str(value) for value in range(2, 11)]
    deck = [color + face for color in 'gbryw' for face in faces]
    random.shuffle(deck)
    hand1, hand2 = [], []
    for _ in range(8):
        # Deal one card to each player per round, alternating.
        hand1.append(deck.pop())
        hand2.append(deck.pop())
    state = dict(
        p1_hand=hand1,
        p2_hand=hand2,
        deck=deck,
        p1_board=[],
        p2_board=[],
        discard=[]
    )
    if active_player is None:
        active_player = random.choice([1, 2])
    return Game.objects.create(name=name, state=state, active_player=active_player)
|
996,947 | 91b6035c63f293adaddaf62d3ce4578411cbe3b9 | import time
import json
import requests
from pyquery import PyQuery as pq
from urllib import parse
def get_wymx(page):
    """Fetch one page of Sina Ent's celebrity index and dump the scraped
    entries to a per-page JSON file under D://pachong//mx//."""
    # area = 1 mainland, 2 HK/Taiwan, 3 Europe/US, 4 Japan/Korea, 999 other
    # (the original Chinese comment arrived mojibake'd; meaning
    # reconstructed from the query parameter -- verify)
    url = 'http://ent.sina.com.cn/ku/star_search_index.d.html?area=999&page='+str(page)
    hd = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36",
    }
    res = requests.get(url, headers=hd)
    res.encoding = 'utf-8'
    doc = pq(res.text)
    # print(doc('.module-box ul'))
    # data = doc('.tv-list')
    # print(data.text())
    j_list = []
    for n, item in enumerate(doc('.tv-list li').items(), 1):
        print(n)
        print(item.text())
        info = item.find('a').attr('href')
        img = item.find('img').attr('src')
        # NOTE(review): the two print labels below are mojibake'd in the
        # source; kept byte-for-byte since they are runtime strings.
        print('ไธป้กต:'+info)
        print('img:http:'+img)
        item_data = {
            'url': info,
            'img': 'http:'+img,
            'info': item.text()
        }
        j_list.append(item_data)
    # Only write a file when the page actually yielded entries.
    if len(j_list) > 0:
        data = {
            'data': j_list
        }
        jd = json.dumps(data)
        f = open("D://pachong//mx//mx_qt_"+str(page)+'.json', 'w', encoding="utf-8")
        f.write(jd)
        f.close()


# Crawl pages 1..220, one JSON file per page.
for page in range(220):
    print('page--->'+str(page+1))
    get_wymx(page+1)
|
996,948 | beb16ca46aa13561876e0cf0a986511359700788 | import seaborn as sns
import matplotlib.pyplot as plt
# Seaborn exercise script: recreate a set of standard plots from the
# built-in titanic dataset. Each plot opens in its own window.
sns.set_style('whitegrid')
titanic = sns.load_dataset('titanic')
titanic.head()  # NOTE(review): return value unused outside a notebook

# Exercises
#
# ** Recreate the plots below using the titanic dataframe. There are very few hints since most of the plots can be done
# with just one or two lines of code and a hint would basically give away the solution. Keep careful attention
# to the x and y labels for hints.**
sns.jointplot(x = 'fare', y = 'age', data = titanic)
plt.show()
# NOTE(review): displot with kde/bins mirrors the older distplot call --
# confirm it renders as intended on the installed seaborn version.
sns.displot(titanic['fare'], kde = False, bins = 30, color='red')
plt.show()
sns.boxplot(x='class', y='age', data=titanic)
plt.show()
sns.swarmplot(x='class', y='age', data=titanic)
plt.show()
sns.countplot(x='sex', data=titanic)
plt.show()
# NOTE(review): DataFrame.corr() on newer pandas requires numeric_only
# handling for the non-numeric titanic columns -- confirm.
sns.heatmap(titanic.corr(), cmap='coolwarm')
plt.title('titanic corr')
plt.show()
g = sns.FacetGrid(data=titanic,col='sex')
g.map(plt.hist,'age')
plt.show()
|
996,949 | 1d4dc3b4ec8e5c4afa2cd08e9db1f5db6169165b | import re
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, cast
import fastjsonschema
from openslides_backend.models.base import model_registry
from openslides_backend.models.fields import (
BaseRelationField,
BaseTemplateField,
BooleanField,
CharArrayField,
CharField,
ColorField,
DecimalField,
Field,
FloatField,
GenericRelationField,
GenericRelationListField,
HTMLPermissiveField,
HTMLStrictField,
IntegerField,
JSONField,
NumberArrayField,
RelationField,
RelationListField,
TimestampField,
)
from openslides_backend.models.helper import calculate_inherited_groups_helper
from openslides_backend.models.models import Meeting, Model
from openslides_backend.shared.patterns import KEYSEPARATOR, Collection
# JSON schema for import data: {collection_name: {str(id): model_dict}}.
# Collection keys are lowercase snake_case, model keys are positive
# integer strings, and every model must at least carry a numeric "id".
SCHEMA = fastjsonschema.compile(
    {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "title": "Schema for initial and example data.",
        "type": "object",
        "patternProperties": {
            "^[a-z_]+$": {
                "type": "object",
                "patternProperties": {
                    "^[1-9][0-9]*$": {
                        "type": "object",
                        "properties": {"id": {"type": "number"}},
                        "required": ["id"],
                        "additionalProperties": True,
                    }
                },
                "additionalProperties": False,
            }
        },
        "additionalProperties": False,
    }
)
class CheckException(Exception):
    """Raised when the imported data fails a consistency check."""
    pass
def check_string(value: Any) -> bool:
    """Accept None or any str value."""
    if value is None:
        return True
    return isinstance(value, str)
# Lowercase hex color of the exact form "#rrggbb".
color_regex = re.compile("^#[0-9a-f]{6}$")


def check_color(value: Any) -> bool:
    """Accept None or a lowercase '#rrggbb' hex color string."""
    if value is None:
        return True
    return isinstance(value, str) and color_regex.match(value) is not None
def check_number(value: Any) -> bool:
    """Accept None or a plain int. The exact-type test deliberately
    rejects the bool subclass (True/False are not numbers here)."""
    if value is None:
        return True
    return type(value) is int
def check_float(value: Any) -> bool:
    """Accept None, int or float (exact types; bool is rejected)."""
    if value is None:
        return True
    return type(value) in {int, float}
def check_boolean(value: Any) -> bool:
    """Accept None, True or False. Identity semantics: the truthy ints
    0 and 1 are rejected (bool cannot be subclassed, so isinstance is
    equivalent to the original `is True / is False` test)."""
    return value is None or isinstance(value, bool)
def check_string_list(value: Any) -> bool:
    """Accept None or a list whose elements all pass check_string."""
    return check_x_list(value, fn=check_string)
def check_number_list(value: Any) -> bool:
    """Accept None or a list whose elements all pass check_number."""
    return check_x_list(value, fn=check_number)
def check_x_list(value: Any, fn: Callable) -> bool:
    """Accept None, or a list in which every element satisfies *fn*."""
    if value is None:
        return True
    if not isinstance(value, list):
        return False
    return all(fn(element) for element in value)
def check_decimal(value: Any) -> bool:
    """Accept None or a string holding a signed decimal with exactly six
    fractional digits, e.g. '-1.234567'. Leading zeros on a multi-digit
    integer part are rejected by the pattern."""
    if value is None:
        return True
    if not isinstance(value, str):
        return False
    pattern = r"^-?(\d|[1-9]\d+)\.\d{6}$"
    return re.match(pattern, value) is not None
def check_json(value: Any, root: bool = True) -> bool:
    """Accept JSON-like data. At the root only None, list or dict are
    allowed; nested values may additionally be int or str (floats are
    rejected everywhere)."""
    if value is None:
        return True
    if not root and isinstance(value, (int, str)):
        return True
    if isinstance(value, list):
        return all(check_json(item, root=False) for item in value)
    if isinstance(value, dict):
        return all(check_json(item, root=False) for item in value.values())
    return False
# Maps each model field class to the value validator used by the Checker.
# The lookup walks the field type's MRO (see Checker.check_types in this
# file), so field subclasses inherit the checker of their base class.
checker_map: Dict[Type[Field], Callable[..., bool]] = {
    CharField: check_string,
    HTMLStrictField: check_string,
    HTMLPermissiveField: check_string,
    GenericRelationField: check_string,
    IntegerField: check_number,
    TimestampField: check_number,
    RelationField: check_number,
    FloatField: check_float,
    BooleanField: check_boolean,
    CharArrayField: check_string_list,
    GenericRelationListField: check_string_list,
    NumberArrayField: check_number_list,
    RelationListField: check_number_list,
    DecimalField: check_decimal,
    ColorField: check_color,
    JSONField: check_json,
}
class Checker:
modes = ("internal", "external", "all")
def __init__(
    self,
    data: Dict[str, Dict[str, Any]],
    mode: str = "all",
    is_partial: bool = False,
) -> None:
    """
    The checker checks the data without access to datastore.
    It differentiates between import data from the same organization instance,
    typically using the meeting.clone action, or from another organization,
    typically the meeting.import action with data from OS3.
    To check all included collections, use 'all'. Typical usage is the check of
    the example-data.json.
    Mode:
    external: checks that there are no relations to collections
    outside the meeting, except users. The users must be included in data
    and will be imported as new users
    internal: assumes that all relations to collections outside
    the meeting are valid, because the original instance is the same.
    The integrity of this kind of relations is not checked, because there
    is no database involved in command line version. Users are not included
    in data, because they exist in same database.
    all: All collections are valid and has to be in the data
    is_partial=True disables the check, that *all* collections have to be
    explicitly given, so a non existing (=empty) collection will not raise
    an error. Additionally, missing fields (=None) are ok, if they are not
    required nor have a default (so required fields or fields with defaults
    must be present).
    """
    self.data = data
    self.is_partial = is_partial
    self.mode = mode
    # collection name -> model class, for every registered model.
    self.models: Dict[str, Type["Model"]] = {
        collection.collection: model_registry[collection]
        for collection in model_registry
    }
    # Collections that belong to a single meeting's data set.
    meeting_collections = [
        "meeting",
        "group",
        "personal_note",
        "tag",
        "agenda_item",
        "list_of_speakers",
        "speaker",
        "topic",
        "motion",
        "motion_submitter",
        "motion_comment",
        "motion_comment_section",
        "motion_category",
        "motion_block",
        "motion_change_recommendation",
        "motion_state",
        "motion_workflow",
        "motion_statute_paragraph",
        "poll",
        "option",
        "vote",
        "assignment",
        "assignment_candidate",
        "mediafile",
        "projector",
        "projection",
        "projector_message",
        "projector_countdown",
        "chat_group",
        "chat_message",
    ]
    # 'all' additionally allows the organization-level collections.
    if self.mode == "all":
        self.allowed_collections = [
            "organization",
            "user",
            "resource",
            "organization_tag",
            "theme",
            "committee",
        ] + meeting_collections
    else:
        self.allowed_collections = meeting_collections
    # TODO: mediafile blob handling.
    # External imports carry their users along with the meeting data.
    if self.mode == "external":
        self.allowed_collections.append("user")
    self.errors: List[str] = []
    # '_migration_index' is metadata, not a collection -- validate and
    # strip it before the schema check sees the data.
    self.check_migration_index()
    # collection -> prefix -> (template field name, len(prefix), len(suffix))
    self.template_prefixes: Dict[
        str, Dict[str, Tuple[str, int, int]]
    ] = defaultdict(dict)
    self.generate_template_prefixes()
def check_migration_index(self) -> None:
    """Pop '_migration_index' from the data (when present) and record an
    error unless it is -1 or a positive integer."""
    if "_migration_index" not in self.data:
        return
    migration_index = self.data.pop("_migration_index")
    valid = isinstance(migration_index, int) and (
        migration_index == -1 or migration_index >= 1
    )
    if not valid:
        self.errors.append(
            f"The migration index is not -1 or >=1, but {migration_index}."
        )
def get_fields(self, collection: str) -> Iterable[Field]:
    """All model fields declared for *collection*."""
    model_class = self.models[collection]
    return model_class().get_fields()
def generate_template_prefixes(self) -> None:
    """Index every template field by its '$'-prefix so that structured
    field names can later be mapped back (see to_template_field)."""
    for collection in self.allowed_collections:
        for field in self.get_fields(collection):
            if not isinstance(field, BaseTemplateField):
                continue
            field_name = field.get_template_field_name()
            parts = field_name.split("$")
            prefix, suffix = parts[0], parts[1]
            if prefix in self.template_prefixes[collection]:
                raise ValueError(
                    f"the template prefix {prefix} is not unique within {collection}"
                )
            # Store the name plus both segment lengths for fast slicing.
            self.template_prefixes[collection][prefix] = (
                field_name,
                len(prefix),
                len(suffix),
            )
def is_template_field(self, field: str) -> bool:
    """True for raw template field names, e.g. 'a_$_b' or a trailing 'a_$'."""
    return field.endswith("$") or "$_" in field
def is_structured_field(self, field: str) -> bool:
    """True for a template field with a concrete replacement, e.g. 'a_$5_b'."""
    if self.is_template_field(field):
        return False
    return "$" in field
def is_normal_field(self, field: str) -> bool:
    """True when the field name carries no '$' template marker at all."""
    return field.find("$") == -1
def make_structured(self, field: BaseTemplateField, replacement: Any) -> str:
    """Build the structured field name for *replacement*, which must be
    exactly str or int (bool and subclasses are rejected)."""
    if type(replacement) in (str, int):
        return field.get_structured_field_name(replacement)
    raise CheckException(
        f"Invalid type {type(replacement)} for the replacement of field {field}"
    )
def to_template_field(
    self, collection: str, structured_field: str
) -> Tuple[str, str]:
    """Returns template_field, replacement"""
    prefix = structured_field.split("$")[0]
    descriptor = self.template_prefixes[collection].get(prefix)
    if descriptor is None:
        raise CheckException(
            f"Unknown template field for prefix {prefix} in collection {collection}"
        )
    template_name, prefix_len, suffix_len = descriptor
    # Strip '<prefix>$' from the front and the suffix from the back;
    # what remains is the replacement.
    replacement = structured_field[prefix_len + 1 : len(structured_field) - suffix_len]
    return (template_name, replacement)
def run_check(self) -> None:
    """Validate the whole data set; raises CheckException carrying every
    collected error message if anything is wrong."""
    self.check_json()
    self.check_collections()
    for collection, models in self.data.items():
        for id_, model in models.items():
            # The dict key must agree with the model's own id field.
            if model["id"] != int(id_):
                self.errors.append(
                    f"{collection}/{id_}: Id must be the same as model['id']"
                )
            self.check_model(collection, model)
    if self.errors:
        raise CheckException("\n".join(f"\t{error}" for error in self.errors))
def check_json(self) -> None:
    """Ensure the raw data matches the compiled import SCHEMA."""
    try:
        SCHEMA(self.data)
    except fastjsonschema.exceptions.JsonSchemaException as err:
        message = f"JSON does not match schema: {str(err)}"
        raise CheckException(message)
def check_collections(self) -> None:
    """Compare the collections present in the data with the allowed set.
    Missing collections (unless is_partial) are reported first; invalid
    extra collections are only reported when none are missing."""
    present = set(self.data.keys())
    allowed = set(self.allowed_collections)
    err = "Collections in file do not match with models.py."
    missing = allowed - present
    if not self.is_partial and missing:
        raise CheckException(err + f" Missing collections: {', '.join(missing)}.")
    invalid = present - allowed
    if invalid:
        raise CheckException(err + f" Invalid collections: {', '.join(invalid)}.")
def check_model(self, collection: str, model: Dict[str, Any]) -> None:
    """Run all per-model checks. Type/relation/calculated-field checks
    are skipped as soon as the field sets themselves are broken, to
    avoid cascading follow-up errors."""
    has_errors = self.check_normal_fields(model, collection)
    if not has_errors:
        has_errors = self.check_template_fields(model, collection)
    if not has_errors:
        self.check_types(model, collection)
        self.check_relations(model, collection)
        self.check_calculated_fields(model, collection)
def check_normal_fields(self, model: Dict[str, Any], collection: str) -> bool:
    """Check the non-structured fields of one model against its model
    class: reports missing fields, unknown fields and per-field
    validation failures. Returns True if any error was recorded."""
    # Model keys that are plain or raw template fields; structured
    # fields like 'a_$5_b' are handled by check_template_fields.
    model_fields = set(
        x
        for x in model.keys()
        if self.is_normal_field(x) or self.is_template_field(x)
    )
    all_collection_fields = set(
        field.get_own_field_name()
        for field in self.models[collection]().get_fields()
    )
    required_or_default_collection_fields = set(
        field.get_own_field_name()
        for field in self.models[collection]().get_fields()
        if field.required or field.default is not None
    )
    # In partial mode only required/defaulted fields must be present.
    necessary_fields = (
        required_or_default_collection_fields
        if self.is_partial
        else all_collection_fields
    )
    errors = False
    if diff := necessary_fields - model_fields:
        error = f"{collection}/{model['id']}: Missing fields {', '.join(diff)}"
        self.errors.append(error)
        errors = True
    if diff := model_fields - all_collection_fields:
        error = f"{collection}/{model['id']}: Invalid fields {', '.join(f'{field} (value: {model[field]})' for field in diff)}"
        self.errors.append(error)
        errors = True
    # Run each declared field's own validation against the given value.
    for field in self.models[collection]().get_fields():
        if (fieldname := field.get_own_field_name()) in model_fields:
            try:
                field.validate(model[fieldname], model)
            except AssertionError as e:
                error = f"{collection}/{model['id']}: {str(e)}"
                self.errors.append(error)
                errors = True
    return errors
def check_template_fields(self, model: Dict[str, Any], collection: str) -> bool:
    """
    Only checks that for each replacement a structured field exists and
    not too many structured fields. Does not check the content.
    Returns True on errors.
    """
    errors = False
    for template_field in self.get_fields(collection):
        if not isinstance(template_field, BaseTemplateField):
            continue
        field_error = False
        # The raw template field holds the list of active replacements.
        replacements = model.get(template_field.get_template_field_name())
        if replacements is None:
            replacements = []
        if not isinstance(replacements, list):
            self.errors.append(
                f"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacements for the template field must be a list"
            )
            field_error = True
            continue
        for replacement in replacements:
            if not isinstance(replacement, str):
                self.errors.append(
                    f"{collection}/{model['id']}/{template_field.get_own_field_name()}: Each replacement for the template field must be a string"
                )
                field_error = True
        if field_error:
            errors = True
            continue
        replacement_collection = None
        if template_field.replacement_collection:
            replacement_collection = (
                template_field.replacement_collection.collection
            )
        # Every listed replacement must have its structured counterpart,
        # and (when the replacement references another collection) the
        # referenced model must exist.
        for replacement in replacements:
            structured_field = self.make_structured(template_field, replacement)
            if structured_field not in model:
                self.errors.append(
                    f"{collection}/{model['id']}/{template_field.get_own_field_name()}: Missing {structured_field} since it is given as a replacement"
                )
                errors = True
            if replacement_collection:
                try:
                    as_id = int(replacement)
                except (TypeError, ValueError):
                    self.errors.append(
                        f"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} is not an integer"
                    )
                if not self.find_model(replacement_collection, as_id):
                    self.errors.append(
                        f"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} does not exist as a model of collection {replacement_collection}"
                    )
        # Conversely, every structured field of this template present on
        # the model must be listed among the replacements.
        for field in model.keys():
            if self.is_structured_field(field):
                try:
                    _template_field, _replacement = self.to_template_field(
                        collection, field
                    )
                    if (
                        template_field.get_own_field_name() == _template_field
                        and _replacement
                        not in model[template_field.get_own_field_name()]
                    ):
                        self.errors.append(
                            f"{collection}/{model['id']}/{field}: Invalid structured field. Missing replacement {_replacement} in {template_field.get_own_field_name()}"
                        )
                        errors = True
                except CheckException as e:
                    self.errors.append(
                        f"{collection}/{model['id']}/{field} error: " + str(e)
                    )
                    errors = True
    return errors
    def check_types(self, model: Dict[str, Any], collection: str) -> None:
        """Type-check every non-template field of *model* against its model
        definition, appending any problems to self.errors."""
        for field in model.keys():
            if self.is_template_field(field):
                continue
            field_type = self.get_type_from_collection(field, collection)
            enum = self.get_enum_from_collection_field(field, collection)
            checker: Optional[Callable[..., bool]] = None
            # Walk the field type's MRO so subclasses fall back to the checker
            # registered for their nearest base class.
            for _type in type(field_type).mro():
                if _type in checker_map:
                    checker = checker_map[_type]
                    break
            else:
                raise NotImplementedError(
                    f"TODO implement check for field type {field_type}"
                )
            if not checker(model[field]):
                error = f"{collection}/{model['id']}/{field}: Type error: Type is not {field_type}"
                self.errors.append(error)
            # check if required field is not empty
            # committee_id is a special case, because it is filled after the
            # replacement
            # is_active_in_organization_id is also skipped, see PR #901
            skip_fields = (Meeting.committee_id, Meeting.is_active_in_organization_id)
            if (
                field_type.required
                and field_type.check_required_not_fulfilled(model, False)
                and field_type not in skip_fields
            ):
                error = f"{collection}/{model['id']}/{field}: Field required but empty."
                self.errors.append(error)
            # Enum constraint, when the model declares one for this field.
            if enum and model[field] not in enum:
                error = f"{collection}/{model['id']}/{field}: Value error: Value {model[field]} is not a valid enum value"
                self.errors.append(error)
def get_type_from_collection(self, field: str, collection: str) -> Field:
if self.is_structured_field(field):
field, _ = self.to_template_field(collection, field)
field_type = self.models[collection]().get_field(field)
return field_type
def get_enum_from_collection_field(
self, field: str, collection: str
) -> Optional[Set[str]]:
if self.is_structured_field(field):
field, _ = self.to_template_field(collection, field)
field_type = self.models[collection]().get_field(field)
return field_type.constraints.get("enum")
def check_relations(self, model: Dict[str, Any], collection: str) -> None:
for field in model.keys():
try:
self.check_relation(model, collection, field)
except CheckException as e:
self.errors.append(
f"{collection}/{model['id']}/{field} error: " + str(e)
)
def check_relation(
self, model: Dict[str, Any], collection: str, field: str
) -> None:
if self.is_template_field(field):
return
field_type = self.get_type_from_collection(field, collection)
basemsg = f"{collection}/{model['id']}/{field}: Relation Error: "
replacement = None
if self.is_structured_field(field):
_, replacement = self.to_template_field(collection, field)
if isinstance(field_type, RelationField):
foreign_id = model[field]
if not foreign_id:
return
foreign_collection, foreign_field = self.get_to(field, collection)
if foreign_collection in self.allowed_collections:
self.check_reverse_relation(
collection,
model["id"],
model,
foreign_collection,
foreign_id,
foreign_field,
basemsg,
replacement,
)
elif self.mode == "external":
self.errors.append(
f"{basemsg} points to {foreign_collection}/{foreign_id}, which is not allowed in an external import."
)
elif isinstance(field_type, RelationListField):
foreign_ids = model[field]
if not foreign_ids:
return
foreign_collection, foreign_field = self.get_to(field, collection)
if foreign_collection in self.allowed_collections:
for foreign_id in foreign_ids:
self.check_reverse_relation(
collection,
model["id"],
model,
foreign_collection,
foreign_id,
foreign_field,
basemsg,
replacement,
)
elif self.mode == "external":
self.errors.append(
f"{basemsg} points to {foreign_collection}/foreign_id, which is not allowed in an external import."
)
elif isinstance(field_type, GenericRelationField) and model[field] is not None:
foreign_collection, foreign_id = self.split_fqid(model[field])
foreign_field = self.get_to_generic_case(
collection, field, foreign_collection
)
if foreign_collection in self.allowed_collections:
self.check_reverse_relation(
collection,
model["id"],
model,
foreign_collection,
foreign_id,
foreign_field,
basemsg,
replacement,
)
elif self.mode == "external":
self.errors.append(
f"{basemsg} points to {foreign_collection}/{foreign_id}, which is not allowed in an external import."
)
elif (
isinstance(field_type, GenericRelationListField)
and model[field] is not None
):
for fqid in model[field]:
foreign_collection, foreign_id = self.split_fqid(fqid)
foreign_field = self.get_to_generic_case(
collection, field, foreign_collection
)
if foreign_collection in self.allowed_collections:
self.check_reverse_relation(
collection,
model["id"],
model,
foreign_collection,
foreign_id,
foreign_field,
basemsg,
replacement,
)
elif self.mode == "external":
self.errors.append(
f"{basemsg} points to {foreign_collection}/{foreign_id}, which is not allowed in an external import."
)
elif collection == "motion" and field == "recommendation_extension":
RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN = re.compile(
r"\[(?P<fqid>\w+/\d+)\]"
)
recommendation_extension = model["recommendation_extension"]
if recommendation_extension is None:
recommendation_extension = ""
possible_rerids = RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN.findall(
recommendation_extension
)
for fqid_str in possible_rerids:
re_collection, re_id_ = fqid_str.split(KEYSEPARATOR)
if re_collection != "motion":
self.errors.append(
basemsg + f"Found {fqid_str} but only motion is allowed."
)
if not self.find_model(re_collection, int(re_id_)):
self.errors.append(
basemsg
+ f"Found {fqid_str} in recommendation_extension but not in models."
)
    def get_to(self, field: str, collection: str) -> Tuple[str, Optional[str]]:
        # Resolve the target collection and the reverse field of a relation
        # field; structured fields are normalized to their template form.
        if self.is_structured_field(field):
            field, _ = self.to_template_field(collection, field)
        field_type = cast(BaseRelationField, self.models[collection]().get_field(field))
        return (
            field_type.get_target_collection().collection,
            # May be None when no back-reference is declared for the target.
            field_type.to.get(field_type.get_target_collection()),
        )
    def check_calculated_fields(
        self,
        model: Dict[str, Any],
        collection: str,
    ) -> None:
        """Re-derive mediafile visibility fields and compare against the
        stored values; only 'mediafile' models are checked."""
        if collection != "mediafile":
            return
        access_group_ids = model["access_group_ids"]
        parent_is_public = None
        parent_inherited_access_group_ids = None
        if model.get("parent_id"):
            parent = self.find_model(collection, model["parent_id"])
            # relations are checked beforehand, so parent always exists
            assert parent
            parent_is_public = parent["is_public"]
            parent_inherited_access_group_ids = parent["inherited_access_group_ids"]
        # Recompute what the stored calculated fields should be.
        is_public, inherited_access_group_ids = calculate_inherited_groups_helper(
            access_group_ids, parent_is_public, parent_inherited_access_group_ids
        )
        if is_public != model["is_public"]:
            self.errors.append(
                f"{collection}/{model['id']}: is_public is wrong. {is_public} != {model['is_public']}"
            )
        # Compare as sets: ordering of the id list is irrelevant.
        if set(inherited_access_group_ids) != set(
            model["inherited_access_group_ids"] or []
        ):
            self.errors.append(
                f"{collection}/{model['id']}: inherited_access_group_ids is wrong"
            )
def find_model(self, collection: str, id: int) -> Optional[Dict[str, Any]]:
return self.data.get(collection, {}).get(str(id))
    def check_reverse_relation(
        self,
        collection: str,
        id: int,
        model: Dict[str, Any],
        foreign_collection: str,
        foreign_id: int,
        foreign_field: Optional[str],
        basemsg: str,
        replacement: Optional[str],
    ) -> None:
        """Verify that the foreign model's back-reference actually points
        back at collection/id; append an error to self.errors otherwise."""
        if foreign_field is None:
            raise ValueError("Foreign field is None.")
        foreign_field_type = self.get_type_from_collection(
            foreign_field, foreign_collection
        )
        actual_foreign_field = foreign_field
        if self.is_template_field(foreign_field):
            if replacement:
                # We already know the replacement from the structured field.
                actual_foreign_field = cast(
                    BaseTemplateField, foreign_field_type
                ).get_structured_field_name(replacement)
            else:
                # Derive the replacement from this model's own reference to
                # the template's replacement collection (e.g. meeting_id).
                replacement_collection = cast(
                    BaseTemplateField, foreign_field_type
                ).replacement_collection
                if replacement_collection:
                    replacement = model.get(f"{replacement_collection.collection}_id")
                    if not replacement:
                        self.errors.append(
                            f"{basemsg} points to {foreign_collection}/{foreign_id}/{foreign_field},"
                            f" but there is no replacement for {replacement_collection}"
                        )
                actual_foreign_field = self.make_structured(
                    cast(BaseTemplateField, foreign_field_type), replacement
                )
        foreign_model = self.find_model(foreign_collection, foreign_id)
        foreign_value = (
            foreign_model.get(actual_foreign_field)
            if foreign_model is not None
            else None
        )
        fqid = f"{collection}/{id}"
        # The expected back-reference value depends on the reverse field kind:
        # plain id, id list, fqid, or fqid list.
        error = False
        if isinstance(foreign_field_type, RelationField):
            error = foreign_value != id
        elif isinstance(foreign_field_type, RelationListField):
            error = not foreign_value or id not in foreign_value
        elif isinstance(foreign_field_type, GenericRelationField):
            error = foreign_value != fqid
        elif isinstance(foreign_field_type, GenericRelationListField):
            error = not foreign_value or fqid not in foreign_value
        else:
            raise NotImplementedError()
        if error:
            self.errors.append(
                f"{basemsg} points to {foreign_collection}/{foreign_id}/{actual_foreign_field},"
                " but the reverse relation for it is corrupt"
            )
    def split_fqid(self, fqid: str) -> Tuple[str, int]:
        """Split a fully-qualified id 'collection/id' into (collection, int id).

        Raises CheckException when the fqid is malformed or, in external
        mode, when its collection is not in the allowed set.
        """
        try:
            collection, _id = fqid.split("/")
            id = int(_id)
            if self.mode == "external" and collection not in self.allowed_collections:
                raise CheckException(f"Fqid {fqid} has an invalid collection")
            return collection, id
        except (ValueError, AttributeError):
            # ValueError: wrong number of '/'-separated parts or non-numeric
            # id; AttributeError: fqid is not a string at all.
            raise CheckException(f"Fqid {fqid} is malformed")
def split_collectionfield(self, collectionfield: str) -> Tuple[str, str]:
collection, field = collectionfield.split("/")
if collection not in self.allowed_collections:
raise CheckException(
f"Collectionfield {collectionfield} has an invalid collection"
)
if field not in [
field.get_own_field_name()
for field in self.models[collection]().get_fields()
]:
raise CheckException(
f"Collectionfield {collectionfield} has an invalid field"
)
return collection, field
def get_to_generic_case(
self, collection: str, field: str, foreign_collection: str
) -> str:
"""Returns all reverse relations as collectionfields"""
to = cast(BaseRelationField, self.models[collection]().get_field(field)).to
if isinstance(to, dict):
if Collection(foreign_collection) not in to.keys():
raise CheckException(
f"The collection {foreign_collection} is not supported "
"as a reverse relation in {collection}/{field}"
)
return to[Collection(foreign_collection)]
for cf in to:
c, f = self.split_collectionfield(cf.collection)
if c == foreign_collection:
return f
raise CheckException(
f"The collection {foreign_collection} is not supported as a reverse relation in {collection}/{field}"
)
|
996,950 | c204ea4360025bdcb57aab4dcd2e14e8a9d54244 | from __future__ import print_function
import piplates.RELAYplate as RELAY
import piplates.DAQCplate as DAQC
import time
# Continuously poll the 8 ADC channels of the pi-plates DAQC board,
# one channel per second, printing each reading.
ppADDR=1  # stack address of the DAQC plate
ADCchan=0  # current ADC channel (0-7), advanced each iteration
print('reading adc channel', ADCchan)
while True:
    adcread = DAQC.getADC(ppADDR, ADCchan)
    print("ADC reading #, val: ", ADCchan, adcread)
    time.sleep(1.0)  # pace the polling at ~1 Hz
    ADCchan = ADCchan + 1
    if ADCchan > 7:
        # wrap around to channel 0 after the last channel
        ADCchan = 0
|
996,951 | dd27eaf628372321fc50146b7596ca90554bee81 | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from math import floor
from skimage import transform as tf
from numpy.linalg import norm
from numpy.linalg import inv
from util import *
#The local affine Algorithm
def local_affine(ori_img,proto_img,e,regions,is_in_regions,distance_funs,affine_funs):
new_img = np.zeros(proto_img.shape)
for i in range(new_img.shape[0]):
for j in range(new_img.shape[1]):
tmp_point = np.array([i,j])
flag = is_in_regions(tmp_point,regions)
if flag >= 0 :
#When the points is in V_i
affine_point = affine_funs[flag](tmp_point)
new_img[i][j] = ori_img[int(affine_point[0])][int(affine_point[1])]
else:
#When the points is not in V_i
weights = weight(tmp_point,distance_funs,e)
#Compute the new position
affine_point = transform(np.array([i,j]),weights,affine_funs)
#Compute the value of the pixel value
new_img[i][j] = linear_interpolation(affine_point,ori_img)
return new_img
#Preprocess the data
def preprocess(ori_path,proto_path,ori_points_path,proto_points_path,regions_path,distance_item):
ori_img = Image.open(ori_path)
proto_img = Image.open(proto_path)
ori_img = np.array(ori_img)
proto_img = np.array(proto_img)
a = np.array([1.0*ori_img.shape[0]/proto_img.shape[0],1.0*ori_img.shape[1]/proto_img.shape[1]])
a = np.array([1,1])
#Load control points data
try:
proto_dict = load_data(proto_points_path)
ori_dict = load_data(ori_points_path)
except BaseException:
return ('The control points format is not correct,please change it'),None
#Load the regions data we want to use
try:
regions = load_region(regions_path)
except BaseException:
return ('The control regions choose is not correct,please change it'),None
#For plot the control points on the face which is useless for the GUI and algorithms
ori_dict_plot = {}
proto_dict_plot = {}
for region in regions:
ori_tmp = []
proto_tmp = []
for key in region:
if key not in ori_dict or key not in proto_dict:
return ('The control points format is not correct,please change it'),None
ori_tmp.append(ori_dict[key])
proto_tmp.append(proto_dict[key])
ori_dict_plot[','.join(region)] = ori_tmp
proto_dict_plot[','.join(region)] = proto_tmp
#Change the dictionary data to list data
regions_points = []
q_regions_points = []
p_regions_points = []
affine_funs = []
affine_dict = {}
distance_funs = []
for i,keys in enumerate(regions):
src = []
dst = []
for key in keys:
if key not in ori_dict or key not in proto_dict:
return ('The control regions choose is not correct,please change it'),None
affine_dict[str(proto_dict[key])] = ori_dict[key]
src.append(proto_dict[key])
dst.append(ori_dict[key])
#For different type of regions do different actions
if len(keys) == 1:
regions_points.append(src)
p_regions_points.append(src[0])
q_regions_points.append(dst[0])
affine_funs.append(linear_affine_fun(np.array(dst[0])-np.array(src[0])))
distance_funs.append(distance_fun(src,distance_item))
elif len(keys) == 2:
n=3
if n < 0:
regions_points.append(src)
affine_funs.append(affine_fun(np.array(src),np.array(dst)))
distance_funs.append(distance_fun(src,distance_item))
else:
src_aug_points = line_points(src[0],src[1],n)
dst_aug_points = line_points(dst[0],dst[1],n)
n = n+1
for i in range(n):
src_tmp = src_aug_points[i:(i+2)]
dst_tmp = dst_aug_points[i:(i+2)]
regions_points.append(src_tmp)
affine_funs.append(similarity_fun(np.array(src_tmp),np.array(dst_tmp)))
distance_funs.append(distance_fun(src_tmp,distance_item))
elif len(keys) == 3:
regions_points.append(src)
affine_funs.append(affine_fun(np.array(src),np.array(dst)))
distance_funs.append(distance_fun(src,distance_item))
return (ori_img,proto_img,regions_points,is_in_regions_fun,distance_funs,affine_funs),(ori_dict_plot,proto_dict_plot,q_regions_points,p_regions_points)
|
996,952 | d9057b3347ad8f84cb7e59a37c3f633e709cfe17 | from PIL import Image, ImageDraw, ImageFont
class ImageText:
    """
    ImageText
    :param kwargs: strings, font, color, image_width, image_height, image_padding
    strings: list of strings
    font: string path to font
    color: RGB tuple
    image_width: integer
    image_height: integer
    image_padding: integer
    Required: font
    """
    def __init__(self, **kwargs):
        strings = kwargs.get('strings', list())
        self._font = kwargs.get('font', None)
        if not self._font:
            raise TypeError("Missing required field 'font'")
        self._color = kwargs.get('color', (0, 0, 0))
        self._image_width = kwargs.get('image_width', 250)
        self._image_height = kwargs.get('image_height', 250)
        self._image_padding = kwargs.get('image_padding', 0)
        # Joined text is only used to size the font; empty strings dropped.
        self._text = "\n".join(filter(None, strings)).rstrip()
        self._font_size = self.__calculate_font_size()
        self._image_font = ImageFont.truetype(self._font, self._font_size)
        # Total height of all lines, used to vertically center the block.
        self._aggregate_height = sum([self.__calculate_string_size(self._image_font, s)[1] for s in strings])
        # Per-line layout tuples: (text, (w, h), (start_x, start_y)).
        self._strings_detail = list()
        previous_str_size = (0, 0)
        for s in strings:
            size = self.__calculate_string_size(self._image_font, s)
            # NOTE(review): the padding terms cancel here, so start_x is just
            # the horizontal centering offset — confirm that is intended.
            start_x = (self._image_padding + (self._image_width - size[0]) / 2) - self._image_padding
            start_y = ((self._image_height - self._aggregate_height) / 2) + previous_str_size[1]
            self._strings_detail.append((s, size, (start_x, start_y)))
            previous_str_size = size
    def draw_text(self, drawer):
        """
        Draw text to image.
        :param drawer: ImageDraw object
        :return: void
        """
        for s_details in self._strings_detail:
            drawer.text((s_details[2][0], s_details[2][1]), s_details[0],
                        font=self._image_font, fill=self._color, align='center')
    def draw_mask_placeholder(self, drawer):
        """
        Draw the placeholder for text
        :param drawer: ImageDraw object
        :return: void
        """
        for s_details in self._strings_detail:
            end_x = s_details[2][0] + s_details[1][0] + 5  # 5 px for bottom padding
            end_y = s_details[2][1] + s_details[1][1] + 5  # 5 px for bottom padding
            drawer.rectangle((s_details[2], (end_x, end_y)), fill=(255, 255, 255))
    def get_font(self):
        """
        Return Font object
        :return: PIL Font object
        """
        return self.__get_font(self._font_size)
    def __calculate_font_size(self):
        # Largest font size whose rendered text still fits the padded width.
        start_font_size = 10
        max_width = self._image_width - (self._image_padding * 2)
        # Check font size on each iteration until the width is too long for image.
        # Once its too large, return the previous value.
        while True:
            font_size = start_font_size + 1
            font = ImageFont.truetype(self._font, font_size)
            string_size = self.__calculate_string_size(font, self._text)
            if string_size[0] > max_width:
                return start_font_size
            start_font_size += 1
    def __calculate_string_size(self, font, string):
        # Measure (width, height) of *string* in *font* via a scratch image.
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10; this
        # requires an older Pillow — consider textbbox when upgrading.
        img = Image.new('RGB', (self._image_width, self._image_height))
        draw = ImageDraw.Draw(img)
        return draw.textsize(string, font)
    def __get_font(self, font_size):
        return ImageFont.truetype(self._font, font_size)
|
996,953 | d2f5ccd8f2a28a2a060e4e85035cc432ba0e27b0 | """
Copyright 2013 LogicBlox, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
Neither the name of LogicBlox nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
"""
import unittest
import sys
import os
import socket
import blox.connect.ConnectBloxAsync_pb2
DEFAULT_ADMIN_PORT = 55182
class AsyncConnection:
    """ConnectBlox Async Connection.

    Speaks a simple framed protocol over TCP: each message is a 4-byte
    big-endian id, a 4-byte big-endian length, then a protobuf payload.
    NOTE(review): the byte handling (``"".join`` of recv chunks,
    ``bytearray(str)``) is Python 2 style — verify before running on 3.x.
    """
    def __init__(self):
        # Port comes from the environment when set, else the default.
        self.port = os.getenv('LB_CONNECTBLOX_ASYNC_SERVER_ADMIN_PORT', DEFAULT_ADMIN_PORT)
        if not isinstance(self.port, int) and not self.port.isdigit():
            raise RuntimeError("Connection port must be an integer but is %s" % self.port)
        self.port = int(self.port)
        self.host = "localhost"
        self.reqid = 0  # monotonically increasing request id
        self.response_buffer = {}
    # returns response message
    def call(self, req):
        # Synchronous round trip: send and wait for the matching response.
        request_id = self.send(req)
        response_id, response = self.receive_next()
        if response_id != request_id:
            raise RuntimeError("request/response id mismatch")
        return response
    # returns a request_id
    def send(self, msg):
        # Frame: [request id][payload length][serialized protobuf].
        txt = msg.SerializeToString()
        self.reqid = self.reqid + 1;
        self.sendsize(self.reqid)
        self.sendsize(len(txt))
        self.sock.sendall(txt)
        return self.reqid
    # returns a tuple of response_id and message
    def receive_next(self):
        response = blox.connect.ConnectBloxAsync_pb2.AsyncAdminResponse()
        response_id = self.readsize()
        msglen = self.readsize()
        serialized = self.receiveall(msglen)
        response.ParseFromString(serialized)
        return (response_id, response)
    def receiveall(self, msglen):
        # Read exactly msglen bytes; recv may return short reads, so loop.
        msg = []
        while msglen:
            chunk = self.sock.recv(msglen)
            if len(chunk) == 0:
                raise RuntimeError("socket connection broken")
            msg.append(chunk)
            msglen -= len(chunk)
        return "".join(msg)
    def sendsize(self, x):
        # Encode x as 4 bytes, big-endian, and send it.
        b1 = ((x >> 24) & 0xff)
        b2 = ((x >> 16) & 0xff)
        b3 = ((x >> 8) & 0xff)
        b4 = ((x >> 0) & 0xff)
        b = bytearray([b1, b2, b3, b4])
        self.sock.sendall(b)
    def readsize(self):
        # Decode a 4-byte big-endian unsigned integer from the stream.
        s = self.receiveall(4)
        b = bytearray(s)
        return ((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16) | ((b[2] & 0xff) << 8) | ((b[3] & 0xff) << 0);
    def open(self):
        # Connect the TCP socket; must be called before send/receive.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # Disable Nagle: the protocol exchanges many small framed messages.
        self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
996,954 | fe1279a76a73730a15b60638e40e182b25ba470a | from math import sqrt
def suite(n):
    """Return the n-th term of the sequence u_{k+1} = sqrt(1 + u_k), u_0 = 1.

    For n = 0 the initial value 1 is returned; the sequence converges to
    the golden ratio as n grows.
    """
    term = 1
    for _ in range(n):
        term = sqrt(1 + term)
    return term
|
996,955 | 42e7b6ea5d66afec87d04482f7f54589421571f1 | #!/usr/bin/env python3
import sys
def input():
    # Fast stdin reader that overrides the builtin; drops the trailing
    # newline by slicing off the last character of the line.
    return sys.stdin.readline()[:-1]
def factorization(n):
    """Prime-factorize n, returning [[prime, exponent], ...].

    Trial division up to ceil(sqrt(n)); a leftover remainder > 1 is a prime
    factor itself.  If nothing was collected (e.g. n == 1), [[n, 1]] is
    returned to match the original behavior.
    """
    factors = []
    remainder = n
    limit = int(-(-n ** 0.5 // 1)) + 1  # ceil(sqrt(n)) + 1
    for divisor in range(2, limit):
        if remainder % divisor:
            continue
        exponent = 0
        while remainder % divisor == 0:
            exponent += 1
            remainder //= divisor
        factors.append([divisor, exponent])
    if remainder != 1:
        factors.append([remainder, 1])
    if not factors:
        factors.append([n, 1])
    return factors
def main():
    """Read N and print the minimal i + j - 2 over all factor pairs i*j == N.

    Only divisors i <= sqrt(N) need checking; j is the cofactor.
    """
    N = int(input())
    ans = 1e18  # effectively infinity; always beaten by the pair (1, N)
    for i in range(1, N + 1):
        if i * i > N:
            break
        if N % i != 0:
            continue
        j = N // i
        ans = min(ans, i + j - 2)
    print(ans)
if __name__ == '__main__':
    main()
|
996,956 | 001a6effcb0534f2f3cf51555a54acbf44c9d3c0 | """Solution to task 4 from lesson 10."""
def dict_with_attrs(*args):
    """Return class extended from dict with predefined attributes."""
    class CustomDict(dict):
        # __slots__ limits instances to exactly the attribute names in *args*.
        __slots__ = args
        def __init__(self, *args, **kwargs):
            super(CustomDict, self).__init__(*args)
            # Python 2 API (iteritems): each keyword becomes an attribute.
            for k, v in kwargs.iteritems():
                setattr(self, k, v)
    return CustomDict
def dict_with_attrs2(*args):
    """Return a dict-like class with the predefined slot attributes *args*.

    Item access (d[k]) is backed by an internal mapping; attribute access
    falls back to that mapping via __getattr__.
    """
    class CustomDict(object):
        # Fix: the original declared a CLASS-level ``__dict__ = {}`` which
        # was shared by every instance, so all instances silently saw each
        # other's items.  A per-instance ``_storage`` slot replaces it.
        __slots__ = tuple(args) + ('_storage',)
        def __init__(self, *args, **kwargs):
            super(CustomDict, self).__init__()
            self._storage = {}
            if args:
                self._storage.update(*args)
            # Python 2 API (iteritems), consistent with the rest of the file.
            for k, v in kwargs.iteritems():
                setattr(self, k, v)
        def __getitem__(self, key):
            return self._storage[key]
        def __setitem__(self, key, val):
            self._storage[key] = val
        def __delitem__(self, key):
            del self._storage[key]
        def __getattr__(self, name):
            # Called only when normal attribute lookup fails; falls back to
            # the item mapping (raises KeyError for unknown names, matching
            # the original behavior).
            return self._storage[name]
    return CustomDict
def main():
    """Demo of dict_with_attrs (Python 2 print-statement syntax)."""
    Test = dict_with_attrs('test', 'other')
    d = Test({'a': 1}, test='test')
    print d['a']
    print d.test
    d.other = 'Hey!'
    d[10] = 11
    print d[10]
    # This shall fail: 'unknown' is not one of the declared slots.
    d.unknown = 42
if __name__ == '__main__':
    main()
|
def cantidad_jugadas(matriz):
    """Count the moves played: the number of non-empty cells on the board."""
    return sum(1 for fila in matriz for celda in fila if celda != "")
def imprimir_matriz(matriz):
    """Print the board, one row per line, delimited by '|' on both sides.

    Empty cells are rendered as a single space.
    """
    for fila in matriz:
        print("|", end="")
        for celda in fila:
            print(celda if celda != '' else " ", end="")
        print("|")
def actualizar_jugada(matriz,fila,col,caracter):
    # Place *caracter* ('X' or 'O') at row *fila*, column *col*, in place.
    matriz[fila][col] = caracter
# Build an empty 3x3 tic-tac-toe board and exercise the helpers above.
triqui = list()
triqui.append(['','',''])
triqui.append(['','',''])
triqui.append(['','',''])
print(cantidad_jugadas(triqui))
actualizar_jugada(triqui,0,0,"X")
actualizar_jugada(triqui,1,0,"X")
imprimir_matriz(triqui)
print(cantidad_jugadas(triqui))
actualizar_jugada(triqui,2,2,"O")
print(cantidad_jugadas(triqui))
imprimir_matriz(triqui)
# TODO: create a function that makes a random move
996,958 | 59143c3ed347577d3484a411fe2493e1742f71fe | from ControlActions import *
from Gestures import *
class ConfigReader:
    """Provides gesture-to-action mappings.

    The gesture constants (PALM, FIST, ...) and action constants (MOVE,
    LEFT_CLICK, ...) come from the star imports of Gestures and
    ControlActions at the top of this file.
    """
    def __init__(self):
        pass
    @classmethod
    def fromPath(cls, path):
        # Stub: loading a mapping from a config file is not implemented yet.
        return {}
    @classmethod
    def default(cls):
        # Built-in mapping used when no configuration file is supplied.
        return {
            PALM: MOVE,
            FIST: LEFT_CLICK,
            KNIFE: ESCAPE,
            ZERO: RIGHT_CLICK,
            NO_GST: NO_ACTION,
        }
996,959 | 8a807addb0ee5426979468bebeeaa57a4be58457 | from plenum.test.helper import sdk_send_random_request, \
sdk_send_random_requests, sdk_get_and_check_replies, sdk_send_random_and_check
from plenum.test.pool_transactions.helper import sdk_pool_refresh
def test_sdk_pool_handle(sdk_pool_handle):
    """Smoke test: the pool-handle fixture yields a positive handle."""
    ph = sdk_pool_handle
    assert ph > 0
def test_sdk_wallet_handle(sdk_wallet_handle):
    """Smoke test: the wallet-handle fixture yields a positive handle."""
    wh = sdk_wallet_handle
    assert wh > 0
def test_sdk_trustee_wallet(sdk_wallet_trustee):
    """The trustee wallet fixture yields a valid (handle, DID) pair."""
    wh, tr_did = sdk_wallet_trustee
    assert wh > 0
    assert tr_did
def test_sdk_steward_wallet(sdk_wallet_steward):
    """The steward wallet fixture yields a valid (handle, DID) pair."""
    wh, st_did = sdk_wallet_steward
    assert wh > 0
    assert st_did
def test_sdk_client_wallet(sdk_wallet_client):
    """The client wallet fixture yields a valid (handle, DID) pair."""
    wh, cl_did = sdk_wallet_client
    assert wh > 0
    assert cl_did
def test_sdk_new_client_wallet(sdk_wallet_new_client):
    """A freshly created client wallet yields a valid (handle, DID) pair."""
    wh, cl_did = sdk_wallet_new_client
    assert wh > 0
    assert cl_did
def test_sdk_new_steward_wallet(sdk_wallet_new_steward):
    """A freshly created steward wallet yields a valid (handle, DID) pair."""
    wh, cl_did = sdk_wallet_new_steward
    assert wh > 0
    assert cl_did
def test_sdk_trustee_send(looper, sdk_pool_handle, sdk_wallet_trustee):
    """A random request signed by the trustee gets a reply with a result."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_trustee)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_steward_send(looper, sdk_pool_handle, sdk_wallet_steward):
    """A random request signed by the steward gets a reply with a result."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_steward)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_client_send(looper, sdk_pool_handle, sdk_wallet_client):
    """A random request signed by the client gets a reply with a result."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_client2_send(looper, sdk_pool_handle, sdk_wallet_client2):
    """A random request signed by the second client gets a valid reply."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client2)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_new_client_send(looper, sdk_pool_handle, sdk_wallet_new_client):
    """A random request signed by a brand-new client gets a valid reply."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_new_client)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_new_steward_send(looper, sdk_pool_handle, sdk_wallet_new_steward):
    """A random request signed by a brand-new steward gets a valid reply."""
    resp_task = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_new_steward)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_sdk_steward_send_many(looper, sdk_pool_handle, sdk_wallet_steward):
    """A batch of 30 random steward requests all receive valid replies."""
    resp_task = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_steward, 30)
    repl = sdk_get_and_check_replies(looper, resp_task)
    for _, resp in repl:
        assert resp['result']
def test_sdk_pool_refresh(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """The pool still processes requests after a ledger refresh."""
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
|
def symbolToNumber(symbol):
    """Map a nucleotide letter to its lexicographic index.

    'A' -> 0, 'C' -> 1, 'G' -> 2, 'T' -> 3; None for any other input,
    matching the original fall-through behavior.
    """
    mapping = {"A": 0, "C": 1, "G": 2, "T": 3}
    return mapping.get(symbol)
def patternToNumber(pattern):
    """Lexicographic rank of a DNA string, read as a base-4 number.

    Iterative equivalent of the recursive definition: each symbol shifts
    the accumulated value by one base-4 digit.  The empty string maps to 0.
    """
    value = 0
    for ch in pattern:
        value = 4 * value + symbolToNumber(ch)
    return value
def Frequencies(text , k):
    """Count every k-mer of *text* into a 4**k frequency array.

    Index of each slot is the k-mer's base-4 rank (see patternToNumber).
    """
    frequency = [0] * (4 ** k)
    for start in range(len(text) - k + 1):
        kmer = text[start:start + k]
        frequency[patternToNumber(kmer)] += 1
    return frequency
def NumberToSymbol(num):
    """Inverse of symbolToNumber: 0..3 -> 'A','C','G','T'.

    Returns None for any other value, matching the original fall-through.
    """
    return {0: "A", 1: "C", 2: "G", 3: "T"}.get(num)
def NumberToPattern(index , k):
    """Convert a base-4 rank back into a k-mer.

    NOTE: for k > 1 the result is a LIST of characters in REVERSED order
    (least-significant symbol first); the caller compensates with
    ``"".join(reversed(...))``.  For k == 1 a plain string is returned.
    """
    array = []
    if k==1:
        return NumberToSymbol(index)
    prefixIndex = index//4
    r = index%4  # least-significant base-4 digit -> last symbol of the k-mer
    symbol = NumberToSymbol(r)
    prefixPattern = NumberToPattern(prefixIndex , k-1)
    array+=[symbol]
    # list += str/list extends element-wise, so characters accumulate
    # last-symbol-first.
    array+=prefixPattern
    return array
def ClumpFinding(Genome, k, L, t):
    """Find all k-mers that form an (L, t)-clump in *Genome*.

    A k-mer clumps if it occurs at least t times inside some window of
    length L.  Returns the patterns as produced by NumberToPattern.

    Fix: the inner index previously reused ``i``, shadowing the window
    loop variable; it is renamed for clarity and safety.
    """
    freqPattern = []
    clump = [0] * (4 ** k)
    # Slide a window of length L across the genome.  (The original scans
    # len(Genome)-k+1 start positions; trailing short windows are subsets of
    # earlier ones, so results are unaffected.)
    for start in range(len(Genome) - k + 1):
        window = Genome[start:start + L]
        freqArray = Frequencies(window, k)
        for idx in range(4 ** k):
            if freqArray[idx] >= t:
                clump[idx] = 1
    for idx in range(4 ** k):
        if clump[idx] == 1:
            pattern = NumberToPattern(idx, k)
            freqPattern += [pattern]
    return freqPattern
# Read the genome, then k (k-mer length), L (window length), t (threshold).
genome = input()
k , L , t = map(int , input().split())
temp = ClumpFinding(genome , k , L , t)
array = []
for i in range(len(temp)):
    # NumberToPattern yields characters least-significant-first; undo it.
    array += ["".join(reversed(temp[i]))]
print(*array , sep=" ")
996,961 | e200cd7f9f1f5beb015ccf700e35909166053b82 | from skimage.feature import hog
from HSV import *
from scipy.ndimage import filters
def grayScale_feature(img_name, image_size):
    # Flattened grayscale pixel vector of the resized image
    # (array/Image come from the star imports at the top of the file).
    return(array(Image.open(img_name).convert('L').resize(image_size)).flatten())
def HOG_feature(img_name, image_size):
    # Histogram-of-oriented-gradients descriptor of the grayscale image.
    img_PIL = Image.open(img_name).resize(image_size)
    imgBW = array(img_PIL.convert('L'))
    fd = hog(imgBW, orientations=8, pixels_per_cell=(16, 16),cells_per_block=(1, 1))
    # fd is already 1-D; the newaxis round-trip is kept for compatibility.
    return fd[:,newaxis].flatten()
def Log_Sobel_feature(img_name, image_size):
    # Log-compressed Sobel gradient magnitude; the +100 offset keeps the
    # argument of log strictly positive.
    im = array(Image.open(img_name).resize(image_size).convert('L'))
    imx = zeros(im.shape)
    filters.sobel(im,1,imx)  # derivative along axis 1
    imy = zeros(im.shape)
    filters.sobel(im,0,imy)  # derivative along axis 0
    return log(sqrt(imx**2 + imy**2).flatten()+100)
def Sobel_feature(img_name, image_size):
    # Raw Sobel gradient magnitude of the grayscale image, flattened.
    im = array(Image.open(img_name).resize(image_size).convert('L'))
    imx = zeros(im.shape)
    filters.sobel(im,1,imx)  # derivative along axis 1
    imy = zeros(im.shape)
    filters.sobel(im,0,imy)  # derivative along axis 0
    return sqrt(imx**2 + imy**2).flatten()
def H_feature(img_name, image_size):
    # First channel of the custom HSV conversion (convert_my_hsv from HSV.py);
    # presumably the hue plane — channel order depends on that helper.
    HSV = array(convert_my_hsv(Image.open(img_name).resize(image_size)))
    return HSV[0,:,:].flatten()
def S_feature(img_name, image_size):
    # Second channel of the custom HSV conversion (saturation, per the name).
    HSV = array(convert_my_hsv(Image.open(img_name).resize(image_size)))
    return HSV[1,:,:].flatten()
def V_feature(img_name, image_size):
    # Third channel of the custom HSV conversion (value, per the name).
    HSV = array(convert_my_hsv(Image.open(img_name).resize(image_size)))
    return HSV[2,:,:].flatten()
def HSV_feature(img_name, image_size):
    # All three HSV channels flattened into a single feature vector.
    return array(convert_my_hsv(Image.open(img_name).resize(image_size)))[:,:,:].flatten()
|
996,962 | d937005dcb00b5cb103fd2190c279eec0fbd1bf0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Jonny"
# Email: jonnysps@yeah.net
# Date: 2017/10/13
import requests
def func_t():
    """POST a WebTours registration form and return the response body.

    Fix: the function used to print the body and implicitly return None,
    so the module-level ``print(a)`` printed ``None``.  It now returns the
    text for the caller to print/use.
    """
    headers = {'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.8',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3408.400 QQBrowser/9.6.12028.400',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
               'Content-Type': 'application/x-www-form-urlencoded'}
    url = "http://127.0.0.1:1080/WebTours/login.pl"
    data = {'username': 'jonny10011',
            'password': 'jy123456',
            'passwordConfirm': 'jy123456',
            'firstName': 'jonny3',
            'lastName': 'ps3',
            'address1': 'Street + Address + no10003',
            'address2': 'CityStateZipno10003',
            'register.x': '52',
            'register.y': '8'
            }
    # Local renamed from `re` to avoid confusion with the regex module name.
    resp = requests.post(url, data=data, headers=headers)
    return resp.text
# Run the registration once at import time and print func_t's return value.
a=func_t()
print(a)
996,963 | d5c42faf674e0f049c3fed61c4acc7399cfba893 | from bs4 import BeautifulSoup
import requests
import re
import os
class GetPics:
    """Download a forum post's description text and attachment JPGs.

    Files are written under posts/<post_number>/; images are numbered
    sequentially starting at 1.
    """

    def __init__(self, url, post_number):
        self.url = url
        self.post_number = post_number
        self.img_number = 1

    def getpic(self):
        """Fetch the post page, save its text description and every image."""
        raw_page = requests.get(self.url)
        raw_page.encoding = 'utf-8'
        soup = BeautifulSoup(raw_page.text, 'html.parser')
        img_tags = soup.find_all('img', file=re.compile(r"attachments/.*?\.jpg"))
        describe_tag = soup.find('td', class_='t_msgfont')
        post_title = soup.find('title')
        if not os.path.exists('posts/{}'.format(self.post_number)):
            os.makedirs('posts/{}'.format(self.post_number))
        # Fix: use context managers so handles are closed even when a write
        # or download raises (the originals leaked on error).
        with open('posts/{}/{}.txt'.format(self.post_number, self.post_number),
                  'w', encoding='utf-8') as f:
            f.write(self.url + '\n')
            f.write(post_title.text + '\n\n\n\n\t\t\t')
            for string in describe_tag.stripped_strings:
                f.write(string + '\n')
        for img_tag in img_tags:
            img_url = img_tag.get('file')
            real_url = 'http://91.t9p.today/' + img_url
            raw_img = requests.get(real_url)
            with open('posts/{}/{}.jpg'.format(self.post_number, self.img_number),
                      'wb') as img_file:
                img_file.write(raw_img.content)
                print('downing {}'.format(self.img_number))
            self.img_number += 1
996,964 | fb8b383b59d1a2f457ebb9aef283d36f76fb0de3 | import os
folder = os.path.realpath('.')
import numpy
import math
import pylab
import scipy
import scipy.special
# --- Part 1: magnetic field profile ----------------------------------------
datafile = 'Campo_magnetico.txt'
rawdata = numpy.loadtxt(os.path.join(folder, datafile)).T
V = rawdata[0]
dV = rawdata[1]
t = 4
dt = 1
r = rawdata[2]
dr = rawdata[3]
I = rawdata[4]
dI = rawdata[5]
e = 1.6 * 10 ** (-19)  # electron charge [C]
n = 4
Bi = (e * n * t * V) / (11.1 * I)
dBi = Bi * pylab.sqrt((dt / t) ** 2 + (dV / V) ** 2 + (0.1 / 11.1) ** 2 + (dI / I) ** 2)
B = Bi / (7.80 * I * (10 ** (-4)))
dB = B * pylab.sqrt((dBi / Bi) ** 2 + (dI / I) ** 2)
# Grafico
pylab.figure(1)
pylab.xlabel('r [cm]', size=22)
pylab.ylabel('Bz/Bzmax', size=22)
pylab.title('Andamento del campo magnetico', size=20)
pylab.grid(color='gray')
pylab.errorbar(r, B, dB, dr, linestyle='', color='black')
pylab.tight_layout()   # was missing call parentheses, so it had no effect
pylab.minorticks_on()  # same here
pylab.show()
# --- PARTE 2: circle fit from digitized points ------------------------------
Icoil = 5
dIcoil = 1
Vacc = 4
dVacc = 3
datafile = 'Cerchio.txt'
rawdata = numpy.loadtxt(os.path.join(folder, datafile)).T
x = rawdata[0]
dx = rawdata[1]
y = rawdata[2]
dy = rawdata[3]
# Chord slopes / perpendicular-bisector intercepts.
# BUG FIX: the original `a = b = numpy.empty(...)` aliased a and b to the
# SAME array, so writing b[i] clobbered a[i]; they must be distinct buffers.
a = numpy.empty(len(x))
b = numpy.empty(len(x))
for i in range(0, len(x) - 1):
    a[i] = (y[i + 1] - y[i]) / (x[i + 1] - x[i])
    b[i] = (y[i + 1] + y[i]) / 2 + (1 / (2 * a[i])) * (x[i + 1] + x[i])
# Circle centres from pairs of bisectors (same aliasing fix for xc/yc).
xc = numpy.empty(len(a))
yc = numpy.empty(len(a))
for i in range(0, len(a) - 1):
    xc[i] = a[i] * a[i + 1] * (b[i + 1] - b[i]) / (a[i] - a[i + 1])
    yc[i] = (-xc[i] / a[i]) + b[i]
d = numpy.empty(len(xc))
for i in range(0, math.floor((1 / 3) * len(xc))):
    print('Coordinate dei centri')
    print(xc[3 * i], yc[3 * i])
    d[3 * i] = pylab.sqrt((x[3 * i] - xc[3 * i]) ** 2 + (y[3 * i] - yc[3 * i]) ** 2)
    d[3 * i + 1] = pylab.sqrt((x[3 * i + 1] - xc[3 * i]) ** 2 + (y[3 * i + 1] - yc[3 * i]) ** 2)
    d[3 * i + 2] = pylab.sqrt((x[3 * i + 2] - xc[3 * i]) ** 2 + (y[3 * i + 2] - yc[3 * i]) ** 2)
# Integer division: len(d)/3 is a float in Python 3 and numpy.empty rejects it.
media = numpy.empty(len(d) // 3)
disp = numpy.empty(len(d) // 3)
i = 0
while i < len(d):
    # Mean radius of each triple of points (float index (1/3)*i fixed to i//3).
    media[i // 3] = (d[i] + d[i + 1] + d[i + 2]) / 3
    d[i:i + 3].sort()
    disp[i // 3] = d[i + 2] - d[i]
    # BUG FIX: the original advanced `d = d + 3` (elementwise array add),
    # which never incremented i and looped forever.
    i += 3
R = media
dR = disp
V = 6.0
dV = 0.1
datafile = 'EM.txt'
rawdata = numpy.loadtxt(os.path.join(folder, datafile)).T
Vacc = rawdata[0]
dVacc = rawdata[1]
Icoil = rawdata[2]
dIcoil = rawdata[3]
Bz = (e * n * t * Vacc) / (11.1 * Icoil)
dBz = Bz * pylab.sqrt((dt / t) ** 2 + (dVacc / Vacc) ** 2 + (0.1 / 11.1) ** 2 + (dIcoil / Icoil) ** 2)
k = (Bz * R) ** 2
dk = 2 * k * pylab.sqrt((dBz / Bz) ** 2 + (dR / R) ** 2)
pylab.figure(2)
pylab.xlabel('Vacc [V]', size=22)
pylab.ylabel('(Bz*R)^2', size=22)
pylab.title('Elettroni accelerati', size=20)
pylab.grid(color='gray')
# NOTE(review): x data is R but the axis label says Vacc, and the x-errors
# reuse `dr` from part 1 -- presumably Vacc/dVacc were intended; confirm.
pylab.errorbar(R, k, dk, dr, linestyle='', color='black')
pylab.tight_layout()
pylab.minorticks_on()
pylab.show()
996,965 | f7c98ebd702d8960e9fe81650ed7a5323f9b9b06 | import random
import pygame
from data.Resources_Loading_File import SONG_ONE
# from data.Resources_Loading_File import SONG_TWO
# Random background music
# Music lives forever
# Track most recently queued by play_random_songs (None until first play).
next_song = None
# Flag reserved for external code to request that music stop.
stop_music = False
# TODO: Maybe add some new songs
# All the songs that can be played
background_songs = [
    SONG_ONE,
    # SONG_TWO
]
# The actual function that plays the music
def play_random_songs():
    """Queue and start a random background track unless one is already playing."""
    global next_song
    if not pygame.mixer.music.get_busy():
        next_song = random.choice(background_songs)
        pygame.mixer.music.load(next_song)
        pygame.mixer.music.play()
|
996,966 | 5f1ff4800b944dff50f53852e2f0c527294d4737 | from keras.layers import Input, Dense
from keras.models import Model
from rl.core.value_function import NeuralNetStateMachineActionValueFunction
class AntActionValueFunction(NeuralNetStateMachineActionValueFunction):
    """Action-value function for the ant agent.

    A shared two-layer MLP trunk feeds two 2-unit linear heads, one per
    internal state; each head is also wrapped in its own single-output model
    (``state_models``), while ``model`` exposes both heads at once.
    """

    def __init__(self):
        super(AntActionValueFunction, self).__init__()
        input_size = 11
        inputs = Input(shape=(input_size,))
        # Shared hidden trunk.
        hidden = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(inputs)
        hidden = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(hidden)
        compile_kwargs = dict(
            optimizer='rmsprop',
            loss='mean_squared_error'
        )
        heads = []
        state_models = []
        # One linear head (and per-state model) per internal state.
        for _ in range(2):
            head = Dense(2, activation='linear', kernel_initializer='lecun_uniform')(hidden)
            state_model = Model(inputs=inputs, outputs=head)
            state_model.compile(**compile_kwargs)
            heads.append(head)
            state_models.append(state_model)
        combined = Model(inputs=inputs, outputs=heads)
        combined.compile(**compile_kwargs)
        self.state_models = state_models
        self.model = combined

    def evaluate(self, states, targets, **kwargs):
        """Evaluate the combined model on vectorized states."""
        return self.model.evaluate(states.as_array(), targets, **kwargs)

    def vectorized_fit(self, states, targets, **kwargs):
        """Fit the combined model on an array of states."""
        return self.model.fit(states.as_array(), targets, **kwargs)

    def scalar_fit(self, states, actions, rewards, **kwargs):
        """Per-sample fitting is not supported for this value function."""
        pass
# Smoke test: evaluate the value function for both internal states.
if __name__ == '__main__':
    from rl.environments.line_world.state import AntState
    from rl.core.state import IntExtState
    value_function = AntActionValueFunction()
    state = IntExtState(0, AntState(position=1))
    print(value_function(state))
    state = IntExtState(1, AntState(position=1))
    print(value_function(state))
    # Raw prediction straight from the underlying Keras model.
    print(value_function.model.predict(state.external_state.as_array().reshape((1, 11))))
|
996,967 | acbe28bb64d2211cb902cb1c9f9c996cd0935287 | def checkIndex(key):
"""The key should be non-negative integer.
if it is not an interger a TypeError is raised.
if it is negative a IndexError is raised.
"""
if not isinstance(key, (int, float)): raise TypeError
if key<0: raise IndexError
class CounterList(list):
    """A list subclass that counts how many times items are read by index."""

    def __init__(self, *args):
        super(CounterList, self).__init__(*args)
        self.counter = 0  # number of __getitem__ accesses so far

    def __getitem__(self, index):
        self.counter = self.counter + 1
        return list.__getitem__(self, index)
class ArithmeticSequence:
    """A lazily evaluated arithmetic sequence whose items can be overridden."""

    def __init__(self, start=0, step=1):
        """Initialise the sequence.

        start:   first value in the sequence
        step:    difference between two consecutive values
        changed: mapping of indices whose values the user has overridden
        """
        self.start = start
        self.step = step
        self.changed = {}

    def __getitem__(self, key):
        # Overridden values win; otherwise compute the term on the fly.
        checkIndex(key)
        if key in self.changed:
            return self.changed[key]
        return self.start + key * self.step

    def __setitem__(self, key, value):
        # Record a user override for this index.
        checkIndex(key)
        self.changed[key] = value
|
996,968 | 425508092169f7d1e3b65697b9a9eb65258d1332 | #!/usr/bin/python
import subprocess
def _run_cmd(cmd, module):
try:
return subprocess.check_output(cmd, shell = True).strip()
except subprocess.CalledProcessError as e:
module.fail_json(msg = "Command '"+e.cmd+"' failed: "+e.output)
def main():
    """Gather Solaris package-publisher facts and return them to Ansible."""
    module = AnsibleModule(
        argument_spec = dict(
            types = dict(required = True, type = "list")
        ),
        supports_check_mode = False
    )
    facts = {}
    if "repos" in module.params["types"]:
        # Origin URLs of the configured 'solaris' and 'site' publishers.
        facts["current_solaris_repo"] = _run_cmd("/bin/pkg publisher solaris | /bin/grep Origin | /bin/awk '{print $3}'", module)
        facts["current_site_repo"] = _run_cmd("/bin/pkg publisher site | /bin/grep Origin | /bin/awk '{print $3}'", module)
    module.exit_json(ansible_facts = facts)
# Ansible convention: the module_utils star import comes after the
# definitions, immediately before main() is invoked.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
# vim: textwidth=80 formatoptions=cqt wrapmargin=0
|
996,969 | f8f143cf22eeb12fb56ecc987a82608e944b1f30 | # Data Culling class, Python 3
# Henryk T. Haniewicz, 2018
# Local imports
import utils.pulsarUtilities as pu
import utils.plotUtils as pltu
import utils.otherUtilities as u
import utils.mathUtils as mathu
# PyPulse imports
from pypulse.archive import Archive
from pypulse.singlepulse import SinglePulse
from pypulse.utils import get_toa3
# Plotting imports
import matplotlib.pyplot as plt
import scipy.stats as spyst
import scipy.optimize as opt
# Other imports
import numpy as np
from scipy.fftpack import fft, fftshift
import math
import os
import sys
# Filter various annoying warnings (such as "cannot perform >= np.nan"). We know already...
import warnings
warnings.filterwarnings( "ignore" )
# Data culling class
class DataCull:
    '''
    Main class for data culling pulsar fits files to get a less noisy data set.
    '''

    def __init__( self, filename, template, directory = None, SNLim = 3000, verbose = False ):
        '''
        Initializes all archives and parameters in the data cube for a given file.
        Also requires a template to be parsed in.
        A custom signal / noise lower bound can also be set on initialization but
        the default is 3000. This will exit the current archive if the SNR is
        lower than the threshold.
        One can also set whether long arrays and other bits of console text
        are to be printed in full or in shorthand.
        '''
        if verbose:
            print( "Initializing DataCull object..." )
        self.SNError = False
        # Parse directory in string or choose CWD if no directory given
        if directory == None:
            self.directory = str( os.getcwd() )
        else:
            self.directory = str( directory )
        # Parse filename
        if os.path.isfile( self.directory + filename ):
            self.filename = str( filename )
        else:
            raise FileNotFoundError( "File {} not found in this directory...".format( filename ) )
        # Load the template
        self.template = self._loadTemplate( template )
        # Parse verbose option
        self.verbose = verbose
        # Parse SNLim
        self.SNLim = SNLim
        # Load the file in the archive
        self.ar = Archive( self.__str__(), verbose = self.verbose )
        # Togglable print options
        if self.verbose:
            np.set_printoptions( threshold = np.inf )
        # Check if Signal / Noise is too low
        if self.ar.getSN() < SNLim:
            if self.verbose:
                print( "Signal / Noise ratio is way too low. (Below {})".format( SNLim ) )
                print( "Data set to be thrown out..." )
            self.SNError = True
        # Load the data cube for the file
        self.data = self.ar.getData()

    def __repr__( self ):
        return "DataCull( filename = {}, template = {}, directory = {}, SNLim = {}, verbose = {} )".format( self.filename, self.templateName, self.directory, self.SNLim, self.verbose )

    def __str__( self ):
        # Full path of the archive on disk.
        return self.directory + self.filename

    def _loadTemplate( self, templateFilename ):
        '''
        Loads a template specified by the user. If no extension is given, the
        extension .npy will be used. Note that this code is designed for numpy
        arrays so it would be wise to use them.
        Returns the template.
        '''
        # Parse the template's filename into a string and ensure the correct extension
        self.templateName = str( templateFilename )
        self.templateName = u.addExtension( self.templateName, 'npy' )
        # Load the template
        template = np.load( self.templateName )
        return template

    def reject( self, criterion = 'chauvenet', iterations = 1, fourier = True, rms = True, binShift = True, showPlots = False ):
        '''
        Performs the rejection algorithm until the number of iterations has been
        reached or the data culling is complete, whichever comes first. The
        default number of iterations is 1.
        Requires the criterion to be set with the default criterion
        being Chauvenet's criterion.
        This is the function you should use to reject all outliers fully.
        '''
        if self.verbose:
            print( "Beginning data rejection for {}...".format( self.filename ) )
        # Initialize the completion flag to false
        self.rejectionCompletionFlag = False
        if fourier:
            if self.verbose:
                print( "Beginning FFT data rejection..." )
            for i in np.arange( iterations ):
                self.fourierTransformRejection( criterion, showPlots, showPlots )
                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        # Message fixed: this branch is the FFT pass, not RMS.
                        print( "FFT data rejection for {} complete after {} generations...".format( self.filename, generation ) )
                    break
            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print( "Maximum number of iterations ({}) completed...".format( iterations ) )
            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False
        if rms:
            if self.verbose:
                print( "Beginning RMS data rejection..." )
            for i in np.arange( iterations ):
                self.rmsRejection( criterion, showPlots )
                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print( "RMS data rejection for {} complete after {} generations...".format( self.filename, generation ) )
                    break
            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print( "Maximum number of iterations ({}) completed...".format( iterations ) )
            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False
        if binShift:
            if self.verbose:
                print( "Beginning bin shift data rejection..." )
            for i in np.arange( iterations ):
                self.binShiftRejection( showPlots )
                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag == True:
                    generation = i + 1
                    if self.verbose:
                        print( "Bin shift data rejection for {} complete after {} generations...".format( self.filename, generation ) )
                    break
            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print( "Maximum number of iterations ({}) completed...".format( iterations ) )
        # Re-load the data cube for the file
        self.data = self.ar.getData()

    def rmsRejection( self, criterion, showPlot = False ):
        '''
        Rejects outlier root mean squared values for off pulse regions and
        re-weights the data cube in the loaded archive.
        '''
        # Re-load the data cube for the file
        self.data = self.ar.getData()
        templateMask = pu.binMaskFromTemplate( self.template )
        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties( self.data, templateMask )
        if showPlot == True:
            # Creates the histogram
            pltu.histogram_and_curves( linearRmsArray, mean = mu, std_dev = sigma, x_axis = 'Root Mean Squared', y_axis = 'Frequency Density', title = r'$\mu={},\ \sigma={}$'.format( mu, sigma ), show = True, curve_list = [spyst.norm.pdf, mathu.test_dist.test_pdf] )
        # Determine which criterion to use to reject data.
        # '==' instead of 'is': 'is' compares string *identity*, which is an
        # interning accident rather than equality and is unreliable.
        if criterion == 'chauvenet': # Chauvenet's Criterion
            rejectionCriterion = mathu.chauvenet( rmsArray, mu, sigma, 3 )
        elif criterion == 'DMAD': # Double Median Absolute Deviation
            rejectionCriterion = mathu.doubleMAD( linearRmsArray )
            rejectionCriterion = np.reshape( rejectionCriterion, ( self.ar.getNsubint(), self.ar.getNchan() ) )
        else:
            raise ValueError( "Allowed rejection criteria are either 'chauvenet' or 'DMAD'. Please use one of these..." )
        # Set the weights of potential noise in each profile to 0
        u.zeroWeights( rejectionCriterion, self.ar, self.verbose )
        # Checks to see if there were any data to reject. If this array has length 0, all data was good and the completion flag is set to true.
        if( len( np.where( rejectionCriterion )[0] ) == 0 ):
            self.rejectionCompletionFlag = True
        if self.verbose:
            print( "Data rejection cycle complete..." )

    def fourierTransformRejection( self, criterion, showTempPlot = False, showOtherPlots = False ):
        '''
        Uses FFT (Fast Fourier Transform) to get the break-down of signals in the
        profile and compares to the the template.
        '''
        # Re-load the data cube
        data = self.ar.getData()
        tempData = self.template
        # Initialize guess parameters and the curve to fit
        guess_params = [100, 100, 1024]
        curve = mathu.FFT_dist._pdf
        # Set up arrays for FFT
        profFFT = np.zeros_like( data )
        tempFFT = fft( tempData )
        # Normalize the template array w.r.t the max value and shift to middle
        tempFFT = abs( mathu.normalizeToMax( abs( tempFFT.T ) ) )
        tempFFT = fftshift( tempFFT )
        # Create template FFT mask
        fftTempMask = pu.binMaskFromTemplate( tempFFT )
        # NOTE(review): these RMS statistics are currently unused (the
        # criterion-based rejection below is commented out).
        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties( data, fftTempMask )
        tempParams = opt.curve_fit( curve, np.arange( len( tempFFT ) ), tempFFT, p0 = guess_params )
        t = np.arange( 0, len( tempFFT ), 0.01)
        temp_fit = mathu.normalizeToMax( curve( t, *tempParams[0] ) )
        if showTempPlot:
            pltu.plotAndShow( tempFFT, t, temp_fit )
        # Loop over the time and frequency indices (subints and channels)
        for time in np.arange( self.ar.getNsubint() ):
            for frequency in np.arange( self.ar.getNchan() ):
                # FFT then normalize and center FFT'd profile
                profFFT[time][frequency] = fft( data[time][frequency] )
                profFFT[time][frequency] = abs( mathu.normalizeToMax( abs( profFFT[time][frequency].T ) ) )
                profFFT[time][frequency] = fftshift( profFFT[time][frequency] )
                if all( profFFT[time][frequency] ) == 0:
                    continue
                # Get optimization parameters for each profile for the same curve used to fit the template.
                params = opt.curve_fit( curve, np.arange( len( tempFFT ) ), profFFT[time][frequency], p0 = guess_params )
                # Normalize the curve with the fitted parameters
                prof_fit = mathu.normalizeToMax( curve( t, *params[0] ) )
                if showOtherPlots:
                    pltu.plotAndShow( profFFT[time][frequency], t, prof_fit, temp_fit )
                # if not all( u.is_similar_array( tempParams[0], params[0], tolerance = [ 1e-1, 1, 2 ] ) ):
                #     print( "Not similar" )
                #     continue
                if params[0][1] < 0:
                    print( "Not similar" )
                    if self.verbose:
                        print( "Setting the weight of (subint: {}, channel: {}) to 0".format( time, frequency ) )
                    self.ar.setWeights( 0, t = time, f = frequency )
                else:
                    print( "Similar" )
        # # Check if profile FT RMS matches template FT RMS based on Chauvenet
        # if criterion == 'chauvenet': # Chauvenet's Criterion
        #
        #     rejectionCriterion = mathu.chauvenet( rmsArray, mu, sigma, 2 )
        #
        # elif criterion == 'DMAD': # Double Median Absolute Deviation
        #
        #     rejectionCriterion = mathu.doubleMAD( linearRmsArray )
        #     rejectionCriterion = np.reshape( rejectionCriterion, ( self.ar.getNsubint(), self.ar.getNchan() ) )
        #
        # else:
        #     raise ValueError( "Allowed rejection criteria are either 'chauvenet' or 'DMAD'. Please use one of these..." )
        #
        # if not rejectionCriterion:
        #     if self.verbose:
        #         print( "Setting the weight of (subint: {}, channel: {}) to 0".format( time, frequency ) )
        #     self.ar.setWeights( 0, t = time, f = frequency )
        # Re-load the data cube
        self.data = self.ar.getData()

    def binShiftRejection( self, showPlot = False ):
        '''
        Gets the bin shift and bin shift errors of each profile in the file and
        plots both quantities as a histogram.
        Then, rejects based on Chauvenet criterion
        '''
        nBinShift, nBinError = self.getBinShifts()
        # Reshape the bin shift and bin shift error arrays to be linear
        linearNBinShift, linearNBinError = np.reshape( nBinShift, ( self.ar.getNchan() * self.ar.getNsubint() ) ), np.reshape( nBinError, ( self.ar.getNchan() * self.ar.getNsubint() ) )
        # Mean and standard deviation of the bin shift
        muS, sigmaS = np.nanmean( linearNBinShift ), np.nanstd( linearNBinShift )
        # Mean and standard deviation of the bin shift error
        muE, sigmaE = np.nanmean( linearNBinError ), np.nanstd( linearNBinError )
        if showPlot == True:
            # Create the histograms as two subplots
            pltu.histogram_and_curves( linearNBinShift, mean = muS, std_dev = sigmaS, x_axis = r'Bin Shift from Template, $\hat{\tau}$', y_axis = 'Frequency Density', title = r'$\mu={},\ \sigma={}$'.format( muS, sigmaS ), show = True, curve_list = [spyst.norm.pdf] )
            pltu.histogram_and_curves( linearNBinError, mean = muE, std_dev = sigmaE, x_axis = r'Bin Shift Error, $\sigma_{\tau}$', y_axis = 'Frequency Density', title = r'$\mu={},\ \sigma={}$'.format( muE, sigmaE ), show = True, curve_list = [spyst.maxwell.pdf] )
            # Adjust subplots so they look nice
            #plt.subplots_adjust( top=0.92, bottom=0.15, left=0.15, right=0.95, hspace=0.55, wspace=0.40 )
        rejectionCriterionS, rejectionCriterionE = mathu.chauvenet( nBinShift, muS, sigmaS ), mathu.chauvenet( nBinError, muE, sigmaE )
        # Set the weights of potential noise in each profile to 0
        u.zeroWeights( rejectionCriterionS, self.ar, self.verbose )
        u.zeroWeights( rejectionCriterionE, self.ar, self.verbose )
        # Checks to see if there were any data to reject. If this array has length 0, all data was good and the completion flag is set to true.
        if len( np.where( rejectionCriterionS )[0] ) == 0 and len( np.where( rejectionCriterionE )[0] ) == 0:
            self.rejectionCompletionFlag = True
        if self.verbose:
            print( "Data rejection cycle complete..." )

    def getBinShifts( self ):
        '''
        Returns the bin shift and bin shift error.
        '''
        if self.verbose:
            print( "Getting bin shifts and errors from the template..." )
        # Re-load the data cube
        self.data = self.ar.getData()
        templateMask = pu.binMaskFromTemplate( self.template )
        # Return the array of RMS values for each profile
        rmsArray = mathu.rmsMatrix2D( self.data, mask = templateMask, nanmask = True )
        # Initialize the bin shifts and bin shift errors
        nBinShift = np.zeros( ( self.ar.getNsubint(), self.ar.getNchan() ), dtype = float )
        nBinError = np.zeros( ( self.ar.getNsubint(), self.ar.getNchan() ), dtype = float )
        # Use PyPulse utility get_toa3 to obtain tauhat and sigma_tau for each profile and feed them into the two arrays.
        for time in np.arange( self.ar.getNsubint() ):
            for frequency in np.arange( self.ar.getNchan() ):
                if all( amp == 0 for amp in self.data[time][frequency] ):
                    nBinShift[time][frequency] = np.nan
                    nBinError[time][frequency] = np.nan
                else:
                    # Attempt to calculate the bin shift and error. If not possible, set the profile to 0.
                    try:
                        tauccf, tauhat, bhat, sigma_tau, sigma_b, snr, rho = get_toa3( self.template, self.data[time][frequency], rmsArray[time][frequency], dphi_in=0.1, snrthresh=0., nlagsfit=5, norder=2 )
                        nBinShift[time][frequency] = tauhat
                        nBinError[time][frequency] = sigma_tau
                    # Narrowed from a bare 'except:' so Ctrl-C still works;
                    # any get_toa3 failure is treated as an unusable profile.
                    except Exception:
                        if self.verbose:
                            print( "Setting the weight of (subint: {}, channel: {}) to 0".format( time, frequency ) )
                        self.ar.setWeights( 0, t = time, f = frequency )
                        nBinShift[time][frequency] = np.nan
                        nBinError[time][frequency] = np.nan
        # Mask the nan values in the array so that histogram_and_curves doesn't malfunction
        nBinShift, nBinError = np.ma.array( nBinShift, mask = np.isnan( nBinShift ) ), np.ma.array( nBinError, mask = np.isnan( nBinError ) )
        return nBinShift, nBinError
# FOR TESTING
if __name__ == "__main__":
    # Renamed from 'dir', which shadowed the builtin of the same name.
    data_dir = "/Volumes/Henryk_Data/PSR J1756-2251/1756-2251 Nancay Data November 2017/Nancay_BON_data/"
    temp = data_dir + "Lbandtemplate.npy"
    # Cycle through each file in the stored directory
    for i, file in enumerate( os.listdir( data_dir ) ):
        # Initialize DCO
        try:
            dco = DataCull( file, temp, data_dir, verbose = False )
        except SystemExit:
            continue
        # '==' instead of 'is': 'is' compared string identity, which is
        # unreliable for runtime strings, so the ROACH skip could silently fail.
        if dco.ar.getFrontend() == 'ROACH':
            continue
        #dco.reject( criterion = 'chauvenet', iterations = 5, fourier = False, rms = True, binShift = False, showPlots = True )
        #dco.ar.tscrunch( nsubint = 4 )
        #dco.ar.fscrunch( nchan = 4 )
        dco.fourierTransformRejection( 'chauvenet', True, True )
|
996,970 | 014598398418413f17643823b47336d3a5df3df7 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-03 11:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the UnsafeUser table. The password column is
    # a plain CharField (presumably intentional given the model name -- this
    # looks like a security-demo app; confirm before reusing).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UnsafeUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=100, verbose_name='username')),
                ('password', models.CharField(max_length=100, verbose_name='password')),
                ('is_admin', models.BooleanField(default=False, verbose_name='is admin')),
            ],
        ),
    ]
|
996,971 | bed9ae662bc2263d2f22aedf23561d68f31fd437 | T = int( input().strip() )
O = list()
for t in range(T) :
I = int( input().strip() )
x = [0 for x in range(10)]
if I == 0 :
O.append('INSOMNIA')
else :
to = 0
while sum(x) != 10 :
to = to + I
i = to
while i > 0 :
x[ int(i % 10) ] = 1
i //= 10
O.append(to)
for i,o in enumerate(O):
print('Case #', i+1, ': ', o, sep='') |
996,972 | 57c0d0dba3c83ce02856b2cf01b451e4329d9123 | # encoding=utf-8
"""
ๆฏๆฑ่่ดฆๅ้่
"""
__AUTHOR = 'thor'
class BillNoConfirm(object):
    """Emits the SQL that flags bills as NO_CONFIRM for the listed house ids."""

    __BASE_PATH = 'file/'

    def __init__(self):
        # Column positions of the relevant fields in the source sheet.
        self.field_index_dict = {
            'house_code': 0,
            'september_num': 1,
            'october_num': 2,
        }

    def handle(self):
        """Print one UPDATE statement covering every house id in houseId.txt."""
        print('update bill set status = \'NO_CONFIRM\' where house_info_id in (', end='')
        with open(BillNoConfirm.__BASE_PATH + 'houseId.txt', 'r') as f:
            for line in f.readlines():
                # Drop the trailing newline, join ids with ", ".
                print(line[:-1] + ', ', end='')
        print(');')

    @staticmethod
    def to4(temp):
        """Left-pad a 3-character string to 4 characters with a single zero."""
        return '0' + temp if len(temp) == 3 else temp
if __name__ == '__main__':
    handle = BillNoConfirm()
    handle.handle()  # emits the UPDATE statement to stdout
# select * from bill where house_info_id in
# (17, 462, 94, 302, 95, 109, 123, 137, 233, 246, 259, 272, 3137,
# 151, 167, 199, 216, 203, 204, 221, 3138, 313, 331, 330, 348, 349, 367, 35);
#update bill set status = 'NO_CONFIRM' where house_info_id in (17, 94, 302, 95, 109, 123, 137, 233, 246, 259, 272, 151, 167, 199, 216, 203, 204, 221, 313, 331, 330, 348, 349, 367, 35);
# update bill set status = 'NO_CONFIRM'
# where house_info_id in
# (17, 94, 302, 95, 109, 123, 137, 233, 246, 259, 272, 151, 167, 199, 216, 203, 204, 221, 313, 331, 330, 348, 349, 367, 35)
# and product_type_code = 'propertyFee';
|
996,973 | acbdca902ecf95496ea88e371bd9c60f6edd00b8 | import os
import re
from vee import log
from vee.cli import style, style_note
from vee.pipeline.base import PipelineStep
from vee.subproc import call
from vee.utils import cached_property
from vee.exceptions import AlreadyInstalled, PipelineError
_installed_packages = set()
class RPMChecker(PipelineStep):
    """Pipeline step that treats ``rpm:`` URLs as externally installed packages.

    Nothing is fetched or built; `fetch` merely verifies that the named rpm is
    already installed on the host.
    """

    factory_priority = 1000

    @cached_property
    def installed_packages(self):
        """Lowercased names of installed rpms, plus every '-'/'.' prefix.

        The result is accumulated in the module-level set so ``rpm -qa``
        runs at most once per process.
        """
        if _installed_packages:
            return _installed_packages
        names = _installed_packages
        output = call(['rpm', '-qa'], stdout=True)
        for raw_line in output.splitlines():
            name = raw_line.strip().lower()
            if not name:
                continue
            names.add(name)
            # Index every dash- and dot-delimited prefix so that partial
            # package names (without version/arch suffixes) also match.
            for sep in ('-', '.'):
                pieces = name.split(sep)
                for end in range(1, len(pieces)):
                    names.add(sep.join(pieces[:end]))
        return names

    @classmethod
    def factory(cls, step, pkg):
        # Only the 'init' step of rpm: URLs is handled by this checker.
        if step == 'init' and re.match(r'^rpm:', pkg.url):
            return cls()

    def get_next(self, step, pkg):
        # This single step handles the whole pipeline itself.
        return self

    def init(self, pkg):
        # Signal that we should not be persisted to the database.
        pkg.virtual = True

    def fetch(self, pkg):
        """Verify the rpm exists on the host; never fetches anything."""
        if pkg.name.lower() not in self.installed_packages:
            raise PipelineError('rpm package "%s" is not installed.' % pkg.name)
        raise AlreadyInstalled()

    # The remaining pipeline stages are no-ops for system rpms.
    def inspect(self, pkg):
        pass

    def extract(self, pkg):
        pass

    def build(self, pkg):
        pass

    def install(self, pkg):
        pass

    def optlink(self, pkg):
        pass

    def relocate(self, pkg):
        pass
|
996,974 | df243af66be7b28b0c48e42e84e7580c1e3756c4 | from pytorch3d.renderer import (
FoVPerspectiveCameras,
PointLights,
RasterizationSettings,
TexturesVertex,
look_at_view_transform,
)
from pytorch3d.renderer import (
FoVPerspectiveCameras, look_at_view_transform,
RasterizationSettings, BlendParams,
MeshRenderer, MeshRasterizer, HardPhongShader
)
import random
from pytorch3d.io import load_obj, save_obj
import torch
from plot_image_grid import image_grid
import matplotlib.pyplot as plt
def render(meshes, model_id, shapenet_dataset, device, batch_size):
    """Render a batch of meshes from random viewpoints.

    Returns:
        (images, cameras): images as a (batch, 3, H, W) tensor with the alpha
        channel dropped, plus the FoV cameras used for rendering.
    """
    # Random viewpoints: elevation in [0, 180), azimuth in [-180, 180).
    camera_elevation = 0 + 180 * torch.rand((batch_size))
    camera_azimuth = -180 + 2 * 180 * torch.rand((batch_size))
    R, T = look_at_view_transform(2.0, camera_elevation, camera_azimuth)
    cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
    cameras.eval()  # necessary ?
    raster_settings = RasterizationSettings(image_size=512)
    # NOTE(review): lights are constructed but never passed to the shader, so
    # HardPhongShader falls back to its defaults -- confirm that is intended.
    lights = PointLights(location=torch.tensor([0.0, 1.0, -2.0], device=device)[None], device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(device=device, cameras=cameras)
    )
    renderer.eval()
    # Drop alpha, then move channels first: (B, H, W, 3) -> (B, 3, H, W).
    images = renderer(meshes)[..., :3]
    images = images.permute(0, 3, 1, 2)
    return images, cameras
#image_grid(images_by_idxs.cpu().numpy(), rows=1, cols=3, rgb=True)
#plt.show()
#save_obj("model.obj", mesh.verts_packed(), mesh.faces_packed())
|
996,975 | 0475b7b57cde36f5204f10d45468856c5c5fc85c | # pylint: disable=missing-module-docstring, missing-function-docstring
import pytest
from aqua.interface.parser_plaintext import PlainTextParser
def test_parse_normal_msg():
    """A well-formed message yields the return id, request name and params."""
    inp = PlainTextParser.parse_input("0 request foo")
    assert (inp.return_id, inp.request, inp.params) == (0, "request", ["foo"])
def test_parse_invalid_return_id():
    """A non-numeric return id is rejected with ValueError."""
    pytest.raises(ValueError, PlainTextParser.parse_input, "hi request foo")
def test_parse_too_short():
    """A message with fewer than the required fields is rejected."""
    pytest.raises(ValueError, PlainTextParser.parse_input, "hi")
|
996,976 | 989cffd656222ca8898339720c11c46265d1b8ac | """
Tests for Packet20.
"""
from irobot.packet import Packet20
def test_id():
    """Packet20 reports sensor-packet id 20."""
    assert Packet20.id == 20
def test_size():
    """Packet20 occupies two data bytes."""
    assert Packet20.size == 2
def test_from_bytes_counter_clockwise():
    """`from_bytes` decodes 0x005a as a +90 degree (counter-clockwise) angle."""
    data = bytes([0x00, 0x5a])
    packet = Packet20.from_bytes(data)
    assert packet is not None
    # 'is' for the exact-type check: '==' invokes type equality, and identity
    # is the idiomatic way to assert "exactly this class, not a subclass".
    assert type(packet) is Packet20
    assert packet.angle == 90
def test_from_bytes_clockwise():
    """`from_bytes` decodes 0xffa6 (two's complement) as a -90 degree angle."""
    data = bytes([0xff, 0xa6])
    packet = Packet20.from_bytes(data)
    assert packet is not None
    # 'is' for the exact-type check (see counter-clockwise test).
    assert type(packet) is Packet20
    assert packet.angle == -90
|
996,977 | e8b9d6c5f566b4b9f1d684e20382a1a4956a7c00 | # Write a code to generate a half pyramid pattern using numbers.
# Sample Input :
# 5
# Sample Output :
# 5
# Sample Output :
# 55555
# 4444
# 333
# 22
# 1
# Read the height N, then print row k (k = 0..N-1) as the value (N - k)
# repeated (N - k) times, producing the descending half pyramid.
N = int(input(''))
for index in range(0, N):
    print(str(N - index) * (N - index))
|
996,978 | 38388a3280d7841f6589e246a02213bae5149d1f | from mng.models import KV
def kvs():
    """Return all booking settings as a dict, seeding defaults on first run.

    If the KV table is empty it is populated with the default values below;
    afterwards every setting is read back from the database so external edits
    are reflected.
    """
    # Keys in the order the result dict exposes them, with first-run defaults.
    defaults = {
        'zero_year': 2016,
        'zero_month': 2,
        'zero_day': 28,
        'desk_max': 20,
        'tent_max': 20,
        'umbrella_max': 15,
        'red_max': 5,
        'cloth_max': 5,
        'loud_max': 2,
        'sound_max': 1,
        'projector_max': 1,
    }
    # Seed the table once: only when no settings exist at all.
    if KV.objects.count() <= 0:
        for key, value in defaults.items():
            KV(set_key=key, set_value=value).save()
    # Always read back from the database rather than returning the defaults.
    return {
        key: KV.objects.filter(set_key=key).first().set_value
        for key in defaults
    }
def zero_date():
    """Return the configured zero date as an (int year, int month, int day) tuple."""
    parts = []
    for key in ('zero_year', 'zero_month', 'zero_day'):
        parts.append(int(KV.objects.filter(set_key=key).first().set_value))
    return tuple(parts)
def save_settings(zero_year, zero_month, zero_day,
                  desk_max, tent_max, umbrella_max, red_max, cloth_max, loud_max, sound_max, projector_max):
    """Persist every settings value into its KV row.

    Each argument is written to the KV row whose ``set_key`` equals the
    argument's own name.  Rows are assumed to already exist (they are
    bootstrapped elsewhere in this module).
    """
    values = {
        'zero_year': zero_year,
        'zero_month': zero_month,
        'zero_day': zero_day,
        'desk_max': desk_max,
        'tent_max': tent_max,
        'umbrella_max': umbrella_max,
        'red_max': red_max,
        'cloth_max': cloth_max,
        'loud_max': loud_max,
        'sound_max': sound_max,
        'projector_max': projector_max,
    }
    # One update per key: identical behaviour to the original line-per-key
    # version, without the eleven-fold duplication.
    for key, value in values.items():
        KV.objects.filter(set_key=key).update(set_value=value)
|
996,979 | d0429d506e0092b862834803b1ec7ab298c33d99 | #corey b. holstege
#2018-10-18
#problem 2.4.4
# Evaluate a handful of arithmetic expressions for the exercise.
a = 6
b = 2
c = 9
print(c - a)
print(a + b * c)
print((a * c) / (4 * a))
# NOTE(review): with a=6, b=2 the denominator a - 3*b is 0, so this line
# raises ZeroDivisionError — confirm whether that is the intended lesson
# or the constants are mistyped.
print(c / (a - 3 * b))
|
996,980 | c77187f825812d7631fb7a985b0d1a18e359b45d | from util import memoized
from itertools import count
@memoized
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    return n if n in (0, 1) else fib(n - 1) + fib(n - 2)
if __name__ == "__main__":
sum = 0
for n in count(1, 1):
result = fib(n)
if result > 4000000: break
if result % 2 == 0: sum += result
print sum
|
996,981 | ce5ced353bc4417f4f000f71973c1542382c4032 | """
Part 1: Discussion
1. What are the three main design advantages that object orientation
can provide? Explain each concept.
Abstraction
Abstraction allows chunks of code to be hidden, enabling users of the code to
access the functionality but not requiring that they understand the underlying
nuts and bolts.
Encapsulation
Encapsulation is when data and functionality are kept side-by-side, rather than a
function being completely distinct from the type of object it is acting on. This
allows us to create functionality that is both specific and unique to those
types of objects (or of objects in related child classes).
Polymorphism
Polymorphism allows different types of classes to have a consistent interface
with users, bringing together multiple subclasses to use a generalized 'template'
of attributes and methods. Making your code polymorphic means bringing together
those elements that are the same between Classes to live in a parent class, and
shifting anything that is unique to an object into a child class.
2. What is a class?
A class allows you to group together similar objects (instances) under an umbrella
with defined data points (attributes) and behaviors (methods) that all instances
of that class will share.
3. What is an instance attribute?
A piece of data about an object of a class that is assigned at the individual
level, not the class level.
4. What is a method?
A piece of code that defines the behavior that an instance of that class is
able to do.
5. What is an instance in object orientation?
An instance is an object of a particular class. The class defines the specific
data points that object will have (the attributes) and behavior that that object
can do (the methods). If an instance's class is a child class, the instance will
inherit attributes and methods from its parent classes as well.
6. How is a class attribute different than an instance attribute?
Give an example of when you might use each.
An attribute that is defined at the class level (a class attribute) will be the
same for all objects which are instantiated as part of that Class, whereas an
instance attribute is defined at the individual level and will be assigned for
only that instance of an object, not for other objects of that Class.
"""
# ----------------------------- Defining Classes -------------------------------
class Student(object):
    """A student identified by first name, last name, and address."""

    def __init__(self, f_name, l_name, address):
        """Record the student's identifying details."""
        self.f_name, self.l_name, self.address = f_name, l_name, address
class Question(object):
    """A single quiz question paired with its correct answer."""
    def __init__(self, question, correct_answer):
        """Initialize a new question."""
        self.question = question
        # Stored lowercased so the comparison in ask_and_evaluate is
        # case-insensitive.
        self.correct_answer = correct_answer.lower()
    def ask_and_evaluate(self):
        """Prompt the user with the question and evaluate their input.

        Returns True when the (lowercased) answer matches correct_answer."""
        # Python 2 print statement / raw_input: interactive console I/O.
        print self.question
        return self.correct_answer == raw_input('>> ').lower()
class Exam(object):
    """A named collection of Question objects that can be administered."""

    def __init__(self, name):
        """Create an exam with the given name and no questions yet."""
        self.name = name
        self.questions = []

    def add_question(self, question, correct_answer):
        """Wrap the prompt/answer pair in a Question and store it."""
        self.questions.append(Question(question, correct_answer))

    def administer(self):
        """Ask every question in order and return the score as a percentage."""
        correct = sum(
            1 for question in self.questions if question.ask_and_evaluate() is True
        )
        return (float(correct) / len(self.questions)) * 100
class Quiz(Exam):
    """An Exam variant that is pass/fail instead of percentage-scored."""

    def administer(self):
        """Run the quiz via Exam.administer and report whether it was passed (>= 50%)."""
        percentage = super(Quiz, self).administer()
        passed = percentage >= 50
        return passed
# ----------------------------- Defining Functions -----------------------------
def take_test(exam, student):
    """Run *exam* and record the result on *student* as a new ``score`` attribute."""
    result = exam.administer()
    student.score = result
def example(exam_name, question_set, student):
    """Build a sample exam and student, run the test, and return both.

    exam_name -- title for the Exam instance
    question_set -- dict mapping question text to its correct answer
    student -- dict with 'f_name', 'l_name' and 'address' keys
    """
    exam = Exam(exam_name)
    for prompt, answer in question_set.items():
        exam.add_question(prompt, answer)
    taker = Student(student['f_name'], student['l_name'], student['address'])
    take_test(exam, taker)
    return taker, exam
# ------------------------ Dictionaries of Sample Content ----------------------
# Sample question set: question text -> correct answer.
weird_state_facts = {
    'It\'s illegal in Georgia to do what with a fork?': 'Eat fried chicken',
    'In South Dakota it\'s illegal to fall down and sleep where?': 'In a cheese factory',
    'In Kansas it\'s illegal to eat cherry pie with what?': 'Ice cream',
    'It\'s illegal in Texas to put what on your neighbor\'s cow?': 'Graffiti'
}
# Sample student record consumed by example().
watts_jacqui = {
    'f_name': 'Jacqui',
    'l_name': 'Watts',
    'address': 'San Francisco'
}
# ------------------------------ Executable Code -------------------------------
# NOTE: example() prompts on the console (raw_input), so importing this module
# starts an interactive quiz.
jacqui, state_facts = example('Weird State Facts', weird_state_facts, watts_jacqui)
# score is either a percentage (Exam) or a boolean (Quiz); this condition
# accepts both shapes.
if jacqui.score is True or jacqui.score >= 50:
    passed = 'passed'
else:
    passed = 'did not pass'
print "{} {} took a quiz on {} and she {}!".format(jacqui.f_name, jacqui.l_name,
                                                   state_facts.name, passed)
|
996,982 | 24495d8311ce8d85605764ba1f5e4bfcee3fe639 | from flask_wtf import FlaskForm
from wtforms.fields import StringField, IntegerField, RadioField
from wtforms import validators
class FeatureInput(FlaskForm):
    """Input form collecting the passenger features used by the model.

    Fields: sex, passenger class, marital status, age, and sibling count.
    """
    # validators.Required is a deprecated alias in WTForms; DataRequired is
    # the documented replacement with the same semantics.
    sex = RadioField('sex', choices=[('man', 'a man'), ('woman', 'a woman')], validators=[validators.DataRequired()])
    pclass = RadioField('passenger_class', choices=[('1', 'first class'), ('2', 'business'), ('3', 'economy')], validators=[validators.DataRequired()], render_kw={"placeholder": "what class is your ticket in?"})
    married = RadioField('married', choices=[('married', 'married'), ('unmarried', 'unmarried')], validators=[validators.DataRequired()])
    # InputRequired (not DataRequired) so an explicit 0 is accepted.
    age = IntegerField('age', validators=[validators.InputRequired()], render_kw={"placeholder": "how old are you?"})
    siblings = IntegerField('siblings', validators=[validators.InputRequired()], render_kw={"placeholder": "how many siblings do you have?"})
'''
author @yvan
''' |
996,983 | 97a60df3d3496c393008d0a228a46412552997f5 | '''
Created on 2012-9-9
@author: TheBeet
'''
# Number of distinct judge result codes (0..12).
result_tag_count = 13
# Canonical result names, indexed by status code.
result_all = ['Waiting',
              'Accepted',
              'Presentation Error',
              'Wrong Answer',
              'Runtime error',
              'Time Limit Exceed',
              'Memory Limit Exceed',
              'Output Limit Exceed',
              'Compile Error',
              'System Error',
              'Validate Error',
              'Restricted Call',
              'Running']
# Abbreviations, parallel to result_all.
result_all_short = ['WT', 'AC', 'PE', 'WA', 'RE', 'TLE', 'MLE', 'OLE',
                    'CE', 'SE', 'VE', 'RC', 'RN']
# Derive the code->tag maps from the lists so the two representations can
# never drift apart (the originals duplicated every entry by hand).
result_full_tag = dict(enumerate(result_all))
result_short_tag = dict(enumerate(result_all_short))
|
996,984 | 71b3f43d01bda2a4169a1b24a09d5d3e0024eeaa | from kivy.app import App
class opencvApp(App):
    # Minimal Kivy application with no build() override; presumably the UI is
    # defined in a matching "opencv.kv" rule file (Kivy naming convention) —
    # confirm against the project layout.
    pass
if __name__ == '__main__':
    # Start the Kivy event loop.
    opencvApp().run()
996,985 | 9c659528b869bcbe2af01aff5cdef23bd35dd431 | import pygame
# SCREEN
#######################################
# Set gamescreen size
WIDTH_SCREEN = 800
HEIGHT_SCREEN = 800
SCREENSIZE = (WIDTH_SCREEN, HEIGHT_SCREEN)
# NOTE: creating the display window here is an import-time side effect of
# this settings module.
screen = pygame.display.set_mode(SCREENSIZE)
# GENERAL SETTINGS
###########################################
# COLORS (RGB triples)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREY = (240,248,255)  # NOTE(review): this value is alice blue, not grey
# # Timer
clock = pygame.time.Clock() # Initiate clock object
# TIME_PASSED = clock.tick(60)
# TIME_PASSED_SECONDS = TIME_PASSED/1000.0
# BALL SETTINGS
##########################################
BALL_SPEED = 700
BALL_START_POS_X = 250
BALL_START_POS_Y = 350
BALL_RADIUS = 5
# PLATFORM SETTINGS
#########################################
PLATFORM_SPEED = 700
ANGLE_MAGNITUDE = 2
# BRICKS SETTINGS
#########################################
YDISTANCE_BETWEEN_BLOCKS = 70
XSHIFT_BLOCKS = 10
XDISTANCE_BETWEEN_BLOCKS = 50
NUMBER_OF_BLOCK_HORIZONTAL = 15
YSHIFT_BLOCKS = 37
|
996,986 | 01f73c7531c00b1fba2f688e3fa7f5311414f546 | import ctypes
lib = ctypes.CDLL('./libPython.so.2')
# Pass the Python int object itself (not a converted C integer) to the
# native routine.
lib.print_python_int.argtypes = [ctypes.py_object]

# Exercise print_python_int across interesting boundaries: small ints,
# values around the signed/unsigned 64-bit limits, and arbitrary-precision
# integers far beyond machine word size.  Order preserved from the original
# one-call-per-line version.
test_values = (
    -1,
    0,
    1,
    123456789,
    -123456789,
    12345678901,
    10304719833506056896,
    -9223372036854775808,   # INT64_MIN
    9223372036854775807,    # INT64_MAX
    18446744073709551615,   # UINT64_MAX
    -18446744073709551615,
    18446744073709551616,   # UINT64_MAX + 1
    1111111111222222222233333333334444444444555555555566666666667777777777888888888899999999990000000000,
    -1111111111222222222233333333334444444444555555555566666666667777777777888888888899999999990000000000,
)
for i in test_values:
    lib.print_python_int(i)
|
996,987 | 216350318ed16a4568192387b1fd908915626c37 | import requests
url = "http://cc.linkinme.com/hljtv5/9"
headers = {"Referer": "http://www.hljtv.com/live/folder424/", "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"}
# BUG FIX: requests.get(url, headers) passed the dict as the second
# positional argument, which is `params` (query string), so the Referer and
# User-Agent were never sent as headers.  They must go via headers=.
r = requests.get(url, headers=headers)
r.encoding = 'utf-8'
r = r.text
print(r)
996,988 | 90e79ce8936ed4acc826141cbdfe446cf433cc84 | import rhinoscriptsyntax as rs
class Turtle:
def __init__(self, pos = [0,0,0], heading = [1,0,0]):
self.heading = heading
self.point = rs.AddPoint(pos)
pointPos = rs.PointCoordinates(self.point)
self.direction = rs.VectorCreate(heading,pointPos)
self.lines = []
def forward(self,magnitude):
print self.direction
movement = rs.VectorScale(self.direction,magnitude)
prevPos = rs.PointCoordinates(self.point)
rs.MoveObject(self.point,movement)
currentPos = rs.PointCoordinates(self.point)
rs.AddLine(prevPos,currentPos)
def left(self,angle,(X,Y,Z)):
self.direction = rs.VectorRotate(self.direction, angle, [X,Y,Z])
print(self.direction)
def right(self,angle,(X,Y,Z)):
self.direction = rs.VectorRotate(self.direction, -angle, [X,Y,Z])
print(self.direction)
def goto(self, x, y, z):
prevPos = rs.PointCoordinates(self.point)
movement = rs.VectorCreate([x,y,z],prevPos)
rs.MoveObject(self.point,movement)
currentPos = rs.PointCoordinates(self.point)
def cube(self, l, w, h):
# a = rs.AddPoint(self.point)
p = rs.rs.PointCoordinates(self.point)
a = rs.AddPoint(p)
b = rs.CopyObject(a,[l,0,0])
c = rs.CopyObject(a,[l,w,0])
d = rs.CopyObject(a,[0,w,0])
e = rs.CopyObject(a,[0,0,h])
f = rs.CopyObject(a,[l,0,h])
g = rs.CopyObject(a,[l,w,h])
h = rs.CopyObject(a,[0,w,h])
box = rs.AddBox([a,b,c,d,e,f,g,h])
def cubecenter(self, m1, m2, m3):
# a = rs.AddPoint(self.point)
p = rs.GetPoint("Enter center point")
a = rs.AddPoint(p)
l = m1/2
w = m2/2
h = m3/2
b = rs.CopyObject(a,[l,w,-h])
c = rs.CopyObject(a,[l,-w,-h])
d = rs.CopyObject(a,[-l,-w,-h])
e = rs.CopyObject(a,[-l,w,-h])
f = rs.CopyObject(a,[l,w,h])
g = rs.CopyObject(a,[l,-w,h])
h = rs.CopyObject(a,[-l,-w,h])
j = rs.CopyObject(a,[-l,w,(m3/2)])
box = rs.AddBox([b,c,d,e,f,g,h,j])
def sphere(self, radius):
# a = rs.AddPoint(self.point)
p = rs.rs.PointCoordinates(self.point)
a = rs.AddPoint(p)
box = rs.AddSphere(a,radius)
def cone(self, radius):
# a = rs.AddPoint(self.point)
base = rs.GetPoint("Base of cone")
if base:
height = rs.GetPoint("Height of cone", base)
if height: rs.AddCone(base, height, radius, cap=False )
def cylinder(self,r):
a = rs.GetPoint("Enter start point")
h = rs.GetReal("Enter the height")
cylinder = rs.AddCylinder(a,h,r)
def cylinders(self,num):
a = rs.GetPoint("Enter start point")
p = rs.AddPoint(a)
h = rs.GetReal("Enter the height")
for i in range(0,num):
a.X = a.X + 4
h = h + 5
r = 2
cylinder = rs.AddCylinder(a,h,r)
color02 = [i * 3,i * 2,255 - i * 6] #magenta
rs.ObjectColor(cylinder, color02)
def jump(self,magnitude):
a = rs.PointCoordinates(self.point)
p = rs.AddPoint(a)
sphere = rs.AddSphere(p,4)
print self.direction
prevPos = rs.PointCoordinates(self.point)
for i in range(1,110):
rs.MoveObject(sphere,(1,0,20 / i))
for i in range(1,110):
rs.MoveObject(sphere,(1,0,-1 * i / 40))
def jumps(self,magnitude):
a = rs.GetPoint("Enter start point")
p = rs.AddPoint(a)
sphere = rs.AddSphere(p,4)
print self.direction
prevPos = rs.PointCoordinates(self.point)
for d in range(1,50):
nn=rs.Redraw()
for i in range(1,50):
rs.MoveObject(sphere,(1,1,20 / i))
for i in range(1,50):
rs.MoveObject(sphere,(1,1,-1 * i / 40))
# Demo driver: the commented lines show other Turtle capabilities; only the
# bouncing-sphere animation is active.
m=Turtle()
#m.sphere(5)
#m.cubecenter(10,10,10)
#m.cone(5)
#m.cylinder(5)
#m.cylinders(20)
#for i in range(10):
#    m.left(45,(0,-1,0))
#    m.forward(10)
#for i in range(10):
#    m.left(45,(-1,0,-1))
#    m.forward(10)
m.jumps(2)
|
996,989 | 85726754dcd2b8d8d1f4abbdde84fb6adfd3547a | ids = ['316219997', '316278670']
import utils
from itertools import chain, combinations
class Node:
    """A search-tree node: a state plus the parent/action that produced it."""

    def __init__(self, state, parent=None, action=None):
        self.state = state
        self.parent = parent
        self.action = action
        self.depth = parent.depth + 1 if parent else 0

    def expand(self, problem, num_obs):
        """Return the children whose states agree with the observation at their depth."""
        children = []
        for act in problem.actions(self.state):
            child = self.child_node(problem, act)
            if child.depth < num_obs and is_possible(child.state, problem.observ_list[child.depth]):
                children.append(child)
        return children

    def child_node(self, problem, action):
        """Create the successor node reached by applying *action*. [Figure 3.10]"""
        successor = problem.result(self.state, action)
        return Node(successor, self, action)

    def path(self):
        """Return the list of states from the root down to this node."""
        states = []
        node = self
        while node:
            states.append(node.state)
            node = node.parent
        states.reverse()
        return states
class Problem(object):
    """Abstract search problem; subclasses must supply actions() and result()."""

    def __init__(self, initial, permutations=None):
        self.initial = initial
        self.permutations = permutations

    def actions(self, state):
        """Return the actions applicable in *state* (abstract)."""
        raise NotImplementedError

    def result(self, state, action):
        """Return the state reached by applying *action* in *state* (abstract)."""
        raise NotImplementedError
class MedicalProblem(Problem):
    """This class implements a medical problem according to problem description file.

    Grid cells are (status, age) tuples; status is one of 'S' (sick),
    'H' (healthy), 'U' (unpopulated), 'I' (immune), 'Q' (quarantined).
    """
    def __init__(self, initial,permutation=None):
        """Build the problem from the input dict.

        initial -- dict with 'medics', 'police' and 'observations' keys
        permutation -- candidate statuses to fill the '?' cells of the
        first observation with (one per unknown cell)."""
        self.medics = initial["medics"]
        self.police = initial["police"]
        self.initial = initial['observations']
        self.obser_num = len(initial['observations'])
        self.row = len(initial['observations'][0])
        self.col = len(initial['observations'][0][0])
        self.init_matrix(self.initial,permutation)
        Problem.__init__(self, self.initial,permutation)
    def init_matrix(self,initial,perm):
        # Deep-copy the observations into mutable nested lists.
        observ = [list(x) for x in initial]
        observations_list = [[list(x) for x in observ[i]] for i in range(len(observ))]
        self.observ_list = tuple(observations_list)
        if perm != ():
            if len(perm)>0:
                # Substitute the permutation values into the '?' cells of the
                # first observation and wrap every cell as (status, age=1).
                index=0
                for i in range(len(observations_list[0])):
                    for j in range(len(observations_list[0][0])):
                        if observations_list[0][i][j]=='?':
                            observations_list[0][i][j]=(perm[index],1)
                            index+=1
                        else:
                            observations_list[0][i][j]=(observations_list[0][i][j],1)
        else:
            # No unknowns: just wrap every cell as (status, age=1).
            for k in range(len(observations_list[0])):
                for l in range(len(observations_list[0][0])):
                    observations_list[0][k][l] = (observations_list[0][k][l], 1)
        # Freeze the (filled-in) first observation as the initial state.
        temp = [tuple(x) for x in observations_list]
        temp = tuple([tuple([tuple(x) for x in temp[i]]) for i in range(len(temp))])
        self.initial = temp[0]
    def actions(self,state):
        """Returns all the actions that can be executed in the given
        state. The result should be a tuple (or other iterable) of actions
        as defined in the problem description file"""
        sick = []
        health = []
        num_s = 0
        num_h = 0
        # Collect every quarantine/vaccinate candidate cell.
        for i in range(self.row):
            for j in range(self.col):
                if state[i][j][0] == 'S':
                    sick.append(("quarantine", (i, j)))
                    num_s += 1
                elif state[i][j][0] == 'H':
                    health.append(("vaccinate", (i, j)))
                    num_h += 1
        res = []
        # Choose exactly min(num_h, medics) vaccinations ...
        if num_h < self.medics:
            health_pow = list(chain.from_iterable(combinations(health, r) for r in range(num_h, num_h + 1)))[:]
        else:
            health_pow = list(chain.from_iterable(combinations(health, r) for r in range(self.medics, self.medics + 1)))[:]
        # ... and exactly min(num_s, police) quarantines.
        if num_s < self.police:
            sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(num_s, num_s + 1)))[:]
        else:
            sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(self.police, self.police + 1)))[:]
        if len(health_pow) == 0:
            sick_pow.append(())
            return tuple(sick_pow)
        if len(sick_pow) == 0:
            health_pow.append(())
            return tuple(health_pow)
        # Cartesian product: one combined action per (vaccinate-set, quarantine-set).
        for i in range(len(health_pow)):
            for j in range(len(sick_pow)):
                res.append(health_pow[i] + sick_pow[j])
        return tuple(res)
    def healthy(self,i, j, state,state_after_act):
        # A healthy cell becomes sick if any 4-neighbour is sick and was not
        # quarantined by the current action; otherwise it stays healthy.
        if (i - 1) >= 0:
            if state[i - 1][j][0] == 'S':
                if state_after_act[i-1][j]!=0:
                    if state_after_act[i - 1][j][0] != 'Q':
                        return ('S', 1)
                else:
                    return ('S', 1)
        if (i + 1) < self.row:
            if state[i + 1][j][0] == 'S':
                if state_after_act[i+1][j]!=0:
                    if state_after_act[i +1][j][0] != 'Q':
                        return ('S', 1)
                else:
                    return ('S', 1)
        if (j - 1) >= 0:
            if state[i][j - 1][0] == 'S':
                if state_after_act[i][j-1]!=0:
                    if state_after_act[i][j-1][0] != 'Q':
                        return ('S', 1)
                else:
                    return ('S', 1)
        if (j + 1) < self.col:
            if state[i][j + 1][0] == 'S':
                if state_after_act[i][j+1]!=0:
                    if state_after_act[i][j +1][0] != 'Q':
                        return ('S', 1)
                else:
                    return ('S', 1)
        return ('H', 1)
    def result(self, state, action):
        """Return the state that results from executing the given
        action in the given state. The action must be one of
        self.actions(state)."""
        # 0 marks "not decided yet"; the action's cells are fixed first.
        state_after_act = [[0 for i in range(self.col)] for j in range(self.row)]
        for k in action:
            x = k[1][0]
            y = k[1][1]
            if k[0] == "vaccinate":
                state_after_act[x][y] = ('I', 1)
            else:
                state_after_act[x][y] = ('Q', 1)
        # Then every remaining cell evolves by the disease dynamics:
        # S lasts 3 turns then heals; Q lasts 2 turns then heals; U/I persist;
        # H may be infected by an unquarantined sick neighbour.
        for i in range(self.row):
            for j in range(self.col):
                if state_after_act[i][j] == 0:
                    if state[i][j][0] == 'U' or state[i][j][0] == 'I':
                        state_after_act[i][j] = state[i][j]
                    elif state[i][j][0] == 'S':
                        if state[i][j][1] == 3:
                            state_after_act[i][j] = ('H', 1)
                        else:
                            if state[i][j][1] == 1:
                                state_after_act[i][j] = ('S', 2)
                            elif state[i][j][1] == 2:
                                state_after_act[i][j] = ('S', 3)
                    elif state[i][j][0] == 'Q':
                        if state[i][j][1] == 2:
                            state_after_act[i][j] = ('H', 1)
                        else:
                            state_after_act[i][j] = ('Q', 2)
                    elif state[i][j][0] == 'H':
                        state_after_act[i][j] = self.healthy(i, j, state,state_after_act)
            state_after_act[i] = tuple(state_after_act[i])
        return tuple(state_after_act)
def solve_problem(input):
    """Answer each query with 'T', 'F' or '?' by enumerating every filling of
    the unknown ('?') cells of the first observation and searching each one."""
    # NOTE: the parameter name shadows the builtin input(); kept as-is for
    # interface compatibility with callers.
    queries = input['queries']
    unknown_count = count_question_mark(input['observations'][0])
    possible_states = unknown_count * ['S', 'H', 'U']
    if unknown_count == 1:
        permutation = possible_states
    else:
        combos = chain.from_iterable(
            combinations(possible_states, r)
            for r in range(unknown_count, unknown_count + 1))
        permutation = list(set(list(combos)))
    result_dict = {tuple(query): [] for query in queries}
    help_dict = {tuple(perm): [] for perm in permutation}
    if len(permutation) == 0:
        problem = MedicalProblem(Problem(input).initial)
        BFS(problem, (), help_dict, problem.obser_num)
    else:
        for perm in permutation:
            problem = MedicalProblem(Problem(input, perm).initial, perm)
            BFS(problem, perm, help_dict, problem.obser_num)
    check_query(queries, result_dict, help_dict)
    return result_dict
def count_question_mark(observation):
    """Return how many cells of the 2-D *observation* grid hold the unknown
    marker '?'.

    Works for any sequence rows (lists, tuples, strings); an empty grid
    yields 0, matching the original index-loop behaviour.
    """
    return sum(row.count('?') for row in observation)
def BFS(problem, permutation, help_dict, num_observ):
    """Breadth-first search over problem states; every complete path (one
    state per observation) is appended to help_dict[tuple(permutation)]."""
    frontier = utils.FIFOQueue()
    frontier.append(Node(problem.initial))
    explored = set()
    while frontier:
        current = frontier.pop()
        explored.add(current.state)
        for child in current.expand(problem, num_observ):
            if child.state in explored or child in frontier:
                continue
            if child.depth == len(problem.observ_list) - 1:
                help_dict[tuple(permutation)].append(child.path())
            frontier.append(child)
    return None
def is_possible(possible_state, true_state):
    """Return True when *possible_state* (cells are (status, age) tuples)
    agrees with *true_state* everywhere the observation is not the
    wildcard '?'.
    """
    return all(
        true_state[i][j] == '?' or possible_state[i][j][0] == true_state[i][j]
        for i in range(len(possible_state))
        for j in range(len(possible_state[0]))
    )
def equal(query, state):
    """Return 1 when the state sequence matches the query, else 0.

    query is ((row, col), time, status); state is indexed [time][row][col]
    with (status, age) cells."""
    (i, j), t, status = query[0], query[1], query[2]
    return int(state[t][i][j][0] == status)
def check_query(queries, result_dict, help_dict):
    """Resolve each query to 'T' (true on every consistent path), 'F' (false
    on every path), or '?' (mixed evidence)."""
    for query in queries:
        undecided = True
        for paths in help_dict.values():
            if not undecided:
                break
            for path in paths:
                result_dict[query].append(equal(query, path))
                if 0 in result_dict[query] and 1 in result_dict[query]:
                    # Both outcomes observed: the query is unknowable.
                    result_dict[query] = "?"
                    undecided = False
                    break
        if undecided:
            result_dict[query] = 'T' if 0 not in result_dict[query] else 'F'
|
996,990 | 9ef40157524fb27b6be48a80eded433825cf8949 | from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, INT, create_engine, TEXT
Base = declarative_base()
class VirusStatics(Base):
    # Daily per-region/per-city virus statistics row.  The comment= strings
    # below are column comments stored in the database schema (runtime data),
    # so they are left untouched.
    __tablename__ = 'VirusStatics'
    id = Column(INT, primary_key=True, autoincrement=True)
    report_date = Column(String(10), comment="ๆฅๅฏผๆถ้ด")
    # NOTE(review): region and city reuse report_date's column comment —
    # looks like a copy-paste slip in the metadata; confirm intended text.
    region = Column(String(10), comment="ๆฅๅฏผๆถ้ด")
    city = Column(String(10), comment="ๆฅๅฏผๆถ้ด")
    new_confirm = Column(INT, comment="ๆฐๅข็กฎ่ฏ")
    new_cure = Column(INT, comment="ๆฐๅขๅบ้ข")
    new_die = Column(INT, comment="ๆฐๅขๆญปไบก")
    message_source = Column(String(50), comment="ๆถๆฏๆฅๆบ")
    source_url_one = Column(TEXT, comment="ๆฅๆบ้พๆฅ1")
    source_url_two = Column(TEXT, comment="ๆฅๆบ้พๆฅ2")
    source_url_three = Column(TEXT, comment="ๆฅๆบ้พๆฅ3")
    note = Column(String(150), comment="ๅคๆณจ")
# NOTE(review): database credentials are hard-coded in this URL — consider
# loading them from configuration/environment instead.
engine: Engine = create_engine('mysql+pymysql://lumia:1044740758@LumiaO:3306/VirusStatic')
# WARNING: drop_all runs at import time and deletes every mapped table (and
# all of its data) before the schema is recreated below — destructive on an
# existing database.
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
|
996,991 | df652ee8a72d537c9d512abd7f1bacee993f552e | print "Tarea python"
import numpy as np  # NOTE(review): numpy is imported but never used below
N=30
# Collect the multiples of 3, 5, 7 and 9 that do not exceed N.
tres=[]
cinco=[]
siete=[]
nueve=[]
print tres
for i in range (N+1):
    tr=3*i
    cin=5*i
    sie=7*i
    nue=9*i
    if(tr<=N):
        tres.append(tr)
    if(cin<=N):
        cinco.append(cin)
    if(sie<=N):
        siete.append(sie)
    if(nue<=N):
        nueve.append(nue)
print "multiplos de tres hasta",str(N),"\n",tres
print "multiplos de cinco hasta",str(N),"\n",cinco
print "multiplos de siete hasta",str(N),"\n",siete
print "multiplos de nueve hasta",str(N),"\n",nueve
# Pad the shorter lists with zeros so all four match len(tres) for the
# element-wise sum below.
t3=len(tres)
for i in range (t3-len(cinco)):
    cinco.append(0)
for i in range (t3-len(siete)):
    siete.append(0)
for i in range (t3-len(nueve)):
    nueve.append(0)
a=len(tres)
b=0
for i in range (a):
    b=b+(tres[i]+cinco[i]+siete[i]+nueve[i])
print "suma de los multiplos de 3, 5, 7 y 9 hasta el numero", str(N),"\n",b
|
996,992 | 2376b5b1ea6f0de0b95f32e127d1f02f19685488 | # Generated by Django 2.2.4 on 2019-08-20 10:53
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: changes only the goods model's Meta options
    # (admin display names); no database schema change is performed.
    dependencies = [
        ('goods', '0003_auto_20190814_1545'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='goods',
            options={'verbose_name': 'ๅๅไฟกๆฏ', 'verbose_name_plural': 'ๅๅไฟกๆฏ'},
        ),
    ]
|
996,993 | 093ca9803474de0f92fba9726d4f3b79ff2b8bda | # Generated by Django 3.0.5 on 2020-08-05 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters blog.createdon to a DateTimeField
    # with default ''.
    # NOTE(review): an empty-string default on a DateTimeField is suspect —
    # confirm it should not be timezone.now / auto_now_add instead.
    dependencies = [
        ('accounts', '0005_auto_20200805_2028'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blog',
            name='createdon',
            field=models.DateTimeField(default=''),
        ),
    ]
|
996,994 | 2c3ab6a02bb4e3c0b1650b7e4b952c703fac2948 | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import logging
class DataQualityOperator(BaseOperator):
    """Row-count data-quality checks against Redshift tables.

    For each checked table the operator runs the SQL supplied in
    ``params["<table>_data_check"]`` and fails the task when the query
    returns no rows, or when the returned count exceeds
    ``params["<table>_data_result"]`` (interpreted as NULL values present).
    """

    ui_color = '#89DA59'

    # Tables validated, in execution order.
    CHECKED_TABLES = ("users", "songs", "artists", "time")

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 *args, **kwargs):
        """redshift_conn_id -- Airflow connection id for the Redshift cluster.

        The task-level params dict (check SQL and thresholds) arrives via
        **kwargs and is kept on self.kwargs for execute()."""
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.args = args
        self.kwargs = kwargs

    def execute(self, context):
        """Run the quality check for every configured table."""
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        # The four per-table check bodies were copy-pasted in the original;
        # a single helper keeps them identical by construction.
        for table in self.CHECKED_TABLES:
            self._check_table(redshift_hook, table)

    def _check_table(self, hook, table):
        """Run one table's check query and raise ValueError on failure."""
        self.log.info('Data quality check on %s table', table)
        records = hook.get_records(self.kwargs["params"]["%s_data_check" % table])
        self.log.info('%s table records %s', table, records)
        # BUG FIX: the original tested len(records) < 0, which is never true,
        # so the "no results" guard could never fire.
        if len(records) == 0 or len(records[0]) == 0:
            raise ValueError("Data quality check failed. %s table returned no results" % table)
        num_records = records[0][0]
        if num_records > self.kwargs["params"]["%s_data_result" % table]:
            raise ValueError("Data quality check failed. %s table contained NULL values" % table)
        logging.info("Data quality on table %s check passed with %s records", table, records[0][0])
|
996,995 | a331d3ef4a5f919ba8d935964890045d74885983 | # @Author: Mikoลaj Stฤpniewski <maikelSoFly>
# @Date: 2017-12-16T02:09:12+01:00
# @Email: mikolaj.stepniewski1@gmail.com
# @Filename: main.py
# @Last modified by: maikelSoFly
# @Last modified time: 2017-12-17T15:20:01+01:00
# @License: Apache License Version 2.0, January 2004
# @Copyright: Copyright ยฉ 2017 Mikoลaj Stฤpniewski. All rights reserved.
from math import ceil
from math import floor
from neurons import *
from data import *
from progressBar import *
import random
import copy
from prettytable import PrettyTable
def countUniqueItems(arr):
    """Return the number of distinct (hashable) elements in *arr*."""
    # A plain set is simpler and cheaper than len(Counter(arr).keys()).
    return len(set(arr))
def getMostCommonItem(arr):
    """Return the single most frequent element of *arr* (first-seen wins ties)."""
    counts = Counter(arr)
    (winner, _), = counts.most_common(1)
    return winner
def averageParameters(species, n=50):
    """Average each of the four iris features over *species*, rounded up to
    two decimals.

    species -- iterable of rows with (at least) four numeric entries
    n -- divisor used for the mean (defaults to the 50 samples per species)
    """
    totals = [0.0, 0.0, 0.0, 0.0]  # renamed from `sum`: don't shadow the builtin
    for row in species:
        for k in range(4):
            totals[k] += row[k]
    # ceil(x * 100) / 100 rounds each mean *up* to two decimal places.
    return [ceil((totals[k] / n) * 100) / 100 for k in range(4)]
""" Main training function !!! """
def train(kohonenGroup, trainingData):
pBar = ProgressBar()
print('\n {} + {} + {}'.format(speciesNames[0], speciesNames[1], speciesNames[2]))
pBar.start(maxVal=epochs)
for i in range(epochs):
testWinners = kohonenGroup.train(trainingData, histFreq=20)
pBar.update()
return testWinners
if __name__ == '__main__':
    """ Training parameters """
    epochs = 25
    decay = 0.01*(epochs)*13000
    neuronGrid = [20, 20]
    lRate = 0.07 # 0.07 one of the best
    """ Exclude number of irises from total data set
    and add to test data """
    noExcludedIrises = 5
    dataUrl = 'http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
    speciesNames = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
    # NOTE: downloads the UCI iris data over the network at startup.
    data = DataReader(url=dataUrl, delimiter=',').parse()
    testData = []
    for j in range(len(data)):
        data[j].pop() # remove species name
        data[j] = [float(i) for i in data[j]] # cast str elements to float
        data[j] = normalizeInputs(data[j]) # normalize elements to 0...1 values
    # The UCI file lists the three species in blocks of 50 rows each.
    irisDict = {'setosa': data[:50], 'versicolor': data[50:100], 'virginica': data[100:]}
    speciesArr = np.split(np.array(data), 3)
    """ Pop random irises from dict to testData """
    for i in range(noExcludedIrises):
        index = np.random.randint(50-i)
        testData.append(irisDict['setosa'].pop(index))
        testData.append(irisDict['versicolor'].pop(index))
        testData.append(irisDict['virginica'].pop(index))
    kohonenGroup = KohonenNeuronGroup(
        numOfInputs=4,
        numOfNeurons=neuronGrid,
        processFunc=euklidesDistance,
        lRateFunc=simpleLRateCorrection(decay),
        lRate=lRate
    )
    print('lRate0: {:.2f}\tdecay: {}\tneurons in group: {:d}\tepochs: {:d}'.format(
        kohonenGroup._lRate, decay, kohonenGroup['totalNumOfNeurons'], epochs
    ))
    print('\nโขAverages:')
    for i, species in enumerate(speciesArr):
        print('{} \t{}'.format(averageParameters(species), speciesNames[i]))
    print()
    """ Training & testing """
    trainingData = []
    trainingData.extend(irisDict['setosa'])
    trainingData.extend(irisDict['versicolor'])
    trainingData.extend(irisDict['virginica'])
    trainingWinners = train(kohonenGroup, trainingData)
    numOfActiveNeurons = countUniqueItems(trainingWinners)
    trainingWinners = np.split(np.array(trainingWinners), 3)
    # Neuron objects (for the weights table) and their ids (for the summary).
    mostActiveNeurons1 = [getMostCommonItem(row) for row in trainingWinners]
    mostActiveNeurons = [getMostCommonItem(row)._iid for row in trainingWinners]
    print('\n\nโขTraining Summary:')
    table1 = PrettyTable()
    table1.field_names = ['Total active', 'Most active', 'Last lRate']
    table1.add_row([numOfActiveNeurons, mostActiveNeurons, kohonenGroup._currentLRate])
    print(table1)
    testWinners = kohonenGroup.classify(testData)
    testWinners = np.split(np.array(testWinners), len(testData)/3)
    print('\n\nโขTest Results:')
    table2 = PrettyTable()
    table2.field_names = [speciesNames[0], speciesNames[1], speciesNames[2]]
    for row in testWinners:
        table2.add_row([neuron._iid for neuron in row ])
    print(table2)
    print('\n\nโขWinners Weights:')
    table3 = PrettyTable()
    table3.field_names = ['Neuron iid', 'Sepal length', 'Sepal width', 'Petal length', 'Petal width']
    for neuron in mostActiveNeurons1:
        table3.add_row([neuron._iid,
                        round(neuron._weights[0], 3),
                        round(neuron._weights[1], 3),
                        round(neuron._weights[2], 3),
                        round(neuron._weights[3], 3)])
    print(table3)
    # Optional interactive dump of each winning neuron's error history.
    answ = input('Print error history?\ty/n: ')
    if answ == 'y':
        for neuron in mostActiveNeurons1:
            print('\n')
            print('โ' * 25, ' [neuron: {:d}]\n\n'.format(neuron._iid))
            for row in neuron._errorHist:
                print(row)
|
996,996 | 3134c30ff591b33bd3b2eb242e72dcd593bc82e6 | import cv2
import numpy as np
import math
import queue
import time
import dlib
import zerorpc
import base64
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
###############################################################################################################################################
# I S S U E #
###############################################################################################################################################
mod = SourceModule("""
#include<math.h>
#include<stdlib.h>
__device__ void computeCurvePt(float* curveP, int pt, int resultPt[2])
{
//int* resultPt = (int*)malloc(2 * sizeof(int));
//int resultPt[2];
resultPt[0] = pt;
resultPt[1] = int(curveP[0] * pt * pt + curveP[1] * pt + curveP[2]);
//return resultPt;
}
// ๊ทผ์ ๊ณต์ ์ด์ฉํด์ y์ขํ ์ฃผ์ด์ก์ ๋ ๊ณก์ ๊ณผ ๋ง๋๋ x์ขํ ์ฐพ๊ธฐ
//__device__ void computeCurvePx(float* UpperCurve, float* LowerCurve, int py, int* curvePx_prev)
__device__ void computeCurvePx(float* UpperCurve, float* LowerCurve, int py, int curvePx_prev[2], int Curve[2])
{
if((UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py) < 0) || (LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py) < 0))
{
Curve[0] = curvePx_prev[0];//
Curve[1] = curvePx_prev[1];//
//return circlePx_prev;
}
else
{
int x1_Up = int((-UpperCurve[1] - sqrtf(UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py))) / (2 * UpperCurve[0]));
int x2_Up = int((-UpperCurve[1] + sqrtf(UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py))) / (2 * UpperCurve[0]));
int x1_Low = int((-LowerCurve[1] + sqrtf(LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py))) / (2 * LowerCurve[0]));
int x2_Low = int((-LowerCurve[1] - sqrtf(LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py))) / (2 * LowerCurve[0]));
//int* result = (int*)malloc(2 * sizeof(int));
int result[2];
result[0] = (x1_Up < x1_Low ? x1_Low : x1_Up);
result[1] = (x2_Low > x2_Up ? x2_Up : x2_Low);
Curve[0] = result[0];//
Curve[1] = result[1];//
//return result;
}
}
// y์ขํ๊ฐ ์ฃผ์ด์ก์ ๋ ๋๋์์ ๋ง๋๋ ์ ์ฐพ๊ธฐ (O: ๋๋์ ์์ , R: ๋ฐ์ง๋ฆ, py: ์ฃผ์ด์ง y์ขํ)
//__device__ void computeCirclePx(int O[2], int R, int py, int* circlePx_prev)
__device__ void computeCirclePx(int O[2], int R, int py, int circlePx_prev[2], int Circle[2])
{
if(R * R - (py - O[0]) * (py - O[0]) < 0)
{
Circle[0] = circlePx_prev[0];//
Circle[1] = circlePx_prev[1];//
//return circlePx_prev;
}
else
{
//int* result = (int*)malloc(2 * sizeof(int));
int result[2];
result[0] = int(sqrtf(R * R - (py - O[0]) * (py - O[0])) + O[1]); // ํฐ x
result[1] = int(-sqrtf(R * R - (py - O[0]) * (py - O[0])) + O[1]); // ์์ x
Circle[0] = result[0];//
Circle[1] = result[1];//
//return result;
}
}
__device__ bool IsLowerComparison(float* curveP, int pt[2])
{
if(pt[1] < (curveP[0] * pt[0] * pt[0] + curveP[1] * pt[0] + curveP[2]))
return true;
else
return false;
}
__device__ bool IsUpperComparison(float* curveP, int pt[2])
{
if(pt[1] > (curveP[0] * pt[0] * pt[0] + curveP[1] * pt[0] + curveP[2]))
return true;
else
return false;
}
__global__ void pupilCheckL(int* PupilLocationLx_gpu, int* PupilLocationLy_gpu, int* frame_gpu, float* upperCurve, float* lowerCurve, int xmin, int ymin, int pupilcols, int cols, int tempw, int temph)
{
if(threadIdx.x + blockDim.x * blockIdx.x < tempw && threadIdx.y + blockDim.y * blockIdx.y < temph)
{
int i = xmin + threadIdx.x + blockDim.x * blockIdx.x;
int j = ymin + threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.x + blockDim.x * blockIdx.x;
int l = threadIdx.y + blockDim.y * blockIdx.y;
int Pt[2] = {i, j};
if (IsLowerComparison(lowerCurve, Pt) && IsUpperComparison(upperCurve, Pt))
{
if(frame_gpu[(j * cols + i) * 3] < 80 && frame_gpu[(j * cols + i) * 3 + 1] < 80 && frame_gpu[(j * cols + i) * 3 + 2] < 80)
{
PupilLocationLx_gpu[l * pupilcols + k] = i;
PupilLocationLy_gpu[l * pupilcols + k] = j;
//frame_gpu[(j * cols + i) * 3] = 255;
//frame_gpu[(j * cols + i) * 3 + 1] = 255;
//frame_gpu[(j * cols + i) * 3 + 2] = 255;
}
}
}
}
__global__ void pupilCheckR(int* PupilLocationRx_gpu, int* PupilLocationRy_gpu, int* frame_gpu, float* upperCurve, float* lowerCurve, int xmin, int ymin, int pupilcols, int cols, int tempw, int temph)
{
if(threadIdx.x + blockDim.x * blockIdx.x < tempw && threadIdx.y + blockDim.y * blockIdx.y < temph)
{
int i = xmin + threadIdx.x + blockDim.x * blockIdx.x;
int j = ymin + threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.x + blockDim.x * blockIdx.x;
int l = threadIdx.y + blockDim.y * blockIdx.y;
int Pt[2] = {i, j};
if (IsLowerComparison(lowerCurve, Pt) && IsUpperComparison(upperCurve, Pt))
{
if(frame_gpu[(j * cols + i) * 3] < 80 && frame_gpu[(j * cols + i) * 3 + 1] < 80 && frame_gpu[(j * cols + i) * 3 + 2] < 80)
{
PupilLocationRx_gpu[l * pupilcols + k] = i;
PupilLocationRy_gpu[l * pupilcols + k] = j;
//frame_gpu[(j * cols + i) * 3] = 255;
//frame_gpu[(j * cols + i) * 3 + 1] = 255;
//frame_gpu[(j * cols + i) * 3 + 2] = 255;
}
}
}
}
__global__ void warping(int* frame_gpu, float* ZMatrix_gpu, int* mov_info_gpu, int* Veg_gpu, int* resultFrame_gpu, int* MaskFrame_gpu, float FaceOrigin_gpu, int x_eye, int y_eye, float Zn, float r, float theta, int h, int w, int w_tmp, int cols, int rows, float w_gaze)
{
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if((i < h) && (j < w) && (i >= 0) && (j >= 0))
{
//float w_gaze = 0.1; // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
float w_eyeheight = 100/float(100); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
int h_a = int(h/2); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
int w_a = int(w/2); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
int tmp_diff[3];
int tmp_1[3];
int tmp_2[3];
theta = 0.15;
ZMatrix_gpu[h_a * w_tmp + w_a] = Zn - r + sqrtf(r * r - (w/2 - FaceOrigin_gpu) * (w/2 - FaceOrigin_gpu)); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
int tmp = int(cos(-theta-w_gaze)*h_a + sin(-theta-w_gaze)*ZMatrix_gpu[h_a * w_tmp + w_a]); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
ZMatrix_gpu[h_a * w_tmp + w_a] = int(sin(-theta-w_gaze)*(-1)*h_a+ cos(-theta-w_gaze)*ZMatrix_gpu[h_a * w_tmp + w_a]); // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
int alpha = h_a - tmp; // ๋ฐ์ผ๋ก ๋นผ๋ ๋จ
ZMatrix_gpu[i * w_tmp + j] = Zn - r + sqrtf(r * r - (j - FaceOrigin_gpu) * (j - FaceOrigin_gpu));
tmp = int(cos(-theta-w_gaze)*i + sin(-theta-w_gaze)*ZMatrix_gpu[i * w_tmp + j]);
ZMatrix_gpu[i * w_tmp + j] = int(sin(-theta-w_gaze)*(-1)*i+ cos(-theta-w_gaze)*ZMatrix_gpu[i * w_tmp + j]);
int v = int((i-h_a)*theta*1.1);
int Xa_eye = int((w_eyeheight)*int(round( tmp * (cos(theta) + sin(theta) * tan(theta)) - ZMatrix_gpu[i * w_tmp + j] * sin(theta) * cos(theta) ) )) + int(alpha*(1.4)) + v;
int Xa = Xa_eye;
if((Xa > -1) && (Xa < h + 1))
{
mov_info_gpu[w * Xa + j] = i;
MaskFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3] = 0;
MaskFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1] = 0;
MaskFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2] = 0;
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3] = frame_gpu[((i + y_eye) * cols + j + x_eye) * 3];
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1] = frame_gpu[((i + y_eye) * cols + j + x_eye) * 3 + 1];
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2] = frame_gpu[((i + y_eye) * cols + j + x_eye) * 3 + 2];
if(Xa+y_eye < Veg_gpu[j+x_eye])
{
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3] = frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3];
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1] = frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3 + 1];
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2] = frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3 + 2];
}
else
{
tmp_diff[0] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3] - frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3];
tmp_diff[1] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1] - frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3 + 1];
tmp_diff[2] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2] - frame_gpu[((Xa+y_eye) * cols + j+x_eye) * 3 + 2];
if(sqrtf(tmp_diff[0] * tmp_diff[0] + tmp_diff[1] * tmp_diff[1] + tmp_diff[2] * tmp_diff[2] ) < 40)
{
tmp_1[0] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3];
tmp_1[1] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1];
tmp_1[2] = resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2];
tmp_2[0] = frame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3];
tmp_2[1] = frame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1];
tmp_2[2] = frame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2];
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3] = int((tmp_1[0]+tmp_2[0])/2);
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 1] = int((tmp_1[1]+tmp_2[1])/2);
resultFrame_gpu[((Xa + y_eye) * cols + j + x_eye) * 3 + 2] = int((tmp_1[2]+tmp_2[2])/2);
}
}
}
}
}
__global__ void interpolation(int* frame_gpu, int* mov_info_gpu, int* resultFrame_gpu, int* MaskFrame_gpu, int x, int y, int h, int w, int cols, int rows)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if((i < w - 1) && (j < h - 1) && (i >= 0) && (j >= 0))
{
if(MaskFrame_gpu[((j+y) * cols + i+x) * 3] != 0 && j!=0)
{
int flag_p=0; //plus ๋ฐฉํฅ Vector์กฐ์ฌ flag #์ ์ธ
int flag_m=0; //minus๋ฐฉํฅ Vector์กฐ์ฌ flag #์ ์ธ
int y_mov_p=0; //plus๋ฐฉํฅ mov #์ ์ธ
int y_mov_m=0; //minus๋ฐฉํฅ mov #์ ์ธ
int y_mov=0;
int tmp[4];
int tmpCheck = 0;
int e;
for(e = 1; e < int(h/2); e++)
{
if(flag_p==0)
{
if(mov_info_gpu[(j+e)*w+i] != 0)
{
y_mov_p = j+e - mov_info_gpu[(j+e)*w+i];
tmp[tmpCheck] = y_mov_p;
tmp[tmpCheck + 1] = e;
tmpCheck += 2;
flag_p = 1;
}
}
if(flag_m==0)
{
if(j-e>=0)
{
if(mov_info_gpu[(j-e)*w+i]!=0)
{
y_mov_m = j-e - mov_info_gpu[(j-e)*w+i] ;
tmp[tmpCheck] = y_mov_m;
tmp[tmpCheck + 1] = e;
tmpCheck += 2;
flag_m = 1;
}
}
}
if((flag_p==1) && (flag_m==1))
{
y_mov=(tmp[0]*tmp[3]+tmp[2]*tmp[1])/(tmp[1]+tmp[3]);
resultFrame_gpu[((j+y) * cols +i+x) * 3] = frame_gpu[((j-int(roundf(y_mov))+y) * cols +i+x) * 3];
resultFrame_gpu[((j+y) * cols +i+x) * 3 + 1] = frame_gpu[((j-int(roundf(y_mov))+y) * cols +i+x) * 3 + 1];
resultFrame_gpu[((j+y) * cols +i+x) * 3 + 2] = frame_gpu[((j-int(roundf(y_mov))+y) * cols +i+x) * 3 + 2];
break;
}
}
}
}
}
__global__ void horizontalCorrection(int* resultFrame, int* frame, float* avg, float* upperCurve, float* lowerCurve, int PupilMovVec, int PupilSquaredRadius, int cols, int h_start, int h_end, int w_start, int w_end)
{
int j = w_start + threadIdx.x + blockDim.x * blockIdx.x;
int i = h_start + threadIdx.y + blockDim.y * blockIdx.y;
if(i >= h_start && i < h_end && j >= w_start && j < w_end)
{
int startPoint_r;
int startPoint_l;
int curvePx_prev[2] = {0, 0};
int curvePx[2];
computeCurvePx(upperCurve, lowerCurve, i, curvePx_prev, curvePx);
int O[2] = {int(avg[1]), int(avg[0] + PupilMovVec)};
int circlePx[2];
computeCirclePx(O, PupilSquaredRadius, i, curvePx_prev, circlePx); // ์ด๋ํ ๋๋์์ y์ขํ ๊ฐ์ ๊ต์
int upPt[2] = {j, i};
int lowPt[2] = {j - int(PupilMovVec), i};
if((IsUpperComparison(upperCurve, upPt) && IsLowerComparison(lowerCurve, upPt)) || (IsUpperComparison(upperCurve, lowPt) && IsLowerComparison(lowerCurve, lowPt)))
{
float dist = sqrtf((avg[0] - (j - int(PupilMovVec))) * (avg[0] - (j - int(PupilMovVec))) + (avg[1] - i) * (avg[1] - i));
// ๊ฑฐ๋ฆฌ๊ฐ ๋๋์ ์ค์ฌ์ด๋ ๋ฐ์ง๋ฆ ์ด๋ด์ด๊ณ ์ด๋ ํ ์ ์ด ๊ณก์ ๋ฒ์ ์์ ๋ค์ด์จ ์ ๋ง ์ด๋์ํด
if(dist < PupilSquaredRadius && IsUpperComparison(upperCurve, lowPt) && IsLowerComparison(lowerCurve, lowPt) && (IsUpperComparison(upperCurve, upPt) && IsLowerComparison(lowerCurve, upPt)))
{
resultFrame[(i * cols + j) * 3] = frame[(i * cols + (j - int(PupilMovVec))) * 3];
resultFrame[(i * cols + j) * 3 + 1] = frame[(i * cols + (j - int(PupilMovVec))) * 3 + 1];
resultFrame[(i * cols + j) * 3 + 2] = frame[(i * cols + (j - int(PupilMovVec))) * 3 + 2];
}
else
{
if(PupilMovVec >= 0)
{
startPoint_r = curvePx[1] - int((curvePx[1] - circlePx[0])/2); // ์ด๋ ํ ์ค๋ฅธ์ชฝ ์ค๊ฐ ํฐ์
startPoint_l = curvePx[0] + int(((circlePx[1] - PupilMovVec) - curvePx[0])/2); // ์ด๋ ์ ์ผ์ชฝ ์ค๊ฐ ํฐ์
}
else
{
startPoint_r = curvePx[1] - int((curvePx[1] - (circlePx[0] - PupilMovVec))/2); // ์ด๋ ์ ์ค๋ฅธ์ชฝ ์ค๊ฐ ํฐ์
startPoint_l = curvePx[0] + int((circlePx[1] - curvePx[0])/2); // ์ด๋ ํ ์ผ์ชฝ ์ค๊ฐ ํฐ์
}
// ๋๋์ ์ค๋ฅธ์ชฝ ํฐ์ ๋ณด๊ฐ
if(j >= circlePx[0] && j < startPoint_r)
{
float ratio = (startPoint_r - j) / float(startPoint_r - circlePx[0]);
int idx = int(startPoint_r - (startPoint_r - (circlePx[0] - PupilMovVec)) * ratio);
resultFrame[(i * cols + j) * 3] = frame[(i * cols + idx) * 3];
resultFrame[(i * cols + j) * 3 + 1] = frame[(i * cols + idx) * 3 + 1];
resultFrame[(i * cols + j) * 3 + 2] = frame[(i * cols + idx) * 3 + 2];
}
// ๋๋์ ์ผ์ชฝ ํฐ์ ๋ณด๊ฐ
else if(j <= circlePx[1] && j > startPoint_l)
{
float ratio = (j - startPoint_l) / float(circlePx[1] - startPoint_l);
int idx = int(startPoint_l + ((circlePx[1] - PupilMovVec) - startPoint_l) * ratio);
resultFrame[(i * cols + j) * 3] = frame[(i * cols + idx) * 3];
resultFrame[(i * cols + j) * 3 + 1] = frame[(i * cols + idx) * 3 + 1];
resultFrame[(i * cols + j) * 3 + 2] = frame[(i * cols + idx) * 3 + 2];
}
}
}
}
}
__global__ void smooth(int* resultFrame, float* upperCurve, float* lowerCurve, int w_start, int w_end, int cols)
{
int j = w_start + threadIdx.x + blockDim.x * blockIdx.x;
// ๊ฒฝ๊ณ ์กฐ๊ธ ๋ถ๋๋ฝ๊ฒ
if(j >= w_start && j < w_end + 1)
{
int testPt[2];
int testPt2[2];
computeCurvePt(upperCurve, j, testPt);
computeCurvePt(lowerCurve, j, testPt2);
resultFrame[(testPt[1] * cols + testPt[0]) * 3] = int(resultFrame[(testPt[1] * cols + testPt[0] - 1) * 3] / 2 + resultFrame[(testPt[1] * cols + testPt[0] + 1) * 3] / 2);
resultFrame[(testPt[1] * cols + testPt[0]) * 3 + 1] = int(resultFrame[(testPt[1] * cols + testPt[0] - 1) * 3 + 1] / 2 + resultFrame[(testPt[1] * cols + testPt[0] + 1) * 3 + 1] / 2);
resultFrame[(testPt[1] * cols + testPt[0]) * 3 + 2] = int(resultFrame[(testPt[1] * cols + testPt[0] - 1) * 3 + 2] / 2 + resultFrame[(testPt[1] * cols + testPt[0] + 1) * 3 + 2] / 2);
resultFrame[(testPt2[1] * cols + testPt2[0]) * 3] = int(resultFrame[(testPt2[1] * cols + testPt2[0] - 1) * 3] / 2 + resultFrame[(testPt2[1] * cols + testPt2[0] + 1) * 3] / 2);
resultFrame[(testPt2[1] * cols + testPt2[0]) * 3 + 1] = int(resultFrame[(testPt2[1] * cols + testPt2[0] - 1) * 3 + 1] / 2 + resultFrame[(testPt2[1] * cols + testPt2[0] + 1) * 3 + 1] / 2);
resultFrame[(testPt2[1] * cols + testPt2[0]) * 3 + 2] = int(resultFrame[(testPt2[1] * cols + testPt2[0] - 1) * 3 + 2] / 2 + resultFrame[(testPt2[1] * cols + testPt2[0] + 1) * 3 + 2] / 2);
}
}
""",'nvcc') # ์์ํ๋ Cuda C์ฝ๋ ์์ฑ / ์ค๋ฅ ์์ผ๋ฉด ์ฝ๋ ์ปดํ์ผ๋์ด ์ฅ์น์ ๋ก๋
## Per-frame landmark positions of the detected left / right eye
## (six [x, y] points each, filled in by pointExtraction).
eyeDlibPtL = []
eyeDlibPtR = []
# number of frames to save
frameSave = 20
# extra tuning parameters
phi = 0        # camera tilt; 5 (close to default) ~ -5
theta = 0.45
w_gaze = 0.0
cap = cv2.VideoCapture('vd5.mp4')  # 960x720 source clip
# dlib 68-landmark indices: 36-41 and 42-47 are the two eyes
RIGHT_EYE = list(range(36, 42))
LEFT_EYE = list(range(42, 48))
index = LEFT_EYE + RIGHT_EYE
w_r = 15
h_r = 15
startflag = 0       # start flag (first-frame initialisation)
warpflag_prev = 0   # whether the previous frame was warped (1: warped, 0: original)
prevTime = 0
##################tmp for frame rate up conversion
pad = 50
################################################
# Face detector + 68-landmark predictor.
# NOTE(review): the original constructed both of these twice in a row;
# loading the shape-predictor model is expensive, so build each exactly once.
detector = dlib.get_frontal_face_detector()
#detector = dlib.cnn_face_detection_model_v1('shape_predictor_68_face_landmarks.dat')
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')  # load trained model
# Pupil-shift state carried between frames.
PupilMovVec_L = 0
PupilMovVec_R = 0
PupilSquaredRadius = 10
preAvgLx = 0
preAvgLy = 0
preAvgRx = 0
preAvgRy = 0
mvinfo_prev = []
plist_prev = []
eyeDlibPtL_prev = []
eyeDlibPtR_prev = []
region_prev = [10, 10, 10, 10]
def pointExtraction(frame, gray, detector, predictor, eyeDlibPtL, eyeDlibPtR):
    """Detect the 68 dlib face landmarks and collect the two eyes' points.

    The frame is downsampled to half resolution for detection speed; the
    landmark coordinates are scaled back (*2) into full-frame space.
    The six landmarks of each eye (indices 36-41 and 42-47) are appended to
    the caller-supplied lists eyeDlibPtL / eyeDlibPtR, which are cleared
    first.  Assumes at most one face is detected.

    Returns a numpy array of all 68 [x, y] points (empty when no face is
    found; the caller checks ``len(list_points) < 68``).
    """
    # cv2.resize never modifies its input, so no defensive copies are needed
    half_size = (int(gray.shape[1] * 0.5), int(gray.shape[0] * 0.5))
    frame_downsampled = cv2.resize(frame, dsize=half_size, interpolation=cv2.INTER_AREA)
    gray_downsampled = cv2.resize(gray, dsize=half_size, interpolation=cv2.INTER_AREA)
    # second argument: number of times dlib upsamples the image before detecting
    dets = detector(gray_downsampled, 1)
    eyeDlibPtL.clear()
    eyeDlibPtR.clear()
    list_points = []
    for face in dets:
        shape = predictor(frame_downsampled, face)  # locate the 68 landmarks
        for p in shape.parts():
            list_points.append([p.x * 2, p.y * 2])  # back to full resolution
    list_points = np.array(list_points)
    # Guard against the no-face case: the original indexed list_points[36:48]
    # unconditionally and raised IndexError before the caller's length check.
    if len(list_points) >= 48:
        for idx in range(36, 42):
            eyeDlibPtL.append(list_points[idx])
        for idx in range(42, 48):
            eyeDlibPtR.append(list_points[idx])
    return list_points
##๊ณก์ ๋ฐ๊พธ๊ธฐ
#def CreateVeg(x_l,y_l,w,h): # ์ญ์ ์์
# Crv = [0]*(640)
# ax=0
# ay=0
# for t in range(0,w+10):
# ax= int(
# (1-t/(w+9)) * (1-t/(w+9)) * (x_l) +
# (2 * (t/(w+9)) * (1-t/(w+9)) * (x_l+w*6/10)) +
# (t/(w+9)) * (t/(w+9)) * (x_l+w))
# ay=int(
# (1-t/(w+9)) * (1-t/(w+9)) * (int(y_l*1.04)) +
# (2 * (t/(w+9)) * (1-t/(w+9)) * (y_l-h/4+h/8+h/16)) +
# (t/(w+9)) * (t/(w+9)) * (y_l+h/2))
# Crv[ax]=ay
# return Crv
def getTheta(tilt, p_eye, f):
    """Angle of the eye position seen through a tilted camera.

    With tilt == 0 this reduces to atan(p_eye / f), i.e. the plain
    pinhole-camera angle for pixel offset p_eye at focal length f.
    """
    numerator = p_eye * math.cos(tilt) - f * math.sin(tilt)
    denominator = p_eye * math.sin(tilt) + f * math.cos(tilt)
    return math.atan(numerator / denominator)
def getRadius(Zn, faceHalf):
    """Cylinder-model radius from depth Zn and half face width, +5 margin."""
    chord_term = Zn * Zn + faceHalf * faceHalf
    return chord_term / (2 * Zn) + 5
def warping(phi,x,y,w,h,cy, Crv, frame_gpu, resultFrame_gpu, MaskFrame_gpu, w_gaze, cols, rows):
    """Vertically warp one eye region on the GPU using a cylinder head model.

    (x, y, w, h) is the detected eye box (expanded below); cy the eye's
    vertical position used for the tilt angle; Crv a per-column curve LUT.
    Launches the 'warping' kernel and then the 'interpolation' kernel to
    fill rows the warp left empty.  Returns resultFrame_gpu (device buffer,
    modified in place).
    """
    ###Matching Z value using Cylinder Model
    f=int(64000/w)  # focal-length estimate inversely proportional to eye width
    #f=500
    w=int(w*1.2) # width scale factor (hand-tuned; could be user input at start)
    x=int(x-w/5)
    h=int(h*2.5)
    y=int(y-h/2)
    mov_info=[0]*(int(w*2))*(int(h*2)) # row-mapping array written by the kernel; recreated every call
    ZMatrix = np.empty((h, int(w*1.1))) # Z-value LUT
    #FaceOrigin = ( h/2, w/2 ) # temp value; reason: ZMatrix only covered the eye region
    FaceOrigin = np.array( (h/2, w/2) )
    faceHalf = w / 2 # temp value
    Zn = faceHalf # temp value
    ###
    #Crv = CreateVeg(x,y,w,h) # replaced with a different curve
    ###
    theta = np.float32(getTheta(phi,cy,f))
    r = getRadius(Zn,faceHalf) # cylinder-model radius
    x_eye=x
    y_eye=y
    # pyCuda: copy host buffers to the device
    mov_info_np = np.array(mov_info, dtype = np.int32)
    mov_info_gpu = cuda.mem_alloc(mov_info_np.nbytes)
    Crv_np = np.array(Crv, dtype = np.int32)
    Crv_gpu = cuda.mem_alloc(Crv_np.nbytes)
    ZMatrix = ZMatrix.astype(np.float32)
    ZMatrix_gpu = cuda.mem_alloc(ZMatrix.nbytes)
    cuda.memcpy_htod(mov_info_gpu, mov_info_np)
    cuda.memcpy_htod(Crv_gpu, Crv_np)
    cuda.memcpy_htod(ZMatrix_gpu, ZMatrix)
    # launch 'warping' over the (w x h) eye patch in 32x32 blocks
    func = mod.get_function("warping")
    bdim = (32, 32, 1)
    dx, mx = divmod(w, bdim[0])
    dy, my = divmod(h, bdim[1])
    gdim = (dx + (mx > 0), dy + (my > 0))
    func(frame_gpu, ZMatrix_gpu, mov_info_gpu, Crv_gpu, resultFrame_gpu, MaskFrame_gpu, np.float32(FaceOrigin[1]), np.int32(x_eye), np.int32(y_eye), np.float32(Zn), np.float32(r), np.float32(theta), np.int32(h), np.int32(w), np.int32(len(ZMatrix[0])), np.int32(cols), np.int32(rows), np.float32(w_gaze), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    # launch 'interpolation' to fill gaps using mov_info / MaskFrame
    func = mod.get_function("interpolation")
    bdim = (32, 32, 1)
    dx, mx = divmod(int(h-1), bdim[0])
    dy, my = divmod(int(w-1), bdim[1])
    gdim = (dx + (mx > 0), dy + (my > 0))
    func(frame_gpu, mov_info_gpu, resultFrame_gpu, MaskFrame_gpu, np.int32(x), np.int32(y), np.int32(h), np.int32(w), np.int32(cols), np.int32(rows), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    return resultFrame_gpu
def horizontalCorrection(resultFrame, frame_gpu, eyeDlibPt, avg, PupilMovVec, PupilSquaredRadius, upperCurve, lowerCurve):
    """Shift the pupil horizontally by PupilMovVec inside the eyelid curves.

    eyeDlibPt: the six dlib landmarks of one eye; avg: pupil centre (x, y);
    upperCurve / lowerCurve: parabola coefficients of the eyelids.
    Runs the 'horizontalCorrection' kernel, then 'smooth' to soften the
    eyelid seam, and returns the corrected frame as a uint8 host array.
    """
    # temporary hard-coded frame width -- TODO confirm against actual frame
    cols=640
    # bounding rows/columns of the eye region derived from the landmarks
    h_start = int((eyeDlibPt[1][1] + eyeDlibPt[2][1])/2 - 2)
    h_end = int((eyeDlibPt[4][1] + eyeDlibPt[5][1])/2 + 3)
    w_start = int(eyeDlibPt[0][0])
    w_end = int(eyeDlibPt[3][0])
    # upload working buffers (kernel expects 32-bit ints/floats)
    resultFrame = resultFrame.astype(np.uint32)
    avg = avg.astype(np.float32)
    upperCurve = upperCurve.astype(np.float32)
    lowerCurve = lowerCurve.astype(np.float32)
    resultFrame_gpu = cuda.mem_alloc(resultFrame.nbytes)
    avg_gpu = cuda.mem_alloc(avg.nbytes)
    upperCurve_gpu = cuda.mem_alloc(upperCurve.nbytes)
    lowerCurve_gpu = cuda.mem_alloc(lowerCurve.nbytes)
    cuda.memcpy_htod(resultFrame_gpu, resultFrame)
    cuda.memcpy_htod(avg_gpu, avg)
    cuda.memcpy_htod(upperCurve_gpu, upperCurve)
    cuda.memcpy_htod(lowerCurve_gpu, lowerCurve)
    func = mod.get_function("horizontalCorrection")
    w = w_end - w_start
    h = h_end - h_start
    bdim = (32, 32, 1)
    dx, mx = divmod(w, bdim[0])
    dy, my = divmod(h, bdim[1])
    gdim = (dx + (mx>0), dy + (my>0))
    func(resultFrame_gpu, frame_gpu, avg_gpu, upperCurve_gpu, lowerCurve_gpu, np.int32(PupilMovVec), np.int32(PupilSquaredRadius), np.int32(cols), np.int32(h_start), np.int32(h_end), np.int32(w_start), np.int32(w_end), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    # second pass: blur pixels lying on the eyelid parabolas
    func = mod.get_function("smooth")
    w = w_end - w_start
    bdim = (32, 1, 1)
    dx, mx = divmod(w, bdim[0])
    dy, my = divmod(1, bdim[1])
    gdim = (dx + (mx>0), dy + (my>0))
    func(resultFrame_gpu, upperCurve_gpu, lowerCurve_gpu, np.int32(w_start), np.int32(w_end), np.int32(cols), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    cuda.memcpy_dtoh(resultFrame, resultFrame_gpu)
    resultFrame = resultFrame.astype(np.uint8)
    return resultFrame
# Detect the pupil centre of each eye from its dark pixels.
def detectPupilCenter(frame_gpu, eyeDlibPtL, eyeDlibPtR, cols, preAvgLx, preAvgLy, preAvgRx, preAvgRy, upperCurve_l, lowerCurve_l, upperCurve_r, lowerCurve_r):
    """Find both pupil centres by averaging dark-pixel coordinates on the GPU.

    Runs the pupilCheckL / pupilCheckR kernels over each eye's bounding box
    (restricted to between the eyelid parabolas); every pixel darker than
    (80, 80, 80) contributes its coordinates.  If either eye yields no dark
    pixels, the previous frame's averages (preAvg*) are reused.
    Returns (avgLx, avgLy, avgRx, avgRy, preAvgLx, preAvgLy, preAvgRx,
    preAvgRy, frame_gpu).
    """
    # one slot per pixel of the left-eye box; zeros mean "not a pupil pixel"
    PupilLocationLx = np.array([0] * (int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2) - int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2)) * (eyeDlibPtL[3][0] - eyeDlibPtL[0][0]), dtype = np.int32)
    PupilLocationLy = np.array([0] * (int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2) - int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2)) * (eyeDlibPtL[3][0] - eyeDlibPtL[0][0]), dtype = np.int32)
    PupilLocationLx_gpu = cuda.mem_alloc(PupilLocationLx.nbytes)
    PupilLocationLy_gpu = cuda.mem_alloc(PupilLocationLy.nbytes)
    PupilLocationRx = np.array([0] * (int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2) - int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2)) * (eyeDlibPtR[3][0] - eyeDlibPtR[0][0]), dtype = np.int32)
    PupilLocationRy = np.array([0] * (int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2) - int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2)) * (eyeDlibPtR[3][0] - eyeDlibPtR[0][0]), dtype = np.int32)
    PupilLocationRx_gpu = cuda.mem_alloc(PupilLocationRx.nbytes)
    PupilLocationRy_gpu = cuda.mem_alloc(PupilLocationRy.nbytes)
    cuda.memcpy_htod(PupilLocationLx_gpu, PupilLocationLx)
    cuda.memcpy_htod(PupilLocationLy_gpu, PupilLocationLy)
    cuda.memcpy_htod(PupilLocationRx_gpu, PupilLocationRx)
    cuda.memcpy_htod(PupilLocationRy_gpu, PupilLocationRy)
    # eyelid parabola coefficients, uploaded as float32 for the kernels
    upperCurve_l = upperCurve_l.astype(np.float32)
    lowerCurve_l = lowerCurve_l.astype(np.float32)
    upperCurve_r = upperCurve_r.astype(np.float32)
    lowerCurve_r = lowerCurve_r.astype(np.float32)
    upperCurve_l_gpu = cuda.mem_alloc(upperCurve_l.nbytes)
    lowerCurve_l_gpu = cuda.mem_alloc(lowerCurve_l.nbytes)
    upperCurve_r_gpu = cuda.mem_alloc(upperCurve_r.nbytes)
    lowerCurve_r_gpu = cuda.mem_alloc(lowerCurve_r.nbytes)
    cuda.memcpy_htod(upperCurve_l_gpu, upperCurve_l)
    cuda.memcpy_htod(lowerCurve_l_gpu, lowerCurve_l)
    cuda.memcpy_htod(upperCurve_r_gpu, upperCurve_r)
    cuda.memcpy_htod(lowerCurve_r_gpu, lowerCurve_r)
    # left eye scan
    func = mod.get_function("pupilCheckL")
    bdim = (32, 32, 1)
    temph = int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2) - int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2)
    tempw = int(eyeDlibPtL[3][0] - eyeDlibPtL[0][0])
    dy, my = divmod(int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2) - int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2), bdim[0])
    dx, mx = divmod(int(eyeDlibPtL[3][0] - eyeDlibPtL[0][0]), bdim[1])
    gdim = (dx + (mx>0), dy + (my>0))
    func(PupilLocationLx_gpu, PupilLocationLy_gpu, frame_gpu, upperCurve_l_gpu, lowerCurve_l_gpu, np.int32(eyeDlibPtL[0][0]), np.int32((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2), np.int32(eyeDlibPtL[3][0] - eyeDlibPtL[0][0]), np.int32(cols), np.int32(tempw), np.int32(temph), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    cuda.memcpy_dtoh(PupilLocationLx, PupilLocationLx_gpu)
    cuda.memcpy_dtoh(PupilLocationLy, PupilLocationLy_gpu)
    # right eye scan
    func = mod.get_function("pupilCheckR")
    bdim = (32, 32, 1)
    temph = int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2) - int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2)
    tempw = int(eyeDlibPtR[3][0] - eyeDlibPtR[0][0])
    dy, my = divmod(int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2) - int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2), bdim[0])
    dx, mx = divmod(int(eyeDlibPtR[3][0] - eyeDlibPtR[0][0]), bdim[1])
    gdim = (dx + (mx>0), dy + (my>0))
    func(PupilLocationRx_gpu, PupilLocationRy_gpu, frame_gpu, upperCurve_r_gpu, lowerCurve_r_gpu, np.int32(eyeDlibPtR[0][0]), np.int32((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2), np.int32(eyeDlibPtR[3][0] - eyeDlibPtR[0][0]), np.int32(cols), np.int32(tempw), np.int32(temph), block = bdim, grid = gdim)
    pycuda.driver.Context.synchronize()
    cuda.memcpy_dtoh(PupilLocationRx, PupilLocationRx_gpu)
    cuda.memcpy_dtoh(PupilLocationRy, PupilLocationRy_gpu)
    # average only over non-zero entries; fall back to previous frame's
    # centres when any eye produced no dark pixels at all
    if len(PupilLocationLx.nonzero()[0]) != 0 and len(PupilLocationLy.nonzero()[0]) != 0 and len(PupilLocationRx.nonzero()[0]) != 0 and len(PupilLocationRy.nonzero()[0]) != 0:
        avgLx = sum(PupilLocationLx) / len(PupilLocationLx.nonzero()[0])
        avgLy = sum(PupilLocationLy) / len(PupilLocationLy.nonzero()[0])
        avgRx = sum(PupilLocationRx) / len(PupilLocationRx.nonzero()[0])
        avgRy = sum(PupilLocationRy) / len(PupilLocationRy.nonzero()[0])
        (preAvgLx, preAvgLy, preAvgRx, preAvgRy) = (avgLx, avgLy, avgRx, avgRy)
    else:
        (avgLx, avgLy, avgRx, avgRy) = (preAvgLx, preAvgLy, preAvgRx, preAvgRy)
    return (avgLx, avgLy, avgRx, avgRy, preAvgLx, preAvgLy, preAvgRx, preAvgRy, frame_gpu)
def computeCurve(p0, p1, p2):
    """Coefficients (a, b, c) of the parabola y = a*x^2 + b*x + c through
    the three (x, y) points p0, p1, p2."""
    xs = (p0[0], p1[0], p2[0])
    ys = (p0[1], p1[1], p2[1])
    vander = np.array([[px * px, px, 1] for px in xs])
    return np.linalg.solve(vander, np.array(ys))
#def computeCurvePt(curveP, pt):
# resultPt = np.array([pt, int(curveP[0] * pt * pt + curveP[1] * pt + curveP[2])])
# return resultPt
def setCurvePt(curveP, x, W, cols, add):
    """Rasterise the parabola curveP into a length-`cols` lookup table.

    Columns in [x, x + W) get int(a*i^2 + b*i + c - add); all other
    columns remain 0.
    """
    table = [0] * cols
    a, b, c = curveP[0], curveP[1], curveP[2]
    for col in range(x, x + W):
        table[col] = int(a * col * col + b * col + c - add)
    return table
# ๊ทผ์ ๊ณต์ ์ด์ฉํด์ y์ขํ ์ฃผ์ด์ก์ ๋ ๊ณก์ ๊ณผ ๋ง๋๋ x์ขํ ์ฐพ๊ธฐ
def computeCurvePx(UpperCurve, LowerCurve, py, curvePx_prev):
if UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py) < 0 or LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py) < 0:
return curvePx_prev
else:
x1_Up = int((-UpperCurve[1] - math.sqrt(UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py))) / (2 * UpperCurve[0]))
x2_Up = int((-UpperCurve[1] + math.sqrt(UpperCurve[1] * UpperCurve[1] - 4 * UpperCurve[0] * (UpperCurve[2] - py))) / (2 * UpperCurve[0]))
x1_Low = int((-LowerCurve[1] + math.sqrt(LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py))) / (2 * LowerCurve[0]))
x2_Low = int((-LowerCurve[1] - math.sqrt(LowerCurve[1] * LowerCurve[1] - 4 * LowerCurve[0] * (LowerCurve[2] - py))) / (2 * LowerCurve[0]))
x1 = x1_Low if x1_Up < x1_Low else x1_Up
x2 = x2_Up if x2_Low > x2_Up else x2_Low
return np.array([x1, x2])
#def IsLowerComparison(curveP, pt):
# if pt[1] < (curveP[0] * pt[0] * pt[0] + curveP[1] * pt[0] + curveP[2]):
# return True
# else:
# return False
#def IsUpperComparison(curveP, pt):
# if pt[1] > (curveP[0] * pt[0] * pt[0] + curveP[1] * pt[0] + curveP[2]):
# return True
# else:
# return False
# y์ขํ๊ฐ ์ฃผ์ด์ก์ ๋ ๋๋์์ ๋ง๋๋ ์ ์ฐพ๊ธฐ (O: ๋๋์ ์์ , R: ๋ฐ์ง๋ฆ, py: ์ฃผ์ด์ง y์ขํ)
def computeCirclePx(O, R, py, circlePx_prev):
if R * R - (py - O[0]) * (py - O[0]) < 0:
return circlePx_prev
else:
x1 = int(math.sqrt(R * R - (py - O[0]) * (py - O[0])) + O[1]) # ํฐ x
x2 = int(-math.sqrt(R * R - (py - O[0]) * (py - O[0])) + O[1]) # ์์ x
return np.array([x1, x2])
########################## Main ############################
#if cap.isOpened():
# ret, frame_prev=cap.read()
# frame_prev = cv2.resize(frame_prev,(640, 480),interpolation=cv2.INTER_AREA)
# rows, cols = frame_prev.shape[:2]
# rotation_matrix = cv2.getRotationMatrix2D((cols/2, rows/2), 270, 1)
# frame_prev = cv2.warpAffine(frame_prev, rotation_matrix, (cols, rows))
# gray_prev = cv2.cvtColor(frame_prev, cv2.COLOR_BGR2GRAY)
# Result_prev=frame_prev.copy()
########## Cam Loop #####################################################################################################################################################################################
#while(cap.isOpened()):
class MainRPC(object):
@staticmethod
def mainCorrection(inputs):
binary_array = base64.b64decode(inputs)
binary_np = np.frombuffer(binary_array, dtype=np.uint8)
frame = cv2.imdecode(binary_np, cv2.IMREAD_ANYCOLOR)
#####################################################3
eyeDlibPtL = []
eyeDlibPtR = []
# # ์ ์ฅ ํ๋ ์ ์
# frameSave = 20
#
# # ์ถ๊ฐ
# phi = 0 # 5(๊ธฐ๋ณธ ๋น์ท) ~ -5
theta = 0.45
w_gaze = 0.0
#
# cap = cv2.VideoCapture('vd5.mp4') # 960,720
#
# RIGHT_EYE = list(range(36, 42))
# LEFT_EYE = list(range(42, 48))
#
# w_r = 15
# h_r = 15
#
# startflag = 0 # ์์flag(์ด๊ธฐ๊ฐ ์ค์ )
# warpflag_prev = 0 # ์ด์ ํ๋ ์ warp ์ฌ๋ถ 1:wapred 2:original
#
# prevTime = 0
# ##################tmp for frame rate up conversion
#
# pad = 50
# ################################################
#
# detector = dlib.get_frontal_face_detector()
# # detector = dlib.cnn_face_detection_model_v1('shape_predictor_68_face_landmarks.dat')
# predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat') # ํ์ต๋ชจ๋ธ ๋ก๋
#
# # range๋ ๋๊ฐ์ด ํฌํจ์/ํน์ง๋ง๋ค ๋ฒํธ ๋ถ์ฌ
# RIGHT_EYE = list(range(36, 42))
# LEFT_EYE = list(range(42, 48))
#
# index = LEFT_EYE + RIGHT_EYE
PupilMovVec_L = 0
PupilMovVec_R = 0
PupilSquaredRadius = 10
preAvgLx = 0
preAvgLy = 0
preAvgRx = 0
preAvgRy = 0
# mvinfo_prev = []
# plist_prev = []
# eyeDlibPtL_prev = []
# eyeDlibPtR_prev = []
# region_prev = [10, 10, 10, 10]
#################################################
##์ ์ญ์ด์ด์ผํ๋๋ฐ ์ ์ญ์ผ๋ก ์ธ์์๋๋๋ณ์๋ค
region_prev = [10, 10, 10, 10]
startflag = 0
plist_prev = []
##
print("first code test")
gray_prev = cv2.cvtColor(frame_prev, cv2.COLOR_BGR2GRAY)
prevTime = 0
Result_prev=frame_prev.copy()
##################tmp for frame rate up conversion
pad=50
################################################
# range๋ ๋๊ฐ์ด ํฌํจ์/ํน์ง๋ง๋ค ๋ฒํธ ๋ถ์ฌ
RIGHT_EYE = list(range(36, 42))
LEFT_EYE = list(range(42, 48))
index = LEFT_EYE + RIGHT_EYE
########## Cam Loop#####################################################################################################################################################################################
frame = cv2.resize(frame,(640, 480),interpolation=cv2.INTER_AREA)
rows, cols = frame.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((cols/2, rows/2), 270, 1)
#frame = cv2.warpAffine(frame, rotation_matrix, (cols, rows))
gray = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
gray = cv2.blur(gray, (3, 3), anchor=(-1, -1), borderType=cv2.BORDER_DEFAULT)
gray = cv2.equalizeHist(gray)
# 1. Eye Detecting
#list_points = pointExtraction(frame,gray,detector,predictor)
list_points = pointExtraction(frame,gray,detector,predictor, eyeDlibPtL, eyeDlibPtR)
###################################################################################
ResultFrame=frame.copy()
ResultFrame_h=frame.copy()
TrackingFrame = frame.copy() # Warping ๊ฒฐ๊ณผ Frame
MaskFrame=frame.copy() # Warping ๊ฐ flag ๋ฐฐ์ด(for interpolation)
#detected_f = faces.detectMultiScale(gray, 1.3, 5) #Face(detected)
#detected = eyes.detectMultiScale(gray, 1.3, 5) #Eyes(detected)
warpflag=1 #warp์ด๊ธฐ๊ฐ
if len(list_points)<68 or abs(eyeDlibPtL[0][1] - eyeDlibPtL[3][1]) > (eyeDlibPtL[3][0] - eyeDlibPtL[0][0]) / 2 or abs(eyeDlibPtR[0][1] - eyeDlibPtR[3][1]) > (eyeDlibPtR[3][0] - eyeDlibPtR[0][0]) / 2 :
#wapredflag==0์ผ๋ก
print("warpedflag=0 or can't find eye or etc...")
#print(list_points)
warpflag=0
# 2. Gaze Estimation
#gazeVec = ExtractGazeVector() #์์ฑ ์์
if startflag == 0:
print("start")
startflag = 1
else:
if warpflag==1 :
#warpflag_prev ํ๋จ
#cx,cy=tuple(np.average(eyeDlibPtL,0))
#cx_r,cy_r=tuple(np.average(eyeDlibPtR,0))
#cv2.circle(frame,(int(cx),int(cy)),2,(0,0,255),-1)
#cv2.circle(frame,(int(cx_r),int(cy_r)),2,(0,0,255),-1)
############################################
#left eye
#x=list_points[36][0]
#y=int((list_points[37][1]+list_points[38][1])/2)
#w=int((list_points[39][0]-x)*1.5)
#h=int((list_points[41][1]-y)*1.5)
x=list_points[36][0]
y=int((list_points[37][1]+list_points[38][1])/2)
w=int((list_points[39][0]-x)*1.1)
h=int((list_points[41][1]-y)*1.5)
#right eye
x_r=list_points[42][0]
y_r=int((list_points[43][1]+list_points[44][1])/2)
w_r=int((list_points[45][0]-x_r)*1.5)
h_r=int((list_points[47][1]-y_r)*1.5)
PupilSquaredRadius = int((eyeDlibPtL[3][0] - eyeDlibPtL[0][0])/3.3)
######################### tracking ์์ ํ #############################
diffsum = 999
x_p,y_p,w_p,h_p = region_prev
for i in range(y_p, y_p + h_p):
for j in range(x_p, x_p + w_p):
diffsum = diffsum + abs(int(gray[i][j]) - int(gray_prev[i][j]))
diffsum = diffsum/(w_p * h_p)
if diffsum < 5 and len(plist_prev) != 0 and len(eyeDlibPtL_prev) != 0 and len(eyeDlibPtR_prev) != 0:
#print("not move")
list_points = plist_prev
eyeDlibPtL = eyeDlibPtL_prev.copy()
eyeDlibPtR = eyeDlibPtR_prev.copy()
#for i in list_points:
# cv2.circle(frame, (i[0],i[1]), 2, (0, 255, 0), -1)
#for i in eyeDlibPtL:
# cv2.circle(frame, (i[0],i[1]), 2, (0, 255, 0), -1)
#for i in eyeDlibPtR:
# cv2.circle(frame, (i[0],i[1]), 2, (0, 255, 0), -1)
region_prev = [x,y,w,h]
##########################################
h_start_r = int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2 - 2)
h_end_r = int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2 + 2)
w_start_r = eyeDlibPtR[0][0] + 2
w_end_r = eyeDlibPtR[3][0] + 3
h_start_l = int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2 - 4)
h_end_l = int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2 + 2)
w_start_l = eyeDlibPtL[0][0]
w_end_l = eyeDlibPtL[3][0] + 3
upperCurve_l = computeCurve((w_start_l, eyeDlibPtL[0][1] - 3), (int((eyeDlibPtL[1][0] + eyeDlibPtL[2][0])/2), h_start_l), (w_end_l, eyeDlibPtL[3][1]))
lowerCurve_l = computeCurve((w_start_l , eyeDlibPtL[0][1]),(int((eyeDlibPtL[4][0] + eyeDlibPtL[5][0])/2), h_end_l), (w_end_l, eyeDlibPtL[3][1]))
upperCurve_r = computeCurve((w_start_r, eyeDlibPtR[0][1] - 3), (int((eyeDlibPtR[1][0] + eyeDlibPtR[2][0])/2), h_start_r), (w_end_r, eyeDlibPtR[3][1]))
lowerCurve_r = computeCurve((w_start_r , eyeDlibPtR[0][1]),(int((eyeDlibPtR[4][0] + eyeDlibPtR[5][0])/2), h_end_r), (w_end_r, eyeDlibPtR[3][1]))
frame = frame.astype(np.uint32)
frame_gpu = cuda.mem_alloc(frame.nbytes)
cuda.memcpy_htod(frame_gpu, frame)
# ๋๋์ ์ ๋๋์ ์ค์ฌ ๊ฒ์ถ
(avgLx, avgLy, avgRx, avgRy, preAvgLx, preAvgLy, preAvgRx, preAvgRy, frame_gpu) = detectPupilCenter(frame_gpu, eyeDlibPtL, eyeDlibPtR, cols, preAvgLx, preAvgLy, preAvgRx, preAvgRy, upperCurve_l, lowerCurve_l, upperCurve_r, lowerCurve_r)
#cv2.circle(ResultFrame, (int(avgLx), int(avgLy)), 3, (0,0,255), -1)
#cv2.circle(ResultFrame, (int(avgRx), int(avgRy)), 3, (0,0,255), -1)
##########################################
# ๊ณก์ ๊ณ์ฐ
tempwL = eyeDlibPtL[3][0] - eyeDlibPtL[0][0]
tempwR = eyeDlibPtR[3][0] - eyeDlibPtR[0][0]
add = 15 # ๋๋งค ์ ์ด๋๊น์ง ๊ต์
# ์ํ ์ํ ๊ณก์ ๋์น์ด๋ ๋ ์ค๊ฐ ์ ๋
CrvL = setCurvePt(computeCurve(eyeDlibPtL[0], (int((eyeDlibPtL[1][0] + eyeDlibPtL[2][0])/2), int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2)), eyeDlibPtL[3]), x - int(tempwL * 0.3), int(tempwL * 1.6), cols, add)
CrvR = setCurvePt(computeCurve(eyeDlibPtR[0], (int((eyeDlibPtR[1][0] + eyeDlibPtR[2][0])/2), int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2)), eyeDlibPtR[3]), x_r - int(tempwR * 0.3), int(tempwR * 1.6), cols, add)
##5. Left-Right Correction
avgL = np.array((avgLx, avgLy))
avgR = np.array((avgRx, avgRy))
tempEyeW_L = eyeDlibPtL[3][0] - eyeDlibPtL[0][0]
tempEyeW_R = eyeDlibPtR[3][0] - eyeDlibPtR[0][0]
# ๋ ์๊ฐ์์ ๋๋ง ์ฒ๋ฆฌ
if eyeDlibPtL[5][1] - eyeDlibPtL[1][1] > tempEyeW_L * 0.2:
# # ๋๋์๊ฐ ์ด๋ ์ ๋ ๋ฒ์ ์์ ๋ค์ด์์ผ ๊ต์
# #if avgL[0] > eyeDlibPtL[0][0] + tempEyeW_L * 2 / 7 and avgL[0] < eyeDlibPtL[0][0] + tempEyeW_L * 5 / 7 and avgR[0] > eyeDlibPtR[0][0] + tempEyeW_R * 2 / 7 and avgR[0] < eyeDlibPtR[0][0] + tempEyeW_R * 5 / 7:
# ResultFrame = horizontalCorrection(ResultFrame, frame, eyeDlibPtL, avgL, PupilMovVec_L, PupilSquaredRadius)
# ResultFrame = horizontalCorrection(ResultFrame, frame, eyeDlibPtR, avgR, PupilMovVec_R, PupilSquaredRadius)
#h_start_r = int((eyeDlibPtR[1][1] + eyeDlibPtR[2][1])/2 - 2)
#h_end_r = int((eyeDlibPtR[4][1] + eyeDlibPtR[5][1])/2 + 2)
#w_start_r = eyeDlibPtR[0][0] + 2
#w_end_r = eyeDlibPtR[3][0] + 3
#h_start_l = int((eyeDlibPtL[1][1] + eyeDlibPtL[2][1])/2 - 4)
#h_end_l = int((eyeDlibPtL[4][1] + eyeDlibPtL[5][1])/2 + 2)
#w_start_l = eyeDlibPtL[0][0]
#w_end_l = eyeDlibPtL[3][0] + 3
#upperCurve_l = computeCurve((w_start_l, eyeDlibPtL[0][1] - 3), (int((eyeDlibPtL[1][0] + eyeDlibPtL[2][0])/2), h_start_l), (w_end_l, eyeDlibPtL[3][1]))
#lowerCurve_l = computeCurve((w_start_l , eyeDlibPtL[0][1]),(int((eyeDlibPtL[4][0] + eyeDlibPtL[5][0])/2), h_end_l), (w_end_l, eyeDlibPtL[3][1]))
#upperCurve_r = computeCurve((w_start_r, eyeDlibPtR[0][1] - 3), (int((eyeDlibPtR[1][0] + eyeDlibPtR[2][0])/2), h_start_r), (w_end_r, eyeDlibPtR[3][1]))
#lowerCurve_r = computeCurve((w_start_r , eyeDlibPtR[0][1]),(int((eyeDlibPtR[4][0] + eyeDlibPtR[5][0])/2), h_end_r), (w_end_r, eyeDlibPtR[3][1]))
circlePx_prev = np.array((0, 0))
curvePx_prev = np.array((0, 0))
curvePx_avg_L = computeCurvePx(upperCurve_l, lowerCurve_l, avgL[1], curvePx_prev)
circlePx_avg_L = computeCirclePx((avgL[1], avgL[0]), PupilSquaredRadius, avgL[1], circlePx_prev)
curvePx_avg_R = computeCurvePx(upperCurve_r, lowerCurve_r, avgR[1], curvePx_prev)
circlePx_avg_R = computeCirclePx((avgR[1], avgR[0]), PupilSquaredRadius, avgR[1], circlePx_prev)
# ๋๋์๊ฐ ์ข์ฐ ๋๋งค์ ๋ฟ์ผ๋ฉด ์ข์ฐ ๊ต์ X
if circlePx_avg_L[0] > curvePx_avg_L[1] or circlePx_avg_L[1] < curvePx_avg_L[0] or circlePx_avg_R[0] > curvePx_avg_R[1] or circlePx_avg_R[1] < curvePx_avg_R[0]:
print("No Horizental Correction")
else:
ResultFrame = horizontalCorrection(ResultFrame, frame_gpu, eyeDlibPtL, avgL, PupilMovVec_L, PupilSquaredRadius, upperCurve_l, lowerCurve_l)
ResultFrame = horizontalCorrection(ResultFrame, frame_gpu, eyeDlibPtR, avgR, PupilMovVec_R, PupilSquaredRadius, upperCurve_r, lowerCurve_r)
ResultFrame = cv2.medianBlur(ResultFrame, 3)
ResultFrame_h = ResultFrame.copy()
ResultFrame = ResultFrame.astype(np.uint32)
ResultFrame_h = ResultFrame_h.astype(np.uint32)
MaskFrame = MaskFrame.astype(np.uint32)
ResultFrame_gpu = cuda.mem_alloc(ResultFrame.nbytes)
ResultFrame_h_gpu = cuda.mem_alloc(ResultFrame_h.nbytes)
MaskFrame_gpu = cuda.mem_alloc(MaskFrame.nbytes)
cuda.memcpy_htod(ResultFrame_gpu, ResultFrame)
cuda.memcpy_htod(ResultFrame_h_gpu, ResultFrame_h)
cuda.memcpy_htod(MaskFrame_gpu, MaskFrame)
#3. Warping_L
ResultFrame_h_gpu = warping(phi,x,y,w,h,avgL[1],CrvL, ResultFrame_gpu, ResultFrame_h_gpu, MaskFrame_gpu, w_gaze, cols, rows)
#4. Warping_R
ResultFrame_h_gpu = warping(phi,x_r,y_r,w_r,h_r,avgR[1],CrvR, ResultFrame_gpu, ResultFrame_h_gpu, MaskFrame_gpu, w_gaze, cols, rows)
cuda.memcpy_dtoh(ResultFrame_h, ResultFrame_h_gpu)
cuda.memcpy_dtoh(ResultFrame, ResultFrame_gpu)
cuda.memcpy_dtoh(frame, frame_gpu)
ResultFrame_h = ResultFrame_h.astype(np.uint8)
ResultFrame = ResultFrame.astype(np.uint8)
frame = frame.astype(np.uint8)
#cv2.circle(frame, (int(avgLx) + PupilSquaredRadius, int(avgLy)), 3, (0,0,255), -1)
#cv2.circle(frame, (int(avgRx) + PupilSquaredRadius, int(avgRy)), 3, (0,0,255), -1)
#############################################
else : # warpflag 0์ผ ๋
#warpflag_prev ํ
a=1
#print("warpflag=0")
plist_prev = list_points
eyeDlibPtL_prev = eyeDlibPtL.copy()
eyeDlibPtR_prev = eyeDlibPtR.copy()
#gray_prev = gray.copy()
#frame_prev=frame.copy() ## ์ด์ ํ๋ ์ frame rate up์ ์ํ
ResultFrame_h = cv2.medianBlur(ResultFrame_h, 3)
#frame = cv2.medianBlur(frame, 3)
curTime = time.time()
sec = curTime - prevTime
prevTime = curTime
fps = 1 / sec
str_fps = "FPS : %0.1f" % fps
cv2.putText(frame, str_fps, (0,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
cv2.imshow('Frame',frame)
cv2.imshow('ResultFrame',ResultFrame)
cv2.imshow('ResultFrame_h',ResultFrame_h)
_, imen = cv2.imencode('.jpeg', ResultFrame)
imenb = imen.tobytes()
result = base64.b64encode(imenb).decode()
#print(result)
return 1
# key = cv2.waitKey(1)
# # '1' ๋๋ฅด๋ฉด phi ์ค์ฌ์ ๊ต์ ๋ง์ด ๋๊ฒ '2' ๋๋ฅด๋ฉด phi ๋์ฌ์ ๊ธฐ์กด์ ๊ฐ๊น๊ฒ / ์ต๋ ์ต์๊ฐ ์ ํด๋๊ธฐ
# if key == ord('1'):
# if w_gaze > 0.0:
# w_gaze -= 0.05
# elif key == ord('2'):
# if w_gaze < 0.3:
# w_gaze += 0.05
# elif key == ord('3'):
# if PupilMovVec_L > -8:
# PupilMovVec_L -= 1
# PupilMovVec_R -= 1
# elif key == ord('4'):
# if PupilMovVec_L < 8:
# PupilMovVec_L += 1
# PupilMovVec_R += 1
#if key == ord('q'):
#break
#else:
# break
#cap.release()
# Expose MainRPC over ZeroRPC on TCP port 4242 and serve requests forever.
# NOTE(review): s.run() blocks the main thread; assumes MainRPC is the RPC
# handler class defined earlier in this file — confirm against full source.
s = zerorpc.Server(MainRPC())
s.bind("tcp://*:4242")
s.run()
#################################################################################################################################################################################################### |
996,997 | 30b01587e9fed94c3a5cafdda064cee72595614b | # Write your MySQL query statement below
-- Report every Email that appears more than once in Person.
SELECT Email
FROM Person
GROUP BY Email
HAVING COUNT(Email) > 1
|
996,998 | 3011d55c79647729100c9cc7ac2ef303ee658ebd | # major, minor, revision
# major 1, 2, 3
# minor new feature 1.0, 1.1, 1.2
# revision small fix 1.1.1, 1.1.2
# 0.1, 0.5... pre-release versions
def get_length(version_str):
    """Return the digit widths of (major, minor, revision) as a 3-tuple.

    Missing components contribute width 0, e.g. "1.0" -> (1, 1, 0) and
    "2" -> (1, 0, 0).
    """
    widths = [len(part) for part in version_str.split('.')]
    if len(widths) == 3:
        return tuple(widths)
    if len(widths) == 2:
        return (widths[0], widths[1], 0)
    # One component (or anything unexpected): only the major width is kept.
    return (widths[0], 0, 0)
def solution(l):
    """Return the version strings in *l* sorted ascending by major, minor, revision.

    Shorter versions sort before longer ones sharing the same prefix
    ("1" < "1.0" < "1.0.0").

    Fix: the previous digit-width ranking could collide at slot boundaries —
    with a maximum minor width of 2, rank("1.99") == rank("2")
    (2*10^(m+r) + 100*10^r == 3*10^(m+r)), so such neighbours kept their
    input order instead of sorting correctly. Comparing the split integer
    components lexicographically cannot overflow between positional slots,
    and a strict prefix list compares less than any longer list, which gives
    exactly the required "1" < "1.0" ordering.
    """
    def version_key(version_str):
        # Lists of ints compare element-wise, shorter-prefix-first.
        return [int(part) for part in version_str.split('.')]

    return sorted(l, key=version_key)
if __name__ == '__main__':
    # Regression cases: prefix versions sort first, components compare
    # numerically (so "1.11" > "1.2").
    cases = [
        (["1.1.2", "1.0", "1.3.3", "1.0.12", "1.0.2"],
         ["1.0", "1.0.2", "1.0.12", "1.1.2", "1.3.3"]),
        (["1", "1.0", "1.0.0"], ["1", "1.0", "1.0.0"]),
        (["1.0", "1", "1.0.0"], ["1", "1.0", "1.0.0"]),
        (["1.11", "2.0.0", "1.2", "2", "0.1", "1.2.1", "1.1.1", "2.0"],
         ['0.1', '1.1.1', '1.2', '1.2.1', '1.11', '2', '2.0', '2.0.0']),
    ]
    for versions, expected in cases:
        assert solution(versions) == expected
|
996,999 | 7be00fced609aa164c23eb1c98d5b89eb1f326ee | #!/usr/bin/env python3
import time
import csv
DATA_FILE = "data.csv"
def load(tasks, path=None):
    """Append tasks parsed from a timesheet CSV file to *tasks*.

    Each row is ``name,estimate,hours``; hours are converted to seconds in
    the resulting dict's ``time_taken`` field. *path* defaults to the
    module-level DATA_FILE, so existing ``load(tasks)`` call sites are
    unchanged.

    Raises:
        ValueError: on a malformed row, chained to the underlying error.
            (The previous bare ``except`` also swallowed KeyboardInterrupt
            and SystemExit; ValueError is a subclass of Exception, so old
            ``except Exception`` callers still work.)
    """
    if path is None:
        path = DATA_FILE
    with open(path) as csvfile:
        timesheetData = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in timesheetData:
            try:
                tasks.append({"name": row[0],
                              "estimate": row[1],
                              "time_taken": float(row[2]) * (60 * 60)})
            except (IndexError, ValueError) as err:
                raise ValueError("error parsing row: {}".format(row)) from err
def save(tasks, path=None):
    """Write *tasks* to the timesheet CSV, converting seconds back to hours.

    *path* defaults to the module-level DATA_FILE, keeping existing
    ``save(tasks)`` call sites working.

    Fix: the file is opened with ``newline=''`` as the csv module requires,
    preventing blank rows being written on Windows.
    """
    if path is None:
        path = DATA_FILE
    with open(path, 'w', newline='') as csvfile:
        timesheetData = csv.writer(csvfile, delimiter=',', quotechar='|',
                                   quoting=csv.QUOTE_MINIMAL)
        for task in tasks:
            timesheetData.writerow([task["name"],
                                    task["estimate"],
                                    task["time_taken"] / (60 * 60)])
def choose_task(tasks):
    """Prompt the user until a valid task index is entered; return it.

    Prints the numbered task menu, re-prompting on non-integer input and on
    out-of-range indices.

    Fix: the bare ``except:`` around the int() conversion also caught
    KeyboardInterrupt/SystemExit, making the loop hard to interrupt; it is
    narrowed to ValueError, the only error int() raises on bad input here.
    """
    while True:
        print("Available tasks:")
        for index, task in enumerate(tasks):
            print("{}: {}".format(index, task["name"]))
        task_index = input("Choose task:")
        try:
            task_index = int(task_index)
        except ValueError:
            # Non-numeric input: show the menu again.
            continue
        if 0 <= task_index < len(tasks):
            return task_index
if __name__ == "__main__":
    # Interactive time-tracking loop: load tasks, time how long the user
    # stays on the chosen task, and persist after every interval.
    tasks = []
    load(tasks)
    while True:
        task_index = choose_task(tasks)
        # The elapsed time is measured around the blocking input() call:
        # the wall-clock time the prompt sits waiting IS the time spent on
        # the selected task.
        start_time = time.time()
        answer = input("New task? (n for no)").lower()
        stop_time = time.time()
        tasks[task_index]["time_taken"] += stop_time - start_time
        # Save every iteration so a crash loses at most one interval.
        save(tasks)
        if answer == "n":
            break
    save(tasks)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.