seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
43594129465 | import getopt
import os
import random
import re
import string
import sys
import jsonpickle
from model.contact import Contact
# Parse CLI options: -n <number of contacts>, -f <output file path>.
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    # bug fix: getopt has no usage() function; report the parse error instead
    print(err)
    sys.exit(2)
n = 5  # default: number of contacts to generate
f = "data/contacts.json"  # default: output file, relative to project root
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* followed by up to *maxlen* random characters.

    The symbol pool is letters, digits and (heavily weighted) spaces;
    whitespace runs are then collapsed and the result stripped via clear().
    """
    symbols = string.ascii_letters + string.digits + " " * 10
    # renamed local: the original shadowed the builtin `str`
    text = prefix + "".join(random.choice(symbols) for _ in range(random.randrange(maxlen)))
    return clear(text.strip())
def random_mail(maxlen):
    """Build a random e-mail-like string: <local>@<domain>.<3-char tld>."""
    alphabet = string.ascii_letters + string.digits

    def chunk(count):
        return "".join(random.choice(alphabet) for _ in range(count))

    local = chunk(random.randrange(maxlen // 2))
    domain = chunk(random.randrange(maxlen // 2))
    return local + "@" + domain + "." + chunk(3)
def random_month():
    """Return a random English month name."""
    month_names = ("January", "February", "March", "April", "May", "June",
                   "July", "August", "September", "October", "November", "December")
    return random.choice(month_names)
def random_day(maxday):
    """Return a random day of the month in 1..maxday inclusive.

    Bug fix: the original returned 0..maxday-1, and day 0 is not a
    valid calendar day for a date of birth.
    """
    return random.randint(1, maxday)
def random_year(minyear, maxyear):
    """Return a random year in the half-open range [minyear, maxyear)."""
    return random.randrange(minyear, maxyear)
def random_phone(prefix, maxlen):
    """Return *prefix* plus up to *maxlen* phone characters (digits, spaces, parens, dash)."""
    pool = string.digits + " " * 10 + "()" + "-"
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(pool) for _ in range(length))
def clear(s):
    """Collapse every whitespace run in *s* to a single space.

    Bug fix: the pattern is now a raw string; ``"\\s+"`` without the ``r``
    prefix is an invalid escape sequence (DeprecationWarning, and an error
    in future Python versions).
    """
    return re.sub(r"\s+", " ", s)
# Generate n random Contact records for use as test data.
testdata = []
for _ in range(n):
    contact = Contact(first_name=random_string("TestName", 15), middle_name=random_string("TestMiddle", 15),
                      last_name=random_string("TestLat", 15),
                      nickname=random_string("NickTest", 10), title=random_string("TestTitle", 10),
                      company=random_string("Test Co", 15),
                      address=random_string("Address", 40), homephone=random_phone("+", 15), workphone=random_phone("+3(75)", 11),
                      mobilephone=random_phone("8(029)", 12),
                      email=random_mail(16), email2=random_mail(10), email3=random_mail(20),
                      homepage=random_string("somepage", 5), day_of_birth=str(random_day(28)),
                      month_of_birth=random_month(), year_of_birth=str(random_year(1900, 2018)))
    testdata.append(contact)
# Resolve the output path relative to the project root (one level above this script).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    # Pretty-print the generated JSON with 2-space indentation.
    jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata)) | xd2006/python_st | generator/contact.py | contact.py | py | 2,534 | python | en | code | 0 | github-code | 13 |
3889320392 | from models.voucher_type import VoucherType, db
# Get all VoucherTypes
def find_all():
    """Return every VoucherType row in the database."""
    return VoucherType.query.all()
# Get VoucherTypes by filtering
# By id
def find_by_id(id):
    """Return the VoucherType with the given primary key, or None if absent."""
    return VoucherType.query.filter_by(id=id).first()
# Insert data
def insert(json_data):
    """Create a VoucherType from *json_data* and persist it.

    Returns True on success, False if deserialization or the commit fails.
    """
    try:
        voucher_type = VoucherType.from_json(json_data)
        db.session.add(voucher_type)
        db.session.commit()
        return True
    except Exception:
        # narrow from a bare `except:`; roll back so the session stays usable
        db.session.rollback()
        return False
# Update data
def update_by_id(id, data):
    """Apply the *data* mapping to the VoucherType with the given id.

    Returns True on success, False if the update or commit fails.
    """
    try:
        # bulk UPDATE via the query; the original bound the (unused) row count
        VoucherType.query.filter_by(id=id).update(data)
        db.session.commit()
        return True
    except Exception:
        # narrow from a bare `except:`; roll back so the session stays usable
        db.session.rollback()
        return False
# Delete data
def delete_by_id(id):
    """Delete the VoucherType with the given id.

    Returns True on success, False when the row does not exist or the
    delete/commit fails.
    """
    try:
        voucher_type = find_by_id(id)
        if voucher_type is None:
            # explicit miss instead of relying on delete(None) to raise
            return False
        db.session.delete(voucher_type)
        db.session.commit()
        return True
    except Exception:
        # narrow from a bare `except:`; roll back so the session stays usable
        db.session.rollback()
        return False
| NXTung1102000/InformationSystemIntegration | backend/repository/voucher_type_repo.py | voucher_type_repo.py | py | 866 | python | en | code | 0 | github-code | 13 |
24550411459 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 17:06:24 2020
Experiment code - temporal masking, blocked-design
intact, negated and scrambled faces with their phase scrambled mask.
4 durations
@author: jschuurmans
"""
#%% =============================================================================
# imports
from psychopy import visual, event, core, gui, data
import os
import numpy as np
import glob
from PIL import Image
import random
import copy
#%% =============================================================================
# a block contains 20 unique images + their mask
monRR = 60 # refresh rate on monitor is 60Hz
frame = 1000/monRR # one
durCond = [3, 5, 6, 9] #50, 83.33, 100, 150 ms
durCondNames = [str(int(durCond[0]*frame)),str(int(durCond[1]*frame)),str(int(durCond[2]*frame)),str(int(durCond[3]*frame))]
typCond = ['Int', 'Neg', 'Scr']
sfType = ['LSF', 'HSF']
nCond = len(durCond)*len(typCond)*len(sfType) #nr of conditions = 24
nBlockPerCond = 20 #nr of blocks per condition (in total)
nUniBlocks = int(nBlockPerCond/2) #nr of unique blocks per condition = 10 (10 sequences to make)
nBlocks = nCond*nBlockPerCond # 264 blocks in total
nRuns = 20 # runs for whole exp
nBlocksRun = nBlocks/nRuns # so... 24 blocks per run --> PICKLE IT :)
durBlock = 10 # seconds
nStim = 20 # stimuli per block
nPositions = 24 # 24 positions in a block (for stim distribution)
fixStEn = 12 # Duration of fixation at begin/end of run in ms
colourChange = (0.8, 1.0, 1.0) #(0, 1.0, 1.0) = too red
#%% =============================================================================
#paths
baseFolder = ''
#commented out, this is just for testing in Spyder
#baseFolder = 'C:\\Users\\jolien\\Documents\\3T_RPinV1\\recurrentSF_3T_CodeRepo\\mainExpCode\\'
dataPath = baseFolder + 'data'
stimPath = baseFolder + 'stimuli'
backPath = baseFolder + 'background'
seqLocation = baseFolder + 'sequence_withinBlock.txt'
#%% =============================================================================
# in case we need to shut down the expt
def esc():
    """Shut the experiment down cleanly when the participant pressed Escape.

    Reads the module-level globals ``last_response``, ``logfile``,
    ``eventfile`` and ``win``; closes the log files and window, then exits.
    """
    if 'escape' in last_response:
        logfile.close()
        eventfile.close()
        win.mouseVisible = True
        win.close()
        core.quit()  # bug fix: was `core.quit` (attribute access, never called)
#%% =============================================================================
# Store info about the experiment session
# Get subject participant ID and run nr through a dialog box
expName = 'Recurrent face processing in V1'
expInfo = {
'1. Participant ID': '',
'2. Run number': ('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20'),
'3. Screen hight in px': '1080', #1080
'4. Screen width in px': '1920', #1920
'5. Make sequence?': ('no','yes')
}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
# If 'Cancel' is pressed, quit
if dlg.OK == False:
core.quit()
# Get date and time
expInfo['date'] = data.getDateStr()
expInfo['expName'] = expName
dataPath = os.path.join(dataPath, expInfo['1. Participant ID'])
# Make sure there is a path to write away the data
if not os.path.isdir(dataPath):
os.makedirs(dataPath)
runNr = int(expInfo['2. Run number'])
scrsize = (int(expInfo['4. Screen width in px']),int(expInfo['3. Screen hight in px']))
# make a text file to save data with 'comma-separated-values'
eventName = expInfo['1. Participant ID'] + '_task-mainExp_run-' + expInfo['2. Run number'] + '_events.csv'
eventFname = os.path.join(dataPath, eventName)
dataName = expInfo['1. Participant ID'] + '_run' + expInfo['2. Run number'] + '_' + expInfo['date'] + '.csv'
dataFname = os.path.join(dataPath, dataName)
logfile = open(dataFname, 'w')
logfile.write('BlockNumber,PositionInRun,PositionInBlock,TrialNumber,ConditionName,SpatialFrequency,TrialStart,TrialDuration,StimDuration,MaskDuration,NumberOfStimulusFrames,ImageFileName,MaskFileName,BackFrame,CatchTrial,Keypress,ResponseStamp,ResponseTime\n')
eventfile = open(eventFname, 'w')
eventfile.write('onset, duration, trial_type\n')
stimSize = 550
if expInfo['5. Make sequence?'] == 'yes':
sequence_path = os.path.join(baseFolder + 'sequence_creator.py')
exec(open(sequence_path).read())
#%% =============================================================================
#make or load block order for participant
# Per-participant sequence files (block order, background order, stimulus order).
path_blockSeq = os.path.join(dataPath, expInfo['1. Participant ID'] + 'blockSeq.txt')
path_backSeq = os.path.join(dataPath, expInfo['1. Participant ID'] + 'backSeq.txt')
path_blockCount = os.path.join(dataPath, expInfo['1. Participant ID'] + 'blockCount.txt')
path_stimSeq = os.path.join(dataPath, expInfo['1. Participant ID'] + 'stimSeq.txt')
def _read_sequence_file(path):
    """Parse a sequence file into a list of int lists.

    Each line holds comma-separated ints with a trailing comma; the file's
    trailing empty line yields one empty list, which is dropped — the same
    behavior as the three duplicated loops this helper replaces.
    """
    sequences = []
    with open(path, 'r') as f:
        content = f.read()
    for item in content.split("\n"):
        line = item.split(',')
        line.remove('')  # drop the empty token left by the trailing comma
        sequences.append([int(i) for i in line])
    sequences.remove([])  # drop the list produced by the trailing empty line
    return sequences
blockSeq = _read_sequence_file(path_blockSeq)
backSeq = _read_sequence_file(path_backSeq)
stimSeq = _read_sequence_file(path_stimSeq)
#%% =============================================================================
#stim settings
runSeq = blockSeq[runNr-1] #sequence of blocks within the current run
faceNames = []
maskLSFNames = []
maskHSFNames = []
for times in range(nUniBlocks):
    # Background ids are zero-padded two digits: bg01..bg10.
    name = 'bg' + str(times + 1).zfill(2)
    stimSpecBack = sorted(glob.glob(os.path.join(stimPath, name + '*Stim*.bmp')))
    maskLSFSpecBack = sorted(glob.glob(os.path.join(stimPath, name + '*MaskLSF*.bmp')))
    maskHSFSpecBack = sorted(glob.glob(os.path.join(stimPath, name + '*MaskHSF*.bmp')))
    faceNames.append(stimSpecBack)
    maskLSFNames.append(maskLSFSpecBack)
    maskHSFNames.append(maskHSFSpecBack)
backNames = glob.glob(os.path.join(backPath, '*.bmp'))
backNames.sort()
allTrialsOrder = []
stimPos = list(range(nPositions)) #possible positions within a block
blockPos = 1
#For shuffle every run
blockCount = list(np.zeros(nCond)) #there are 24 conditions.
blockCount = [x+(runNr-1) for x in blockCount]
#creating a trials order for all
for blockNr in runSeq: #loop through blocks in specific run
stimSeqNr = int(blockNr+(blockCount[blockNr])) #i.e. block 1 starts with stim sequence 1
if stimSeqNr > 19: # there are only 20 sequences (0-19)..
stimSeqNr = int(stimSeqNr - 24) #so when an index is above 19.. start over from start (0)
trials = stimSeq[stimSeqNr] #get specific stim order for this block
trialNumber = 0
#select the correct back frame -> backSeq[block][run]
backType = backSeq[blockNr][int(blockCount[blockNr])]
if backType < 9:
backName = 'bg0' + str(backType+1)
else:
backName = 'bg' + str(backType+1)
back = [i for i in backNames if (backName + '.bmp') in i]
blockFaceNames = faceNames[backType]
blockMaskLSFNames = maskLSFNames[backType]
blockMaskHSFNames = maskHSFNames[backType]
# decide which trials will be catch trials
# 2 per block, one in first half other in second half
catchList = list(np.zeros(int(nPositions/2)))
catchList[0]=1
random.shuffle(catchList)
while catchList[0] == 1:
random.shuffle(catchList)
toAdd = copy.deepcopy(catchList)
random.shuffle(toAdd)
catchList.extend(toAdd)
#condition/block numbers, to make it more clear:
#LSF 50 83.3 100 150
#int 1 7 13 19
#neg 2 8 14 20
#scr 3 9 15 21
#HSF 50 83.3 100 150
#int 4 10 16 22
#neg 5 11 17 23
#scr 6 12 18 24
#,13,16,19,22
for position in stimPos: #if position contains no stims, no im/mask/trailnr
image = None
mask = None
maskType = None
trialNr = None
condiName = None
#if there is a trial for the specific position, give it correct timing info
if any(map((lambda value: value == blockNr), (0,1,2,3,4,5))): #50ms
stimFr = durCond[0]
duration = durCondNames[0] +'ms'
elif any(map((lambda value: value == blockNr), (6,7,8,9,10,11))): #83ms
stimFr = durCond[1]
duration = durCondNames[1] +'ms'
elif any(map((lambda value: value == blockNr), (12,13,14,15,16,17))):#100ms
stimFr = durCond[2]
duration = durCondNames[2] +'ms'
elif any(map((lambda value: value == blockNr), (18,19,20,21,22,23))):#150ms
stimFr = durCond[3]
duration = durCondNames[3] +'ms'
if any(map((lambda value: value == blockNr), (0,3,6,9,12,15,18,21))): #intact stim
trType = 0
elif any(map((lambda value: value == blockNr), (1,4,7,10,13,16,19,22))): #neg stim
trType = 20
elif any(map((lambda value: value == blockNr), (2,5,8,11,14,17,20,23))): #scr
trType = 40
if any(map((lambda value: value == blockNr), (0,1,2,6,7,8,12,13,14,18,19,20))): #LSF
maskset = blockMaskLSFNames;
elif any(map((lambda value: value == blockNr), (3,4,5,9,10,11,15,16,17,21,22,23))): #HSF
maskset = blockMaskHSFNames;
if position in trials:
#index = np.where(trials == position)
index = trials.index(position)
image = blockFaceNames[index+trType][-19:]
mask = maskset[index+trType][-22:]
maskType = mask[12:-7]
trialNumber += 1
trialNr = trialNumber
condiName = image[5:8] + '_' + duration
allTrialsOrder.append({'blockNr' : blockNr+1,
'posInRun': blockPos,
'posInBlock' : position+1,
'trialNr': trialNr,
'condName': condiName,
'stimFrames': stimFr,
'imageName': image,
'maskName': mask,
'maskType' : maskType,
'backFrame': back[0][-8:],
'nrOfBlockOccurenceInExp': blockCount[blockNr],
'catchTrial': catchList[position]})
blockPos += 1
blockCount[blockNr] += 1
trialsReady = data.TrialHandler(allTrialsOrder, nReps=1, method='sequential',
originPath=stimPath)
#%% =============================================================================
#loading the checkerboards for the last part of the run
checkerboards = []
checkerboards.append(glob.glob(os.path.join(dataPath, '*Back.bmp')))
checkerboards.append(glob.glob(os.path.join(dataPath, '*Face.bmp')))
#%% =============================================================================
#window setup
win = visual.Window(size=scrsize, color='grey', units='pix', fullscr=True)
#win.close()
frameRate = win.getActualFrameRate(nIdentical=60, nMaxFrames=100,
nWarmUpFrames=10, threshold=1)
print('framerate is', frameRate)
#cra
instruc01 = 'Welcome!\nHopefully you are comfortable and relaxed.\n\nDuring this experiment you will see faces flashed on the screen.\nThe only thing you should do\nis press a button when the colour changes.\n\nPress a button to continue.\n(1 -> buttonbox key)'
instruc01 = visual.TextStim(win, color='black', height=32, text=instruc01)
instruc02 = 'The experiment is about to start!\n\n Waiting for the scanner trigger.. (s)'
instruc02 = visual.TextStim(win, color='black',height=32,text=instruc02)
#create fixation cross
fix1=visual.Line(win,start=(-stimSize,-stimSize),end=(stimSize, stimSize),
pos=(0.0, 0.0),lineWidth=1.0,lineColor='black',units='pix')
fix2=visual.Line(win,start=(-stimSize,stimSize),end=(stimSize, -stimSize),
pos=(0.0, 0.0),lineWidth=1.0,lineColor='black',units='pix')
instruc01.draw()
win.flip()
while not '1' in event.getKeys():
core.wait(0.1)
instruc02.draw()
win.flip()
while not 's' in event.getKeys():
core.wait(0.1)
win.mouseVisible = False
# =============================================================================
# start stopwatch clock
clock = core.Clock()
clock.reset()
# =============================================================================
# clear any previous presses/escapes
last_response = ''; response_time = ''; reactionTime = '';
response = []
esc() # in case we need to shut down the expt
# =============================================================================
trialCount = 1
fr2 = 10
totalFr = 25 #total nr of trialframes is 25 = 416ms
trNum = 0
corrResp = 0; totalCatch = 0; ok = 2 #all necessary for the task
#draw fixation cross
fix1.setAutoDraw(True)
fix2.setAutoDraw(True)
fixEnd = 0
#win.close()
for trial in trialsReady:
if trialCount == 1 or trialCount % nPositions == 1: #beginning fixation
#if trialCount >= 4 and trialCount % nPositions == 1:
#win.saveMovieFrames(name) #for saving the exp trial -> saves all frames
#create catchlist for the following block
fixStart = clock.getTime() #start tracking time trialCount =30
if trialCount != 1:
toSave2 = str(fixEnd) + ',' + str((fixStart-fixEnd)) + ',' +str(trial['condName']) + '\n'
eventfile.write(toSave2)
win.flip()
stim1=[]
stim2=[]
stim3=[]
fr1=[]
fr3=[]
catchyCatch = []
#load images for the next block
for ii in range(nPositions):
if allTrialsOrder[trNum]['trialNr'] == None: #if the trial doesnt contain a stimulus
if allTrialsOrder[trNum]['catchTrial'] == True:
col = colourChange
else:
col = (1.0, 1.0, 1.0)
im1 = Image.open(os.path.join(backPath, allTrialsOrder[trNum]['backFrame']))
stim1.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im1,color=col))
im2 = Image.open(os.path.join(backPath, allTrialsOrder[trNum]['backFrame']))
stim2.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im2,color=col))
im3 = Image.open(os.path.join(backPath, allTrialsOrder[trNum]['backFrame']))
stim3.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im3,color=col))
fr1.append(fr2)
fr3.append((totalFr - fr2)-fr2)
else:
if allTrialsOrder[trNum]['catchTrial'] == True:
col = colourChange
else:
col = (1.0, 1.0, 1.0)
im1 = Image.open(os.path.join(stimPath, allTrialsOrder[trNum]['imageName']))
stim1.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im1,color=col))
im2 = Image.open(os.path.join(stimPath, allTrialsOrder[trNum]['maskName']))
stim2.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im2,color=col))
im3 = Image.open(os.path.join(backPath, allTrialsOrder[trNum]['backFrame']))
stim3.append(visual.ImageStim(win, size=[stimSize,stimSize],image=im3,color=col))
fr1.append(allTrialsOrder[trNum]['stimFrames'])
fr3.append((totalFr - fr2) - allTrialsOrder[trNum]['stimFrames'])
trNum += 1
#if clock hits the fixation time for start/end in seconds, end the fixation
loadEnd = clock.getTime()
loadTime = loadEnd-fixStart
x=1
if trialCount == 1:
while x==1:
fixNow = clock.getTime()
timeFix = fixNow-fixStart
if timeFix > (fixStEn-1): # time to fixate more then 11 seconds? end
x=2
else:
while x==1:
fixNow = clock.getTime()
timeFix = fixNow-fixStart
if timeFix > 9: # time to fixate more then 9 seconds? end
x=2
for nFrames in range(60): #last second of fixation start flipping, to prevent frame drops later on
win.flip()
fixEnd = clock.getTime()
toSave = str(int(trial['blockNr'])) + ',' + str(trial['posInRun']) +',0,0,'+ 'fixation,None,fix start: '+str(fixStart)+',fix dur: '+ str(round((timeFix)*1000)+1000) + ',load dur: ' + str(round(loadTime*1000)) + ',None,None,None,None,None,None,None,None,None\n'
logfile.write(toSave)
toSave2 = str(fixStart) + ',' + str((fixEnd-fixStart)) + ',fixation\n'
eventfile.write(toSave2)
print('fixation, dur: ' + str(round((timeFix)*1000)+1000) + ',load dur: ' + str(round(loadTime*1000)) + ' ms')
# name = (dataPath + str(trial['condName']) +'_'+ str(trial['maskType'])+'.png')#for saving the exp trial -> save name of frames
startTrial = clock.getTime()
response = event.getKeys(timeStamped=clock) #check for responses to target
esc()
if trial['catchTrial'] == True: #if its a catchtrail, start the clock
catchStart = clock.getTime()
totalCatch += 1
if ok == 0:
corrResp += 1
ok = 1
elif not response == [] and ok == 1: #check for responses to target
last_response = response[-1][0] # most recent response, first in tuple
response_time = response[-1][1]
ok = 0
reactionTime = (response_time - catchStart)*1000
print('CLICK!! The reactiontime is ', reactionTime, 'ms' )
for nFrames in range(fr1[trial['posInBlock']-1]): #stimulus
stim1[trial['posInBlock']-1].draw()
#win.getMovieFrame(buffer = 'back') #for saving the exp trial -> saves all frames
win.flip()
afterStim = clock.getTime()
stimDur = afterStim - startTrial
for nFrames in range(fr2): # mask
stim2[trial['posInBlock']-1].draw()
#win.getMovieFrame(buffer = 'back') #for saving the exp trial -> saves all frames
win.flip()
afterMask = clock.getTime()
maskDur = afterMask - afterStim
for nFrames in range(fr3[trial['posInBlock']-1]): #background
stim3[trial['posInBlock']-1].draw()
#win.getMovieFrame(buffer = 'back') #for saving the exp trial -> saves all frames
win.flip()
if not response == [] and ok == 1: #check for responses to target
last_response = response[-1][0] # most recent response, first in tuple
response_time = response[-1][1]
ok = 0
reactionTime = (response_time - catchStart)*1000
print('CLICK!! The reactiontime is ', reactionTime, 'ms' )
endTrial = clock.getTime()
trialDuration = round((endTrial-startTrial)*1000)
print(trial['condName'],' - ',trial['maskType'] ,'block:', int(trial['blockNr']),', trial', int(trialCount),
', trial time: ', round((endTrial-startTrial)*1000), 'ms')
toSave = str(trial['blockNr'])+','+str(trial['posInRun'])+','+str(trial['posInBlock'])+','+str(trial['trialNr']) +','+ str(trial['condName']) +','+ str(trial['maskType'])+','+ str(startTrial)+','+ str(trialDuration) +','+ str(round(stimDur*1000)) +','+ str(round(maskDur*1000)) +','+ str(trial['stimFrames']) +','+ str(trial['imageName']) +','+ str(trial['maskName']) +','+ str(trial['backFrame'])+','+ str(int(trial['catchTrial']))+','+str(last_response)+','+str(response_time)+','+str(reactionTime)+'\n'
logfile.write(toSave)
if not last_response == '': #empry responses if it's already logged
esc() # in case we need to shut down the expt
last_response = ''; response_time = ''; reactionTime = '';
response = []
trialCount += 1
if ok == 0:
corrResp += 1
#one more normal fixation
fixStart = clock.getTime()
for nFrames in range(600): # 600 = 10 seconds
win.flip()
fixNow = clock.getTime()
timeFix = fixNow-fixStart
toSave = str(int(trial['blockNr'])) + ',' + str(trial['posInRun']) +',0,0,'+ 'fixation,None,fix start: '+str(fixStart)+',fix dur: '+ str(round(timeFix)*1000) + ',None,None,None,None,None,None,None,None,None,None\n'
logfile.write(toSave)
toSave2 = str(fixStart) + ',' + str(timeFix) + ',fixation\n'
eventfile.write(toSave2)
#final face chackerboard, then background checkerboard
for checks in range(2): #checks=1 is face checks=0 is background
#per part, 10 seconds. 1 cicle (ori+inv) will show 4 times per sec.
checkerOri = visual.ImageStim(win=win,size=[stimSize,stimSize], image=Image.open(checkerboards[[checks][0]][1]))
checkerInv = visual.ImageStim(win=win,size=[stimSize,stimSize], image=Image.open(checkerboards[[checks][0]][0]))
checkerTimeStart= clock.getTime()
for times in range(30):
for nFrames in range(10): #6 frames = 100ms each -> 5Hz(or10)
checkerOri.draw()
win.flip()
for nFrames in range(10): #10 frames = 166.6ms each -> 3Hz (or6)
checkerInv.draw()
win.flip()
checkerTimeEnd = clock.getTime()
checkerTimeTotal = checkerTimeEnd-checkerTimeStart
print('it took ' + str(checkerTimeTotal) + 'ms')
if checks == 1:
checkName = 'face checkers'
else:
checkName = 'back checkers'
toSave = checkName + ',3Hz aka 6Hz,0,0,'+ 'checkerboard,None,checker start: '+str(checkerTimeStart)+',checker dur: '+ str(round(checkerTimeTotal)*1000) + ',None,'+str(checkerboards[[checks][0]][1][-19:])+','+str(checkerboards[[checks][0]][0][-19:])+',None,None,None,None,None\n'
logfile.write(toSave)
toSave2 = str(checkerTimeStart) + ',' + str(checkerTimeTotal) + ',' + str(checkerboards[[checks][0]][1][-19:]) + '\n'
eventfile.write(toSave2)
#finalfixationnnn
fixStart = clock.getTime()
for nFrames in range(monRR*fixStEn): # 12 sec --> end fixation*refreshrate
win.flip()
fixNow = clock.getTime()
timeFix = fixNow-fixStart
toSave = 'EndFixatione,final,0,0,'+ 'fixation,fix start: '+str(fixStart)+',fix dur: '+ str(round(timeFix)*1000) + ',None,None,None,None,None,None,None,None,None,None\n'
logfile.write(toSave)
toSave2 = str(fixStart) + ',' + str(timeFix) + ',fixation\n'
eventfile.write(toSave2)
fix1.setAutoDraw(False)
fix2.setAutoDraw(False)
win.mouseVisible = True
totExpDur = clock.getTime()
percCorr = (100/totalCatch)*corrResp
toSave = 'Total run duration: ' + str(totExpDur) + '\nPercentage correct = ' + str(percCorr)
logfile.write(toSave)
instruc03 = 'This is the end of run ' + str(runNr) + ' out of 20\n\nYou have a score of ' + str(round(percCorr)) + '%\nThank you for paying attention :)\n\nPress \'x\' to close the screen.'
instruc03 = visual.TextStim(win, color='black',height=32,text=instruc03)
instruc03.draw()
win.flip()
while not 'x' in event.getKeys():
core.wait(0.1)
print('time exp: ', int(clock.getTime()))
logfile.close()
eventfile.close()
win.close()
| jpschuurmans/CtF_7T_experiment | exampleCodes/experimentCode.py | experimentCode.py | py | 23,519 | python | en | code | 1 | github-code | 13 |
678235219 | import cProfile, pstats
from BVP import BVP_solver
from PDEs import Grid, BoundaryCondition
def profile_BVP_solver(grid, bc_left, bc_right, q, D, u_guess=None):
    """Run BVP_solver under cProfile, print cumulative-time stats, and return its result."""
    profiler = cProfile.Profile()
    profiler.enable()
    solution = BVP_solver(grid, bc_left, bc_right, q, D, u_guess)
    profiler.disable()
    # Summarize the collected profile, sorted by cumulative time.
    profile_stats = pstats.Stats(profiler)
    profile_stats.strip_dirs().sort_stats('cumulative').print_stats()
    return solution
# Smoke run: solve a trivial BVP (q=1, D=1, zero Dirichlet BCs on [0, 1]) under the profiler.
grid = Grid(100,0,1)
bc_left = BoundaryCondition('dirichlet',[-0],grid)
bc_right = BoundaryCondition('dirichlet',[0],grid)
profile_BVP_solver(grid,bc_left,bc_right,q=1,D=1)
17057271394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OpenPromoCamp(object):
    """Value object describing a promotion campaign in the Alipay open API.

    All fields are optional; only truthy fields are serialized by
    to_alipay_dict().  The previous per-field copy-paste in
    to_alipay_dict/from_alipay_dict is replaced by a single loop over
    _FIELDS, preserving behavior exactly.
    """

    # Every serializable field name, in stable order.
    _FIELDS = ('camp_alias', 'camp_desc', 'camp_end_time', 'camp_name',
               'camp_start_time', 'camp_type')

    def __init__(self):
        self._camp_alias = None
        self._camp_desc = None
        self._camp_end_time = None
        self._camp_name = None
        self._camp_start_time = None
        self._camp_type = None

    @property
    def camp_alias(self):
        return self._camp_alias

    @camp_alias.setter
    def camp_alias(self, value):
        self._camp_alias = value

    @property
    def camp_desc(self):
        return self._camp_desc

    @camp_desc.setter
    def camp_desc(self, value):
        self._camp_desc = value

    @property
    def camp_end_time(self):
        return self._camp_end_time

    @camp_end_time.setter
    def camp_end_time(self, value):
        self._camp_end_time = value

    @property
    def camp_name(self):
        return self._camp_name

    @camp_name.setter
    def camp_name(self, value):
        self._camp_name = value

    @property
    def camp_start_time(self):
        return self._camp_start_time

    @camp_start_time.setter
    def camp_start_time(self, value):
        self._camp_start_time = value

    @property
    def camp_type(self):
        return self._camp_type

    @camp_type.setter
    def camp_type(self, value):
        self._camp_type = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, recursing into nested API objects."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an OpenPromoCamp from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = OpenPromoCamp()
        for name in OpenPromoCamp._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/OpenPromoCamp.py | OpenPromoCamp.py | py | 3,453 | python | ro | code | 241 | github-code | 13 |
38924816615 | from typing import Optional, Dict, List
import pandas as pd
import pyarrow
from common.pandas.df_utils import concat, downsample_uniform
from featurizer.actors.cache_actor import get_cache_actor, create_cache_actor
from featurizer.calculator.calculator import build_feature_label_set_task_graph
from featurizer.calculator.executor import execute_graph
from featurizer.sql.db_actor import create_db_actor
from featurizer.storage.featurizer_storage import FeaturizerStorage
from featurizer.config import FeaturizerConfig
from featurizer.features.feature_tree.feature_tree import construct_feature, get_feature_by_key_or_name, \
construct_features_from_configs
import ray.experimental
import ray
from ray.data import Dataset
import featurizer
import common
import client
# TODO these are local packages to pass to dev cluster
LOCAL_PACKAGES_TO_PASS_TO_REMOTE_DEV_RAY_CLUSTER = [featurizer, common, client]
class Featurizer:
@classmethod
def run(cls, config: FeaturizerConfig, ray_address: str, parallelism: int):
features = construct_features_from_configs(config.feature_configs)
# for f in features:
# print(f, f.children)
storage = FeaturizerStorage()
storage.store_features_metadata_if_needed(features)
data_ranges_meta = storage.get_data_sources_meta(features, start_date=config.start_date, end_date=config.end_date)
stored_features_meta = storage.get_features_meta(features, start_date=config.start_date, end_date=config.end_date)
label_feature = None
if config.label_feature_index is not None:
label_feature = features[config.label_feature_index]
cache = {}
features_to_store = [features[i] for i in config.features_to_store]
with ray.init(address=ray_address, ignore_reinit_error=True, runtime_env={
'py_modules': LOCAL_PACKAGES_TO_PASS_TO_REMOTE_DEV_RAY_CLUSTER,
'pip': ['pyhumps', 'diskcache']
}):
# remove old actor from prev session if it exists
try:
cache_actor = get_cache_actor()
ray.kill(cache_actor)
except ValueError:
pass
cache_actor = create_cache_actor(cache)
create_db_actor()
# TODO pass params indicating if user doesn't want to join/lookahead and build/execute graph accordingly
dag = build_feature_label_set_task_graph(
features=features,
label=label_feature,
label_lookahead=config.label_lookahead,
data_ranges_meta=data_ranges_meta,
obj_ref_cache=cache,
features_to_store=features_to_store,
stored_feature_blocks_meta=stored_features_meta,
result_owner=cache_actor
)
# TODO first two values are weird outliers for some reason, why?
# df = df.tail(-2)
refs = execute_graph(dag=dag, parallelism=parallelism)
ray.get(cache_actor.record_featurizer_result_refs.remote(refs))
@classmethod
def get_dataset(cls) -> Dataset:
cache_actor = get_cache_actor()
refs = ray.get(cache_actor.get_featurizer_result_refs.remote())
return ray.data.from_pandas_refs(refs)
@classmethod
def get_ds_metadata(cls, ds: Dataset) -> Dict:
# should return metadata about featurization result e.g. in memory size, num blocks, schema, set name, etc.
return {
'count': ds.count(),
'schema': ds.schema(),
'num_blocks': ds.num_blocks(),
'size_bytes': ds.size_bytes(),
'stats': ds.stats()
}
@classmethod
def get_columns(cls, ds: Dataset) -> List[str]:
ds_metadata = cls.get_ds_metadata(ds)
schema: pyarrow.Schema = ds_metadata['schema']
cols = schema.names
return cols
@classmethod
def get_feature_columns(cls, ds: Dataset) -> List[str]:
columns = cls.get_columns(ds)
label_column = cls.get_label_column(ds)
res = []
to_remove = ['timestamp', 'receipt_timestamp', label_column]
for c in columns:
if c not in to_remove:
res.append(c)
return res
@classmethod
def get_label_column(cls, ds: Dataset) -> str:
cols = cls.get_columns(ds)
print(cols)
pos = None
for i in range(len(cols)):
if cols[i].startswith('label_'):
if pos is not None:
raise ValueError('Can not have more than 1 label column')
pos = i
if pos is None:
raise ValueError('Can not find label column')
return cols[pos]
    @classmethod
    def get_materialized_data(cls, start: Optional[str] = None, end: Optional[str] = None, pick_every_nth_row: Optional[int] = 1) -> pd.DataFrame:
        """Fetch the featurizer result blocks and concatenate them locally.

        start/end are currently unused (see TODO below); pick_every_nth_row
        optionally downsamples each block remotely before transfer.
        """
        cache_actor = get_cache_actor()
        refs = ray.get(cache_actor.get_featurizer_result_refs.remote())
        # TODO filter refs based on start/end
        # Remote wrapper so each block is downsampled on the cluster rather
        # than after transfer.
        @ray.remote
        def downsample(df: pd.DataFrame, nth_row: int) -> pd.DataFrame:
            return downsample_uniform(df, nth_row)
        if pick_every_nth_row != 1:
            # TODO const num_cpus ?
            downsampled_refs = [downsample.options(num_cpus=0.9).remote(ref, pick_every_nth_row) for ref in refs]
        else:
            downsampled_refs = refs
        downsampled_dfs = ray.get(downsampled_refs)
        return concat(downsampled_dfs)
if __name__ == '__main__':
    # Connect to the remote dev Ray cluster, shipping local packages and
    # extra pip deps so remote tasks can import them.
    # (Fixed dataset-extraction junk that was fused onto the final line.)
    ray_address = 'ray://127.0.0.1:10001'
    with ray.init(address=ray_address, ignore_reinit_error=True, runtime_env={
        'py_modules': LOCAL_PACKAGES_TO_PASS_TO_REMOTE_DEV_RAY_CLUSTER,
        'pip': ['pyhumps']
    }):
        df = Featurizer.get_materialized_data()
        print(df)
from api.models import TaskList
from api.serializers import TaskListSerializer, TaskSerializer
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.shortcuts import get_object_or_404
@api_view(['GET', 'POST'])
def task_lists_view(request):
    """List all task lists (GET) or create a new one (POST)."""
    if request.method == 'GET':
        task_lists = TaskList.objects.all()
        serializer = TaskListSerializer(task_lists, many=True)
        return Response(serializer.data, status=200)
    elif request.method == 'POST':
        serializer = TaskListSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=201)
        # BUG FIX: a validation failure is a client error -> 400 Bad Request,
        # not 500 Internal Server Error.
        return Response(serializer.errors, status=400)
@api_view(['GET', 'PUT', 'DELETE'])
def task_list_view(request, pk):
    """Retrieve (GET), replace (PUT) or delete (DELETE) one task list."""
    task_list = get_object_or_404(TaskList, pk=pk)
    if request.method == 'GET':
        serializer = TaskListSerializer(task_list)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = TaskListSerializer(instance=task_list, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # BUG FIX: the original fell through and returned None (an implicit
        # server error) on an invalid payload; report the errors instead.
        return Response(serializer.errors, status=400)
    elif request.method == 'DELETE':
        task_list.delete()
        return Response(status=204)
@api_view(['GET', 'POST'])
def tasks_view(request, pk):
    """List tasks of a task list (GET) or create a task in it (POST)."""
    if request.method == 'GET':
        task_list = get_object_or_404(TaskList, pk=pk)
        tasks = task_list.task_set.all()
        serializer = TaskSerializer(tasks, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # Copy before injecting the parent key: request.data can be an
        # immutable QueryDict for form submissions.
        data = request.data.copy()
        data['task_list'] = pk
        serializer = TaskSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=201)
        # BUG FIX: validation failures are a client error (400), not 500.
        return Response(serializer.errors, status=400)
| saltanatnareshova/webdev2019-1 | Week13/todo_back/api/views/fbv.py | fbv.py | py | 1,909 | python | en | code | 0 | github-code | 13 |
4714375773 | import random
import heapq
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import animation
def party_dijkstra(G, source, target=None, cutoff=None, weight='weight'):
    """Compute shortest paths and lengths in a weighted graph G of multiple parties.

    Uses a modification of Dijkstra's algorithm: when relaxing an edge into a
    party destination super-node ('D<party>'), the edge is only taken if the
    path originated at the matching party source super-node ('S<party>').

    Parameters
    ----------
    G : NetworkX graph
    source : node label
        Starting node for path (expected to be a party super-source 'S<party>').
    target : node label, optional
        Ending node for path
    cutoff : integer or float, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.
    weight : str
        Edge attribute used as the edge length (missing attribute counts as 1).

    Returns
    -------
    (path, distance) for `target`, or raises NetworkXNoPath if unreachable.
    Note: the early-exit for source == target returns the (dist, paths) dict
    pair instead, mirroring networkx's single_source_dijkstra.
    """
    if source==target:
        return ({source:0}, {source:[source]})
    dist = {}  # dictionary of final distances
    paths = {source:[source]}  # dictionary of paths
    seen = {source:0}
    fringe=[]  # use heapq with (distance,label) tuples
    heapq.heappush(fringe,(0,source))
    # NOTE(review): lstrip('S') strips every leading 'S' character, not just a
    # one-character prefix -- confirm party names never start with 'S'.
    use = source.lstrip ('S')
    # Pre-seed 'SS' and the other parties' super-sources so the search never
    # routes through them (they are "already seen" at distance 0).
    seen['SS'] = 0
    for x in G.edges('SS'):
        if x[1].lstrip('S')!=use:
            seen[x[1]] = 0
    while fringe:
        (d,v)=heapq.heappop(fringe)
        if v in dist:
            continue # already searched this node.
        dist[v] = d
        if v == target:
            break
        edata=iter(G[v].items())
        for w,edgedata in edata:
            vw_dist = dist[v] + edgedata.get(weight,1)
            if cutoff is not None:
                if vw_dist>cutoff:
                    continue
            if w in dist:
                if vw_dist < dist[w]:
                    raise ValueError('Contradictory paths found:',
                            'negative weights?')
            elif w not in seen or vw_dist < seen[w]:
                # Party gate: only enter a destination super-node 'D<party>'
                # when the path started at the matching 'S<party>'.
                if (str(w)[0] == 'D'):
                    S = paths[v][0].lstrip('S')
                    D = str(w).lstrip('D')
                    if S == D:
                        seen[w] = vw_dist
                        heapq.heappush(fringe,(vw_dist,w))
                        paths[w] = paths[v]+[w]
                else:
                    seen[w] = vw_dist
                    heapq.heappush(fringe,(vw_dist,w))
                    paths[w] = paths[v]+[w]
    try:
        return paths[target], dist [target]
    except KeyError:
        raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
class MPCCRP(object):
    def __init__(self, popN, capN, capE, timeE, graph_type='grid_2d', graph_shape=[5,5], seed=42):
        """Initialize Capacity Constrained Route Planner.

        popN  -- per-party population config: {party: {'min': .., 'max': ..}}
        capN  -- node capacity range: {'min': .., 'max': ..}
        capE  -- edge capacity range: {'min': .., 'max': ..}
        timeE -- edge travel-time range: {'min': .., 'max': ..}
        seed  -- RNG seed for reproducible graphs (None disables seeding)

        NOTE(review): graph_shape uses a mutable default list; safe as long as
        callers never mutate it.
        """
        self.graph_type = graph_type
        self.graph_shape = graph_shape
        self.popN = popN
        self.capN = capN
        self.capE = capE
        self.timeE = timeE
        self.seed = seed
        self.makeCCGraph()
def makeCCGraph(self):
"""Make the specified Capacity Constrained Graph"""
graph_types = {
'grid_2d': self.makeCCGraph_grid2d
}
if self.graph_type in graph_types:
graph_types[self.graph_type]()
else:
graph_types['grid_2d']()
def makeCCGraph_grid2d(self):
"""Make 2D grid Capacity Constrained Graph"""
if (self.seed != None):
random.seed(self.seed)
self.G = nx.grid_2d_graph(self.graph_shape[0],self.graph_shape[1])
self.makeCCNodes()
self.makeCCEdges()
    def makeCCNodes(self):
        """Make Capacity Constrained Nodes.

        Each node gets: 'cap' (random capacity), 'res' (time -> reservation),
        and 'pop' (party -> {time: population}); party populations are drawn
        so their sum never exceeds the node capacity.

        NOTE(review): nodes_iter()/G.node are networkx 1.x APIs -- this module
        requires networkx < 2.0.
        """
        for node in self.G.nodes_iter():
            cap = random.randint(self.capN['min'],self.capN['max'])
            self.G.node[node]['cap'] = cap
            self.G.node[node]['res'] = dict()
            self.G.node[node]['pop'] = {}
            pop = 0
            for party, partyInfo in self.popN.items():
                # Clamp the party's population so the running total stays
                # within the node capacity.
                partyMax = min(cap-pop, partyInfo['max'])
                partyMin = min(partyMax, partyInfo['min'])
                partyPop = random.randint(partyMin,partyMax)
                self.G.node[node]['pop'][party] = {0:partyPop}
                pop += partyPop
    def makeCCEdges(self):
        """Make Capacity Constrained Edges.

        Each edge gets a random 'cap' (capacity), 'time' (travel time), and an
        empty 'res' dict mapping departure time -> remaining capacity.

        NOTE(review): edges_iter()/G.edge are networkx 1.x APIs.
        """
        for edge in self.G.edges_iter():
            cap = random.randint(self.capE['min'],self.capE['max'])
            time = random.randint(self.timeE['min'],self.timeE['max'])
            self.G.edge[edge[0]][edge[1]]['cap'] = cap
            self.G.edge[edge[0]][edge[1]]['time'] = time
            self.G.edge[edge[0]][edge[1]]['res'] = dict()
def getPartyN(self, party, t = 0):
"""Get Node Locations for a Party and Time t"""
nodes = []
for node in self.G.nodes_iter():
if (self.getNodePop(node, party, t) > 0):
nodes.append(node)
return nodes
    def addPartySuper(self, party, partyInfo):
        """Add Party Super to Graph.

        Wires a per-party super source 'S<party>' (connected to the shared
        'SS' and to every party source node) and super sink 'D<party>'
        (connected to the shared 'SD' and to every destination node); all
        super edges are zero-time and uncapacitated.
        """
        S = partyInfo['S']
        D = partyInfo['D']
        PS = 'S' + party
        PD = 'D' + party
        self.G.add_edges_from([(PS,'SS')], time = 0, cap = float('inf'))
        self.G.add_edges_from([(PD, 'SD')], time = 0, cap = float('inf'))
        # Fan the super nodes out to every source/destination node.
        PS = [PS] * len(S)
        PD = [PD] * len(D)
        self.G.add_edges_from(zip(PS, S), time = 0, cap = float('inf'))
        self.G.add_edges_from(zip(PD, D), time = 0, cap = float('inf'))
def removePartySuper(self, party):
"""Remove Party Super from Graph"""
PS = 'S' + party
PD = 'D' + party
self.G.remove_node(PS)
self.G.remove_node(PD)
def addSuper(self, SD):
"""Add Super Nodes to Graph"""
self.SD = SD
self.G.add_node('SS')
self.G.add_node('SD')
for party, partyInfo in self.SD.iteritems():
self.addPartySuper(party, partyInfo)
def removeSuper(self):
"""Remove Super Nodes from Graph"""
self.G.remove_node('SS')
self.G.remove_node('SD')
for party, partyInfo in self.SD.iteritems():
self.removePartySuper(party)
def getPop(self, t = 0):
"""Get Total Population"""
pop = 0
for node in self.G.nodes_iter():
for party, partyInfo in self.G.node[node]['pop'].items():
pop += self.getNodePop(node, party, t)
return pop
    def getNodePop(self, node, party, t):
        """Get Node Population of Party at Time t.

        When no entry exists for exactly t, falls back to the recorded time
        closest below t; the lambda ranks times T <= t by their distance to t
        and (presumably) deprioritizes future times by ranking them by T
        itself -- TODO confirm the intended tie-breaking for T > t.
        """
        if t in self.G.node[node]['pop'][party]:
            pop = self.G.node[node]['pop'][party][t]
        else:
            pop = self.G.node[node]['pop'][party][min(self.G.node[node]['pop'][party].keys(), key=lambda T: abs(t-T) if (T<=t) else T)]
        return pop
def setNodePop(self, node, party, t, pop):
"""Set Node Population of Party at Time t"""
pop = self.G.node[node]['pop'][party][t] = pop
    def getR(self, t = 0):
        """Get the best path R.

        Runs party_dijkstra from each party's super source (the neighbors of
        'SS') to the shared sink 'SD' and keeps the cheapest route.
        R[1:-2] strips the super nodes ('S<party>' at the front, the party
        sink and 'SD' at the back), leaving only real graph nodes.
        """
        Routes = []
        for x in self.G.edges('SS'):
            R1,d1 = party_dijkstra(self.G, source=x[1], target='SD', weight='time')
            heapq.heappush(Routes, (d1,R1))
        d,R = heapq.heappop(Routes)
        # NOTE(review): lstrip('S') strips every leading 'S' character, not a
        # single prefix -- party names must not start with 'S'.
        party = R[0].lstrip('S')
        R = R[1:-2]
        return R, party
def getEdgeTime(self, R, i):
"""Get an Edge's Travel Time"""
try:
t = self.G.edge[R[i]][R[i+1]]['time']
except:
t = 0
return t
def getResE(self, R, t, i):
"""Get an Edge's Reservation"""
try:
res = self.G.edge[R[i]][R[i+1]]['res'][t]
except:
res = self.G.edge[R[i]][R[i+1]]['cap']
return res
def getResN(self, node, t):
"""Get a Node's Reservation"""
try:
res = self.G.node[node]['res'][t]
except:
res = self.G.node[node]['cap']
return res
def getEdgeRes(self, R, t=0):
"""Get Edge Reservations"""
capER = 0
for i in range(len(R)-1):
res = self.getResE(R, t, i)
capER += res
t += self.getEdgeTime(R, i)
return capER
def getNodeRes(self, R, t=0):
"""Get Node Reservations"""
capNR = 0
for i in range(len(R)):
res = self.getResN(R[i], t)
capNR += res
t += self.getEdgeTime(R, i)
return capNR
def getPathFlow(self, R, party, t=0):
"""Get Path Flow"""
popS = self.getNodePop(R[0], party, t)
edgeRes = self.getEdgeRes(R, t)
nodeRes = self.getNodeRes(R, t)
flow = min(popS, edgeRes, nodeRes)
return flow
    def setStage(self, R, party, flow, t=0):
        """Set Path Reservations.

        Commits `flow` units of `party` along route R departing at time t:
        moves population hop by hop, releases node reservations at the
        departure node and books edge reservations at each traversal time.
        When the source node is drained (or the route is trivial), the source
        is detached from its party super node, and the party super node from
        'SS' once it has no sources left.
        """
        t0 = t
        for i in range(len(R)-1):
            # Depart node R[i]: population leaves, node reservation frees up.
            pop = self.getNodePop(R[i], party, t)
            self.setNodePop(R[i], party, t, pop - flow)
            resN = self.getResN(R[i], t)
            self.G.node[R[i]]['res'][t] = resN + flow
            # Book the edge for this traversal time.
            resN = self.getResE(R, t, i)
            self.G.edge[R[i]][R[i+1]]['res'][t] = resN - flow
            t += self.getEdgeTime(R, i)
            # Arrive at R[i+1] after the edge's travel time.
            pop = self.getNodePop(R[i+1], party, t)
            self.setNodePop(R[i+1], party, t, pop + flow)
            # NOTE(review): this books the *departure* node R[i] at the
            # arrival time -- confirm it should not be R[i+1].
            resN = self.getResN(R[i], t)
            self.G.node[R[i]]['res'][t] = resN - flow
        if ((self.getNodePop(R[0], party, t0) <= 0) or (len(R) < 2)):
            PS = 'S' + party
            self.SD[party]['S'].remove(R[0])
            self.G.remove_edge(R[0],PS)
            # Degree 1 means only the edge to 'SS' remains: party exhausted.
            if self.G.degree(PS) == 1:
                self.G.remove_edge(PS,'SS')
def genSD(self, source_shape=[[1,4],[1,4]], source_type='grid_2d'):
"""Generate Sources and Destination"""
source_types = {
'grid_2d': self.genS_grid2d
}
if source_type in source_types:
S = source_types[source_type](source_shape)
else:
S = source_types['grid_2d'](source_shape)
D = self.genD(S)
return S, D
def genS_grid2d(self, source_shape):
"""Generate 2D grid of Sources given shape"""
S = []
Sx = range(source_shape[0][0],source_shape[0][1])
Sy = range(source_shape[1][0],source_shape[1][1])
for x in Sx:
S += (zip([x]*len(Sy),Sy))
return S
    def genD(self, S):
        """Generate Destination given Sources: every node not in S.

        NOTE(review): relies on G.nodes() returning a plain list (networkx
        1.x); networkx 2.x returns a NodeView without .remove().
        """
        D = self.G.nodes()
        for s in S:
            D.remove(s)
        return D
def getStage(self, R, party, flow, t):
"""Get the stage of the plan given R and t"""
stage = dict()
stage['Party'] = party
stage['S'] = R[0]
stage['S_data'] = self.G.node[R[0]]
stage['D'] = R[-1]
stage['D_data'] = self.G.node[R[-1]]
stage['R'] = R
stage['Flow'] = flow
stage['Start'] = t
return stage
def isPlanning(self, t):
"""Check if Planning is Done"""
S = 0
for party in self.SD:
S += len(self.SD[party]['S'])
return (S != 0)
def getTotalTime(self, plan):
totalTime = 0
for stage in plan:
for party, partyInfo in stage['D_data']['pop'].items():
maxT = max(partyInfo.keys())
if totalTime < maxT:
totalTime = maxT
return totalTime
def applyCCRP(self, SD, t=0):
"""Apply Capacity Constrained Route Planner to Graph"""
plan = []
self.addSuper(SD)
while self.isPlanning(t):
R, party = self.getR(t)
flow = self.getPathFlow(R, party, t)
if (flow <= 0):
t += 1
else:
stage = self.getStage(R, party, flow, t)
plan.append(stage)
self.setStage(R, party, flow, t)
self.removeSuper()
toatalTime = self.getTotalTime(plan)
return plan, toatalTime
def drawGraph(self, SD, figsize=(20,15), params='ne', saveFig=False, fname=None, flabel=None, t=0,):
if saveFig:
fig = plt.figure(figsize=figsize)
edgewidth = []
nodelabels = {}
edgelabels = {}
for (u,d) in self.G.nodes(data=True):
nodelabels[u] = str(d['cap'])
for party in self.popN:
nodelabels[u] += ',' + str(self.getNodePop(u, party, t))
for (u,v,d) in self.G.edges(data=True):
edgewidth.append(d['cap']*3)
edgelabels[(u,v)] = str('\n\n\n') + str(d['cap']) + ',' + str(d['time'])
#pos = nx.spring_layout(self.G, weight='time'*-1, iterations=50)
#pos = nx.spectral_layout(self.G)
pos = dict((node,node) for node in self.G.nodes())
edge_colors = list(edge[2]['time'] for edge in self.G.edges(data=True))
edge_cmap = plt.cm.summer
bbox = dict(alpha=0.0)
nodecap=[self.G.node[node]['cap']*500 for node in self.G]
nx.draw_networkx_edges (self.G, pos, width = edgewidth, edge_color=edge_colors, edge_cmap=edge_cmap)
nx.draw_networkx_nodes (self.G, pos, node_size=nodecap, node_color='blue', alpha=.6)
#evacColor = dict((node,'r') for node in self.G.nodes())
evacColor = list('purple' for node in self.G.nodes(data=True))
evacColor = []
for node in self.G.nodes():
if node in SD['evac']['S']:
evacColor.append('r')
else:
evacColor.append('g')
partyColor = {
'evac':evacColor,
'resp':'orange'
}
for party in self.popN:
nodepop=[]
for node in self.G.nodes():
pop = self.getNodePop(node, party, t)
nodepop.append(pop*500)
nx.draw_networkx_nodes (self.G, pos, node_size=nodepop, node_color=partyColor[party], alpha=.9)
if 'n' in params:
nx.draw_networkx_labels(self.G, pos, labels=nodelabels, font_size=10, font_color='white', font_weight='bold')
if 'e' in params:
nx.draw_networkx_edge_labels(self.G, pos, edge_labels = edgelabels, font_size=10, font_color='black', bbox=bbox)
if saveFig:
if ~(fname==None):
fname = 'CCRP_'\
+ str(ccrp.graph_type) + '_'\
+ str(ccrp.graph_shape[0]) +'x'\
+ str(ccrp.graph_shape[1])\
+ flabel +'.pdf'
fig.savefig(fname)
#return fig | ruffsl/CS6601P1 | Code/Python/ccrpTools.py | ccrpTools.py | py | 14,893 | python | en | code | 5 | github-code | 13 |
import numpy as np
import torch
import rawpy
from torch.utils.data import Dataset
import random
from PIL import Image
from scipy import ndimage
from os.path import join
patch_size = 512
class LSID(Dataset):
    def __init__(self, data_path, subset, patch_size=512, max_nr_images_per_gt_and_shutter=100):
        """Learning-to-See-in-the-Dark Sony dataset.

        data_path -- root directory containing the Sony split list files
        subset    -- 'train', 'test' or 'valid' (anything else falls back to train)
        patch_size -- side of the random crop taken from the packed raw image
        """
        self.data_path = data_path
        # Max number of images with the same gt image AND same shutter speed
        # Number of images in Sony training set for different values of this parameter:
        # [0, 280, 559, 725, 888, 1050, 1212, 1374, 1536, 1696, 1853, 1862, 1865, 1865, 1865]
        self.max_nr_images_per_gt_and_shutter = max_nr_images_per_gt_and_shutter
        self.data = self.__make_dataset(subset)
        self.patch_size = patch_size
    def __pack_bayer(self, image):
        """Pack a rawpy Bayer image into a 4-channel float array.

        Normalizes to [0, 1] using the sensor's black/white levels (492/16383
        here -- presumably Sony a7S II specific, TODO confirm) and stacks the
        four Bayer phases as channels, halving height and width.
        """
        # change raw image to float
        image = image.raw_image_visible.astype(np.float32)
        image = np.maximum(image - 492, 0) / (16383 - 492) # 16383 max, 492 min
        image = np.expand_dims(image, axis=2) # (x,x,dim)
        h, w, _ = image.shape
        return np.concatenate((image[0:h:2, 0:w:2], image[0:h:2, 1:w:2], image[1:h:2, 0:w:2], image[1:h:2, 1:w:2]),
                              axis=2) # concat along dim (x,x,dim)
def __make_dataset(self, subset):
file_path = "Sony_train_list.txt"
if subset == 'train':
file_path = "Sony_train_list.txt"
elif subset == "test":
file_path = "Sony_test_list.txt"
elif subset == "valid":
file_path = "Sony_val_list.txt"
files = open(join(self.data_path, file_path), 'r').readlines()
dataset = []
for f in files:
file_list = f.split()
# Reduce set size: continue the loop if condition is met.
image_path = file_list[0] # Example: './Sony/short/00001_06_0.1s.ARW'
image_number_string = image_path.split(sep='_')[1]
image_number = int(image_number_string)
if image_number > self.max_nr_images_per_gt_and_shutter:
continue
file_path_short = join(self.data_path, file_list[0])
file_path_long = join(self.data_path, file_list[1])
exposure_ratio = float(file_list[1].split("_")[-1][:-5]) / float(file_list[0].split("_")[-1][:-5])
iso = file_list[2]
sample = {
'image': file_path_short,
'gt': file_path_long,
'exposure_ratio': exposure_ratio,
'iso': iso
}
dataset.append(sample)
return dataset
    def __getitem__(self, index):
        """Load one (short-exposure, ground-truth) pair with augmentation.

        Returns (image, target): the packed, exposure-scaled raw input and
        the demosaiced gt image, randomly cropped, rotated and flipped in
        lockstep. target has twice the spatial size of image because packing
        halves the Bayer resolution.
        """
        file_path_short = self.data[index]['image']
        file_path_long = self.data[index]['gt']
        image = rawpy.imread(file_path_long)
        image = image.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
        target = np.float32(image / 65535.0) # divide by 65535 after converting a uint16 intensity image to double
        image = rawpy.imread(file_path_short)
        image = self.__pack_bayer(image)
        # Brighten the short exposure by the gt/short shutter ratio, capped
        # at 300 (as in the original SID paper setup -- TODO confirm).
        image = image * min(self.data[index]['exposure_ratio'], 300)
        # random crop
        i, j = random.randint(0, image.shape[0] - self.patch_size), random.randint(0, image.shape[1] - self.patch_size)
        image = image[i:i + self.patch_size, j:j + self.patch_size, :]
        target = target[i * 2:i * 2 + self.patch_size * 2, j * 2:j * 2 + self.patch_size * 2, :]
        # changed from tensor functions to numpy functions as tensor need to converted to PIL image to use built in transforms
        # random rotation
        if random.random() > 0.5:
            angle = random.randint(-10, 10)
            image = ndimage.rotate(image, angle, reshape=False) # set to false to preserve size
            target = ndimage.rotate(target, angle, reshape=False)
        image = torch.from_numpy(image)
        target = torch.from_numpy(target)
        # flip with tensor type to avoid negative strides
        if random.random() > 0.5:
            image = torch.flip(image, dims=(0,))
            target = torch.flip(target, dims=(0,))
        if random.random() > 0.5:
            image = torch.flip(image, dims=(1,))
            target = torch.flip(target, dims=(1,))
        return image, target
    def __len__(self):
        # Number of (short, gt) samples parsed from the split list.
        return len(self.data)
#train_data = LSID("./", "train", patch_size=512, max_nr_images_per_gt=3)
#data_loader = torch.utils.data.DataLoader(train_data, batch_size=2, shuffle=None)
#for i, (inputs, targets) in enumerate(data_loader):
# print(i, inputs.shape, targets.shape)
| NoorZia/LSID | dataset.py | dataset.py | py | 4,642 | python | en | code | 1 | github-code | 13 |
import sys
import encodings
import encodings.aliases
import re
import collections
from builtins import str as _builtin_str
import functools
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
def getUserLocale():
    """Return the user's localeconv() dict, leaving the process in 'C' locale.

    Temporarily switches to the system default locale to read its numeric
    and monetary conventions, then resets to 'C' so the rest of the program
    is locale-independent.
    """
    # get system localeconv and reset system back to default
    import locale
    locale.setlocale(locale.LC_ALL, '')
    conv = locale.localeconv()
    locale.setlocale(locale.LC_ALL, 'C')
    return conv
def getLanguageCode():
    """Return the default language tag with '-' separators, e.g. 'en-US'.

    NOTE(review): locale.getdefaultlocale() may return (None, None), which
    would raise AttributeError here; it is also deprecated since Python 3.11.
    """
    import locale
    return locale.getdefaultlocale()[0].replace("_","-")
# Iterate over grouping intervals
def _grouping_intervals(grouping):
    """Yield digit-group sizes from a localeconv 'grouping' list.

    CHAR_MAX terminates the sequence; a 0 repeats the previous interval
    forever (locale(5) semantics).
    """
    last_interval = 3 # added by Mark V to prevent compile error but not necessary semantically
    for interval in grouping:
        # if grouping is -1, we are done
        if interval == CHAR_MAX:
            return
        # 0: re-use last group ad infinitum
        if interval == 0:
            while True:
                yield last_interval
        yield interval
        last_interval = interval
#perform the grouping from right to left
def _group(conv, s, monetary=False):
    """Insert locale thousands separators into the digit string `s`.

    Returns (grouped_string, extra_chars) where extra_chars is the number of
    separator characters added (used by callers to strip padding).
    Trailing/leading non-digit runs (sign, spaces) are preserved.
    """
    thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
    grouping = conv[monetary and 'mon_grouping' or 'grouping']
    if not grouping:
        return (s, 0)
    # (Removed two dead locals, `result` and `seps`, that were never read.)
    if s[-1] == ' ':
        stripped = s.rstrip()
        right_spaces = s[len(stripped):]
        s = stripped
    else:
        right_spaces = ''
    left_spaces = ''
    groups = []
    for interval in _grouping_intervals(grouping):
        if not s or s[-1] not in "0123456789":
            # only non-digit characters remain (sign, spaces)
            left_spaces = s
            s = ''
            break
        groups.append(s[-interval:])
        s = s[:-interval]
    if s:
        groups.append(s)
    groups.reverse()
    return (
        left_spaces + thousands_sep.join(groups) + right_spaces,
        len(thousands_sep) * (len(groups) - 1)
    )
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(conv, percent, value, grouping=False, monetary=False, *additional):
    """Return the locale-aware substitution of a single %-specifier.

    `percent` must contain exactly one format specifier (validated below);
    `additional` supplies values for any '*' width/precision modifiers.
    """
    # this is only for one-percent-specifier strings and this should be checked
    match = _percent_re.match(percent)
    if not match or len(match.group())!= len(percent):
        raise ValueError(("format() must be given exactly one %%char "
                          "format specifier, %s not valid") % repr(percent))
    return _format(conv, percent, value, grouping, monetary, *additional)
def _format(conv, percent, value, grouping=False, monetary=False, *additional):
    """Apply one %-specifier, then retrofit locale grouping/decimal point.

    Floats get the locale decimal point and (optionally) thousands grouping
    on the integer part; decimal integers get grouping only. Padding added
    by the grouping separators is stripped to preserve the field width.
    """
    if additional:
        formatted = percent % ((value,) + additional)
    else:
        formatted = percent % value
    # floats and decimal ints need special action!
    if percent[-1] in 'eEfFgG':
        seps = 0
        parts = formatted.split('.')
        if grouping:
            parts[0], seps = _group(conv, parts[0], monetary=monetary)
        decimal_point = conv[monetary and 'mon_decimal_point'
                             or 'decimal_point']
        formatted = decimal_point.join(parts)
        if seps:
            formatted = _strip_padding(formatted, seps)
    elif percent[-1] in 'diu':
        seps = 0
        if grouping:
            formatted, seps = _group(conv, formatted, monetary=monetary)
        if seps:
            formatted = _strip_padding(formatted, seps)
    return formatted
def format_string(conv, f, val, grouping=False):
    """Formats a string in the same way that the % formatting would use,
    but takes the current locale (described by `conv`) into account.

    Grouping is applied if the last parameter is true.
    """
    # collections.Mapping was removed in Python 3.10; import the abc form.
    from collections.abc import Mapping
    percents = list(_percent_re.finditer(f))
    new_f = _percent_re.sub('%s', f)
    if isinstance(val, Mapping):
        new_val = []
        for perc in percents:
            if perc.group()[-1] == '%':
                new_val.append('%')
            else:
                new_val.append(format(conv, perc.group(), val, grouping))
        # BUG FIX: the substituted values were computed but never used; rebind
        # val so the '%s'-template below formats them (matches CPython's
        # locale.format_string).
        val = tuple(new_val)
    else:
        if not isinstance(val, tuple):
            val = (val,)
        new_val = []
        i = 0
        for perc in percents:
            if perc.group()[-1] == '%':
                new_val.append('%')
            else:
                # '*' modifiers consume extra values for width/precision.
                starcount = perc.group('modifiers').count('*')
                new_val.append(_format(conv,
                                       perc.group(),
                                       val[i],
                                       grouping,
                                       False,
                                       *val[i+1:i+1+starcount]))
                i += (1 + starcount)
        val = tuple(new_val)
    return new_f % val
def currency(conv, val, symbol=True, grouping=False, international=False):
    """Formats val according to the currency settings
    described by the localeconv dict `conv`."""
    # check for illegal values
    digits = conv[international and 'int_frac_digits' or 'frac_digits']
    if digits == 127:
        raise ValueError("Currency formatting is not possible using "
                         "the 'C' locale.")
    # BUG FIX: this module's format() takes conv as its first argument; the
    # original call dropped it, shifting every parameter by one.
    s = format(conv, '%%.%if' % digits, abs(val), grouping, monetary=True)
    # '<' and '>' are markers if the sign must be inserted between symbol and value
    s = '<' + s + '>'
    if symbol:
        smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
        precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
        separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
        if precedes:
            s = smb + (separated and ' ' or '') + s
        else:
            s = s + (separated and ' ' or '') + smb
    sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
    sign = conv[val<0 and 'negative_sign' or 'positive_sign']
    if sign_pos == 0:
        s = '(' + s + ')'
    elif sign_pos == 1:
        s = sign + s
    elif sign_pos == 2:
        s = s + sign
    elif sign_pos == 3:
        s = s.replace('<', sign)
    elif sign_pos == 4:
        s = s.replace('>', sign)
    else:
        # the default if nothing specified;
        # this should be the most fitting sign position
        s = sign + s
    return s.replace('<', '').replace('>', '')
def str(conv, val):
    """Convert float to string, taking the locale into account.

    NOTE: shadows the builtin str within this module, mirroring the stdlib
    locale.str API.
    """
    return format(conv, "%.12g", val)
def atof(conv, string, func=float):
    "Parses a string as a float according to the locale settings."
    # Drop the locale grouping separators, then normalize the locale decimal
    # point to '.', and hand the cleaned text to the parser.
    ts = conv['thousands_sep']
    dd = conv['decimal_point']
    cleaned = string.replace(ts, '') if ts else string
    if dd:
        cleaned = cleaned.replace(dd, '.')
    return func(cleaned)
def atoi(conv, str):
    "Converts a string to an integer according to the locale settings."
    # NOTE: the parameter name shadows the builtin str (kept for interface
    # compatibility with the stdlib locale.atoi signature).
    return atof(conv, str, int)
| gplehmann/Arelle | arelle/Locale.py | Locale.py | py | 7,428 | python | en | code | null | github-code | 13 |
import json
import time
import random
from instagrapi import Client
cl = Client()
# NOTE(review): credentials are placeholder literals; supply real ones via
# environment variables or a config file rather than hard-coding them.
cl.login('USERNAME','PASSWORD')
# Persist the authenticated session for later reuse.
# BUG FIX: the file handle passed to json.dump was opened inline and never
# closed; a with-block flushes and closes it deterministically.
with open('session.json', 'w') as session_file:
    json.dump(cl.get_settings(), session_file)
# cl = Client(json.load(open('settings.json')))
# NOTE(review): the commented reload above reads 'settings.json' while the
# dump writes 'session.json' -- confirm which file name is intended.
print('Login Successfully...')
media = cl.hashtag_medias_recent('python', amount=10) # get 10 recent post with python hashtag
c = 0
for m in media:
    c += 1
    try:
        cl.media_like(m.id)
        cl.media_comment(m.id , 'comment_text') # change comment text
        print(str(c) + ': ' + m.code + '\t\tTime: ' + str(m.taken_at) + '\t\tLike: ' + str(m.like_count) + '\t\tComment: ' + str(m.comment_count))
        time.sleep(random.randint(30,60)) # sleep to avoid the account ban
    except Exception as e:
        # Best-effort: log the failure for this post and keep going.
        print(e.args)
print('Done')
| EsmaeiliSina/instabot | app.py | app.py | py | 794 | python | en | code | 2 | github-code | 13 |
import pygame
import math
from pygame.math import Vector2 as vec
from settings import *
from main_test import *
from ghost import *
# Ghost 클래스 상속
class PinkGhost(Ghost):
    def __init__(self, Game, pos, speed):
        """Pink ghost: chases the centroid of the other three ghosts."""
        self.Game = Game
        self.grid_pos = pos            # position in maze grid coordinates
        self.pos = [pos.x, pos.y]
        self.pix_pos = self.get_pix_pos()  # pixel position (inherited helper)
        self.speed = speed
        self.centroid_pos = None       # last computed ghost centroid (grid)
        self.next_dir = UP
        self.color = "Pink"
####################### MOVING #######################
    # Compute the distance between a candidate cell and the centroid of the
    # red/blue/green ghosts (the pink ghost's chase target).
    def calculate_distance(self, pos):
        red_pos = self.Game.red_ghost.grid_pos
        blue_pos = self.Game.blue_ghost.grid_pos
        green_pos = self.Game.green_ghost.grid_pos
        centroid_x = int((red_pos.x + blue_pos.x + green_pos.x) // 3)
        centroid_y = int((red_pos.y + blue_pos.y + green_pos.y) // 3)
        self.centroid_pos = vec(centroid_x, centroid_y)
        cx, cy = self.centroid_pos.x, self.centroid_pos.y
        x, y = pos.x, pos.y
        # Euclidean distance rounded to 2 decimals for stable comparisons.
        dist = round(math.sqrt((cx - x)**2 + (cy - y)**2), 2)
        return dist
    def get_direction(self):
        """Pick the next movement direction: the open neighbor cell closest
        to the other ghosts' centroid (see calculate_distance)."""
        # Leave the ghost house first.
        if self.grid_pos in self.Game.ghost_house:
            return UP
        x, y = self.grid_pos.x, self.grid_pos.y
        up, down, left, right = vec(x, y - 1), vec(x, y + 1), vec(x - 1, y), vec(x + 1, y)
        next_dir_list= [up, down, right, left]
        able_to_go = [False, False, False, False]
        index_true = []
        for i, next in enumerate(next_dir_list):
            if next not in self.Game.walls:
                able_to_go[i] = True
                index_true.append(i)
            else:
                able_to_go[i] = False
                # NOTE(review): i was never appended in this branch, so this
                # removal is dead code.
                if i in index_true:
                    index_true.remove(i)
        index_true.sort()
        # NOTE(review): if index_true is empty this raises IndexError, and
        # `and index_true[0]` is a truthiness test that is False when the
        # only open direction is index 0 (up) -- likely meant
        # `len(index_true) == 1`. TODO confirm; the fallback loop below
        # produces the same pick when this branch is skipped.
        if len(index_true) <= 1 and index_true[0]:
            return vec(next_dir_list[index_true[0]] - vec(x, y))
        # Among the open neighbors, choose the one nearest the centroid.
        min_dist = float('inf')
        for i, able in enumerate(able_to_go):
            if able:
                dist = self.calculate_distance(next_dir_list[i])
                if dist < min_dist:
                    min_dist = dist
                    self.next_dir = i
        # Up
        if self.next_dir == 0:
            self.next_dir = UP
        # Down
        elif self.next_dir == 1:
            self.next_dir = DOWN
        # Right
        elif self.next_dir == 2:
            self.next_dir = RIGHT
        # Left
        elif self.next_dir == 3:
            self.next_dir = LEFT
        return self.next_dir
    def move(self):
        # Start acting 5 seconds after game start (300 frames -- presumably
        # at 60 fps, TODO confirm against the game loop).
        if self.Game.fps_after_start > 300:
            self.direction = self.get_direction()
###################### TESTING ######################
    def show_direction(self):
        """Debug helper: draw the chase-target centroid cell in pink."""
        if self.centroid_pos:
            pos_x = self.centroid_pos[0]
            pos_y = self.centroid_pos[1]
            pygame.draw.rect(self.Game.screen, PINK, (pos_x * CELL + SPACE, pos_y * CELL + SPACE, CELL, CELL))
####################### DRAWING #######################
def get_image(self):
image = pink_up
if self.direction == vec(0, 0):
image = pink_right
elif self.direction == UP:
image = pink_up
elif self.direction == DOWN:
image = pink_down
elif self.direction == RIGHT:
image = pink_right
elif self.direction == LEFT:
image = pink_left
return image
    def stop(self):
        """Freeze the ghost in place but keep rendering its sprite."""
        # Sprite is 30px; offset by half to center it on the pixel position.
        cur_pos = (self.pix_pos.x - 15, self.pix_pos.y - 15)
        self.speed = 0
        self.Game.screen.blit(self.get_image(), cur_pos)
    def draw(self):
        """Render the ghost, restoring its speed if it was stopped."""
        cur_pos = (self.pix_pos.x - 15, self.pix_pos.y - 15)
        # Resume moving after a stop().
        if self.speed == 0:
            self.speed = PINK_GHOST_SPEED
        # Debug: visualize the chase-target centroid.
        #self.show_direction()
        self.Game.screen.blit(self.get_image(), cur_pos)
| KKIMIs/AI-Pacman | GamePacman/pink_ghost.py | pink_ghost.py | py | 4,036 | python | en | code | 0 | github-code | 13 |
import numpy as np
import pandas as pd
import requests
import json
from datetime import date, timedelta, datetime
import time
import sqlite3
from sqlite3 import Error
# Date window: look one week ahead for upcoming events.
today = date.today()
tdelta = timedelta(days=7)
one_week_date = today + tdelta
# day_time = datetime.today().strftime('%A')
conn = sqlite3.connect('data.db') #Connecting to database
c = conn.cursor()
c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='previous_events_1920'")
#if the count is 1, then table exists
if c.fetchone()[0]==1 :
    previously_logged_events_1920_df = pd.read_sql_query("SELECT * FROM previous_events_1920", conn)
else:
    # First run: no table yet, start from an empty frame.
    previously_logged_events_1920_df = pd.DataFrame()
# Fetch every 2019-20 season event from The Orange Alliance API.
# NOTE(review): the API key is hard-coded; move it to an environment
# variable or config file before committing.
event_request_1920 = requests.get(
    'https://theorangealliance.org/api/event?season_key=1920',
    headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
) #Calling for all this year's events
events_1920 = json.loads(event_request_1920.content)
events_1920_df = pd.DataFrame(events_1920)
indexNames = events_1920_df[(events_1920_df["event_type_key"] == "OTHER") | (events_1920_df["event_type_key"] == "SCRIMMAGE")].index
events_1920_df.drop(indexNames, inplace=True) #Deleting certain types of events
def date_parse(x):
    """Return the date portion of an ISO timestamp ('YYYY-MM-DDTHH:MM:SS').

    The original built a throwaway list (shadowing the `list` builtin);
    splitting once on 'T' and taking the first piece is equivalent.
    """
    return x.split("T")[0]
# Normalize start_date to a plain date and keep only the useful columns.
events_1920_df["start_date"] = events_1920_df["start_date"].apply(date_parse)
events_1920_df = events_1920_df[["event_key", "region_key", "event_code", "event_type_key", "event_name", "start_date", "city", "venue", "website"]]
events_1920_df = events_1920_df.sort_values("start_date")
# Split into past events (processed below) and future events.
future_events_1920_df = events_1920_df.loc[events_1920_df.start_date >= str(today)]
events_1920_df = events_1920_df.drop(events_1920_df[events_1920_df.start_date >= str(today)].index)
events_1920_df = events_1920_df.reset_index()
events_1920_df = events_1920_df.drop(columns=["index"])
future_events_1920_df = future_events_1920_df.reset_index()
future_events_1920_df = future_events_1920_df.drop(columns=["index"])
# Collect events that have not been logged on a previous run.
indexList = []
eventList = []
if previously_logged_events_1920_df.empty == False:
    for i in range(len(events_1920_df)):
        eventKey = events_1920_df.loc[i, "event_key"]
        if eventKey not in previously_logged_events_1920_df.event_key.values:
            indexList.append(i)
            eventList.append(events_1920_df.loc[i, "event_key"])
else:
    # First run: every past event is new.
    indexList = list(range(len(events_1920_df)))
    eventList = list(events_1920_df.event_key.values)
# Snapshot the current past-event table for the next run's diff.
events_1920_df.to_sql("previous_events_1920", con=conn, if_exists="replace")
#MOVING ON TO LOOKING FOR USEABLE FUTURE EVENTS
# Keep only future events within the next week, then pull each one's matches.
future_events_1920_df = future_events_1920_df.loc[future_events_1920_df["start_date"] <= str(one_week_date)]
for i in future_events_1920_df.event_key:
    while True:
        try:
            event_matches_request = requests.get(
                'https://theorangealliance.org/api/{}/matches'.format(i),
                headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
            )
            event_matches = json.loads(event_matches_request.content)
            break
        except:
            # Rate-limit backoff: wait, retry once, then fall back to the
            # top of the while-loop (which issues yet another request).
            # NOTE(review): the response from this retry is never parsed.
            print(event_matches_request)
            time.sleep(15)
            event_matches_request = requests.get(
                'https://theorangealliance.org/api/{}/matches'.format(i),
                headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
            )
            if str(event_matches_request) == "<Response [429]>":
                print("still didn't work")
                pass
    # NOTE(review): comparing the parsed JSON's str() against a literal dict
    # repr is fragile -- prefer checking event_matches.get('_code') == 404.
    if str(event_matches) != "{'_code': 404, '_message': 'Content not found.'}":
        event_matches_df = pd.DataFrame(event_matches)
        # NOTE(review): future_matches_df is never initialized before this
        # point (NameError), and DataFrame.append returns a new frame rather
        # than mutating -- the result is discarded. Needs an init + reassign.
        future_matches_df.append(event_matches_df)
future_matches_df.to_sql("future_matches_1920", con=conn, if_exists="replace")
# MOVING ON TO IMPORTING NEW MATCHES
# NOTE(review): useable_matches_1920_df is created here but never used below.
useable_matches_1920_df = pd.DataFrame()
c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='all_matches_1920'")
#if the count is 1, then table exists
if c.fetchone()[0]==1 :
    previously_logged_matches_1920_df = pd.read_sql_query("SELECT * FROM all_matches_1920", conn)
else:
    previously_logged_matches_1920_df = pd.DataFrame()
# NOTE(review): when the table did not exist, the empty frame has no
# 'event_key' column and the next line raises KeyError -- confirm intent.
previously_logged_event_keys = previously_logged_matches_1920_df["event_key"].unique().tolist()
# Page through the season's matches 500 rows at a time; a short page marks
# the end of the data.
# NOTE(review): count starts at 1, so the first page (start=0) is skipped --
# confirm whether that is intentional.
lastSize = 500
count = 1
all_matches_df = pd.DataFrame()
while lastSize == 500:
    while True:
        try:
            matches_start_request = requests.get(
                'https://theorangealliance.org/api/match/all/1920?start={}'.format(count*500),
                headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
            )
            matches_start = json.loads(matches_start_request.content)
            break
        except (requests.RequestException, ValueError):
            # Retry on network failures or malformed/non-JSON responses only
            # (previously a bare except that also swallowed KeyboardInterrupt).
            pass
    if all_matches_df.empty == True:
        all_matches_df = pd.DataFrame(matches_start)
    else:
        new_match_df = pd.DataFrame(matches_start)
        all_matches_df = all_matches_df.append(new_match_df)
    # BUG FIX: 'event_data' was never defined (NameError on the first pass);
    # the page just fetched is 'matches_start'.
    lastSize = len(matches_start)
    count = count + 1
events_with_matches_keys = all_matches_df["event_key"].unique().tolist()
# NOTE(review): DataFrame.drop() requires labels/index/columns and raises as
# written; the intended filter (perhaps dropping previously logged events)
# still needs to be specified.
all_matches_df = all_matches_df.drop()
# if len(eventList) != 0 or future_matches_df.empty == False:
# if len(eventList) !=0:
# for i in eventList:
# index = events_1920_df.index[events_1920_df["event_key"] == i][0]
# while True:
# try:
# match_request = requests.get(
# 'https://theorangealliance.org/api/{}/matches'.format(i),
# headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
# )
# if str(match_request) == "<Response [400]>" or str(match_request) == "<Response [404]>":
# print("Skipped event {}, row = {} out of {}".format(i, index, len(eventList)))
# break
# else:
# match_data = json.loads(match_request.content)
# if all_matches_1920_df.empty == True:
# all_matches_1920_df = pd.DataFrame(match_data, index=[0])
# future_matches_1920_df = pd.DataFrame(match_data, index=[0])
# else:
# new_match_data = pd.DataFrame(match_data, index=[0])
# all_matches_1920_df = all_matches_1920_df.append(new_match_data)
# useable_matches_1920_df = useable_matches_1920_df.append(new_match_data)
# break
# except:
# print("stopped at {}, row = {} out of {}".format(i, index, len(eventList)))
# time.sleep(15)
# match_request = requests.get(
# 'https://theorangealliance.org/api/{}/matches'.format(i),
# headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
# )
# if str(match_request) == "<Response [429]>":
# print("trying again")
# pass
# if future_matches_df.empty == False:
# for i in future_matches_df.event_key:
# index = future_matches_df.index[future_matches_df["event_key"] == i][0]
# while True:
# try:
# match_request = requests.get(
# 'https://theorangealliance.org/api/{}/matches'.format(i),
# headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
# )
# if str(match_request) == "<Response [400]>" or str(match_request) == "<Response [404]>":
# print("Skipped event {}, row = {} out of {}".format(i, index, len(DataFrame.index)))
# break
# else:
# match_data = json.loads(match_request.content)
# if all_matches_1920_df.empty == True:
# all_matches_1920_df = pd.DataFrame(match_data, index=[0])
# else:
# new_match_data = pd.DataFrame(match_data, index=[0])
# all_matches_1920_df = all_matches_1920_df.append(new_match_data)
# break
# except:
# print("stopped at {}, row = {} out of {}".format(i, index, len(DataFrame.index)))
# time.sleep(15)
# match_request = requests.get(
# 'https://theorangealliance.org/api/{}/matches'.format(i),
# headers={'Content-Type': 'application/json', 'X-TOA-Key': 'ef98a4e91bcabfcc23d2241046f3894e3521ab605a30af96b2f0c6a30f0fdcdf', 'X-Application-Origin': 'roboDojo'},
# )
# if str(match_request) == "<Response [429]>":
# print("trying again")
# pass
# else:
# print("No new matches to add")
# if all_matches_1920_df.empty == False:
# all_matches_1920_df = all_matches_1920_df[["match_key", "event_key", "red_score", "blue_score", "red_penalty", "blue_penalty", "red_auto_score", "blue_auto_score", "red_tele_score", "blue_tele_score", "red_end_score", "blue_end_score", "participants"]]
# useable_matches_1920_df = useable_matches_1920_df[["match_key", "event_key", "red_score", "blue_score", "red_penalty", "blue_penalty", "red_auto_score", "blue_auto_score", "red_tele_score", "blue_tele_score", "red_end_score", "blue_end_score", "participants"]]
# all_matches_1920_df["score_diff"] = all_matches_1920_df["red_score"] - all_matches_1920_df["blue_score"]
# useable_matches_1920_df["score_diff"] = useable_matches_1920_df["red_score"] - useable_matches_1920_df["blue_score"]
# def winner_num(x):
# if x > 0:
# return 0
# if x < 0:
# return 1
# return 2
# all_matches_1920_df["match_winner"] = all_matches_1920_df["score_diff"].apply(winner_num)
# useable_matches_1920_df["match_winner"] = useable_matches_1920_df["score_diff"].apply(winner_num)
# row_num = 0
# for i in all_matches_1920_df["participants"]:
# participantsL = []
# for j in i.split(","):
# if "'team': " in j:
# team = j.split(" ")[3]
# team = int(str(team).replace("'", ""))
# team = int(team)
# participantsL.append(team)
# if len(participantsL) == 4:
# all_matches_1920_df.loc[row_num, "red_team_1"] = participantsL[0]
# all_matches_1920_df.loc[row_num, "red_team_2"] = participantsL[1]
# all_matches_1920_df.loc[row_num, "blue_team_1"] = participantsL[2]
# all_matches_1920_df.loc[row_num, "blue_team_2"] = participantsL[3]
# else:
# all_matches_1920_df = all_matches_1920_df.drop(row_num)
# row_num = row_num + 1
# row_num = 0
# for i in useable_matches_1920_df["participants"]:
# participantsL = []
# for j in i.split(","):
# if "'team': " in j:
# team = j.split(" ")[3]
# team = int(str(team).replace("'", ""))
# team = int(team)
# participantsL.append(team)
# if len(participantsL) == 4:
# useable_matches_1920_df.loc[row_num, "red_team_1"] = participantsL[0]
# useable_matches_1920_df.loc[row_num, "red_team_2"] = participantsL[1]
# useable_matches_1920_df.loc[row_num, "blue_team_1"] = participantsL[2]
# useable_matches_1920_df.loc[row_num, "blue_team_2"] = participantsL[3]
# else:
# useable_matches_1920_df = useable_matches_1920_df.drop(row_num)
# row_num = row_num + 1
# all_matches_1920_df = all_matches_1920_df.dropna()
# useable_matches_1920_df = useable_matches_1920_df.dropna()
# all_matches_1920_df = all_matches_1920_df.reset_index()
# all_matches_1920_df = all_matches_1920_df.drop(columns=["index"])
# useable_matches_1920_df = useable_matches_1920_df.reset_index()
# useable_matches_1920_df = useable_matches_1920_df.drop(columns=["index"])
# all_matches_1920_df.to_sql("all_matches_1920", con=conn, if_exists="append")
# useable_matches_1920_df.to_sql("useable_matches_1920", con=conn, if_exists="append")
# print(list(events_1920_df))
print(future_events_1920_df.loc[future_events_1920_df.event_name == "2020 Pennsylvania Championship"])
conn.close() | ashabooga/robodojo | get_matches.py | get_matches.py | py | 11,640 | python | en | code | 0 | github-code | 13 |
73648316177 | import config
import mysql.connector
yhteys = mysql.connector.connect(
host='127.0.0.1',
port=3306,
database='flight_game',
user=config.user,
password=config.password,
autocommit=True
)
def hae_maa_koodilla(iso):
    """Print, per airport type, how many airports the given ISO country code has.

    Security fix: the original interpolated `iso` straight into the SQL string
    with an f-string (SQL injection); this version uses a parameterized query,
    letting the MySQL connector escape the value.
    """
    # %s is the mysql-connector placeholder; the value is passed separately.
    sql = "SELECT TYPE, COUNT(*) FROM airport WHERE iso_country = %s GROUP BY TYPE;"
    kursori = yhteys.cursor()
    kursori.execute(sql, (iso,))
    tulos = kursori.fetchall()
    if kursori.rowcount > 0:
        for tieto in tulos:
            print(f"Lentoaseman tyyppi on {tieto[0]} ja lkm: {tieto[1]}")
    else:
        print("Ei onnistunut.")
    return
komento = input("Anna ISO-koodi: ")
hae_maa_koodilla(komento)
| Xanp0/NoelS_Ohjelmisto1 | moduuli_08/teht2_Maakoodi.py | teht2_Maakoodi.py | py | 772 | python | fi | code | 0 | github-code | 13 |
2312676422 | #! python3
# mclip.py - Depending on the keyword given, a corresponding message is copied to the clipboard
# Canned messages keyed by the keyphrase given on the command line.
TEXT = {'agree': """Yes, I agree. That sounds fine to me.""",
        'busy': """Sorry, can we do this later this week or next week?""",
        'upsell': """Would you consider making this a monthly donation?"""}
import sys, pyperclip
if len(sys.argv) < 2:
    # No keyphrase supplied: show usage and quit.
    print('Usage: python mclip.py [keyphrase] - copy phrase text')
    sys.exit()
keyphrase = sys.argv[1] # first command line arg is the keyphrase
if keyphrase in TEXT:
    # Known keyphrase: copy its canned text to the clipboard.
    pyperclip.copy(TEXT[keyphrase])
    print('O texto para {} foi copiado.'.format(keyphrase))
else:
    # Unknown keyphrase: offer to register a new message for it.
    print('Não tens um texto para {} predefenido, deseja adicionar um? (y)es ou (n)o.'.format(keyphrase))
    response = input()
    if response == 'y':
        newMessage = input('Que mensagem quer adicionar para {}?'.format(keyphrase))
        # NOTE(review): TEXT only lives in memory -- nothing in this script
        # persists the new message, so it is lost when the process exits.
        TEXT[keyphrase] = newMessage
        print(TEXT)
else:
sys.exit() | claudioLamelas/projects | python/mclip.py | mclip.py | py | 989 | python | pt | code | 0 | github-code | 13 |
# Pseudocode:
# 1. Read the receipt total, then the number of items, then each item's
#    price and quantity.
# 2. Sum price * quantity over all items and compare against the total.
total = int(input())       # total amount printed on the receipt
item_count = int(input())  # number of distinct purchased items
subtotal = 0               # running sum (renamed: `sum` shadowed the builtin)
for _ in range(item_count):
    price, quantity = map(int, input().split())
    subtotal += price * quantity
# Matches the stated total -> "Yes", otherwise "No".
print("Yes" if total == subtotal else "No")
| Mins00oo/PythonStudy_CT | BACKJOON/Python/B5/B5_25304_영수증.py | B5_25304_영수증.py | py | 664 | python | ko | code | 0 | github-code | 13 |
73917236819 | from globs import *
"""
The first two consecutive numbers to have two distinct prime factors are:
14 = 2 × 7
15 = 3 × 5
The first three consecutive numbers to have three distinct prime factors are:
644 = 2² × 7 × 23
645 = 3 × 5 × 43
646 = 2 × 17 × 19.
Find the first four consecutive integers to have four distinct prime factors
each. What is the first of these numbers?
"""
def main():
    """Find the first of four consecutive integers that each have exactly
    four prime divisors (Project Euler problem 47)."""
    consecutive_count = []
    for i in range(1000, 150000):
        g = gen_get_divisors(i)  # from `globs` (star import): divisors of i
        count = 0
        for n in g:
            if is_prime(n):
                count += 1
        # NOTE(review): assumes gen_get_divisors yields each prime divisor
        # exactly once, so `count` equals the number of *distinct* prime
        # factors -- verify in globs.
        if count == 4:
            consecutive_count.append(i)  # extend the current streak
        else:
            consecutive_count = []       # streak broken: start over
        if len(consecutive_count) == 4:
            break
    return consecutive_count[0]
if __name__ == '__main__':
answer = main()
show_answer(answer)
| gavinmcguigan/gav_euler_challenge_100 | Problem_47/DistinctPrimesFactors.py | DistinctPrimesFactors.py | py | 917 | python | en | code | 1 | github-code | 13 |
16178961995 | from __future__ import print_function, division
import os
import numpy as np
from mdtraj.core.topology import Topology
from mdtraj.utils import cast_indices, in_units_of, open_maybe_zipped
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils.unitcell import lengths_and_angles_to_box_vectors, box_vectors_to_lengths_and_angles
from mdtraj.utils import six
import warnings
__all__ = ['load_pdbx', 'PDBxTrajectoryFile']
##############################################################################
# Code
##############################################################################
@FormatRegistry.register_loader('.pdbx')
@FormatRegistry.register_loader('.cif')
def load_pdbx(filename, stride=None, atom_indices=None, frame=None, no_boxchk=False, top=None):
    """Load a PDBx/mmCIF file from disk.
    Parameters
    ----------
    filename : path-like
        Path to the PDBx/mmCIF file on disk
    stride : int, default=None
        Only read every stride-th model from the file
    atom_indices : array_like, default=None
        If not None, then read only a subset of the atoms coordinates from the
        file. These indices are zero-based (not 1 based, as used by the PDBx/mmCIF
        format). So if you want to load only the first atom in the file, you
        would supply ``atom_indices = np.array([0])``.
    frame : int, default=None
        Use this option to load only a single frame from a trajectory on disk.
        If frame is None, the default, the entire trajectory will be loaded.
        If supplied, ``stride`` will be ignored.
    no_boxchk : bool, default=False
        By default, a heuristic check based on the particle density will be
        performed to determine if the unit cell dimensions are absurd. If the
        particle density is >1000 atoms per nm^3, the unit cell will be
        discarded. This is done because all PDBx/mmCIF files from RCSB contain a ``cell``
        record, even if there are no periodic boundaries, and dummy values are
        filled in instead. This check will filter out those false unit cells and
        avoid potential errors in geometry calculations. Set this variable to
        ``True`` in order to skip this heuristic check.
    top : mdtraj.core.Topology, default=None
        if you give a topology as input the topology won't be parsed from the file
    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
    Examples
    --------
    >>> import mdtraj as md
    >>> pdbx = md.load_pdbx('2EQQ.pdbx')
    >>> print(pdbx)
    <mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>
    See Also
    --------
    mdtraj.PDBxTrajectoryFile : Low level interface to PDBx/mmCIF files
    """
    # Imported here (not at module level) to avoid a circular import.
    from mdtraj import Trajectory
    if not isinstance(filename, (six.string_types, os.PathLike)):
        raise TypeError('filename must be of type string or path-like for load_pdb. '
                        'you supplied %s' % type(filename))
    atom_indices = cast_indices(atom_indices)
    with PDBxTrajectoryFile(filename, top=top) as f:
        # slice(None) keeps every atom when no subset was requested.
        atom_slice = slice(None) if atom_indices is None else atom_indices
        if frame is not None:
            # [frame] (a list) keeps the leading frame axis, so coords stays 3-D.
            coords = f.positions[[frame], atom_slice, :]
        else:
            coords = f.positions[::stride, atom_slice, :]
        assert coords.ndim == 3, 'internal shape error'
        n_frames = len(coords)
        topology = f.topology
        if atom_indices is not None:
            # The input topology shouldn't be modified because
            # subset makes a copy inside the function
            topology = topology.subset(atom_indices)
        if f.unitcell_angles is not None and f.unitcell_lengths is not None:
            # PDBx stores a single cell; replicate it for every frame.
            unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
            unitcell_angles = np.array([f.unitcell_angles] * n_frames)
        else:
            unitcell_lengths = None
            unitcell_angles = None
        in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
        in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
    # NOTE(review): with `frame` given, coords holds exactly one frame, so
    # np.arange(1) == [0] and `time *= frame` still leaves time == [0]. If
    # the intent was to stamp the original frame index, this should be
    # `time += frame` -- confirm against mdtraj's other loaders.
    time = np.arange(len(coords))
    if frame is not None:
        time *= frame
    elif stride is not None:
        time *= stride
    traj = Trajectory(xyz=coords, time=time, topology=topology,
                      unitcell_lengths=unitcell_lengths,
                      unitcell_angles=unitcell_angles)
    if not no_boxchk and traj.unitcell_lengths is not None:
        # Some PDBx/mmCIF files do not *really* have a unit cell, but still
        # have a cell record with a dummy definition. These boxes are usually
        # tiny (e.g., 1 A^3), so check that the particle density in the unit
        # cell is not absurdly high. Standard water density is ~55 M, which
        # yields a particle density ~100 atoms per cubic nm. It should be safe
        # to say that no particle density should exceed 10x that.
        particle_density = traj.top.n_atoms / traj.unitcell_volumes[0]
        if particle_density > 1000:
            warnings.warn('Unlikely unit cell vectors detected in PDB file likely '
                          'resulting from a dummy CRYST1 record. Discarding unit '
                          'cell vectors.', category=UserWarning)
            traj._unitcell_lengths = traj._unitcell_angles = None
    return traj
@FormatRegistry.register_fileobject('.pdbx')
@FormatRegistry.register_fileobject('.cif')
class PDBxTrajectoryFile(object):
    """Interface for reading and writing PDBx/mmCIF files
    Parameters
    ----------
    filename : path-like
        The filename to open. A path to a file on disk.
    mode : {'r', 'w'}
        The mode in which to open the file, either 'r' for read or 'w' for write.
    force_overwrite : bool
        If opened in write mode, and a file by the name of `filename` already
        exists on disk, should we overwrite it?
    top : mdtraj.core.Topology, default=None
        if you give a topology as input the topology won't be parsed from the file
    Attributes
    ----------
    positions : np.ndarray, shape=(n_frames, n_atoms, 3)
    topology : mdtraj.Topology
    closed : bool
    See Also
    --------
    mdtraj.load_pdbx : High-level wrapper that returns a ``md.Trajectory``
    """
    # All coordinates exposed by this class are in nanometers.
    distance_unit = 'nanometers'
    def __init__(self, filename, mode='r', force_overwrite=True, top=None):
        self._open = False
        self._mode = mode
        # Imported lazily so openmm is only required when this class is used.
        from openmm.app import PDBxFile
        from openmm.unit import nanometers
        if mode == 'r':
            # Read mode: parse the whole file eagerly into memory.
            self._open = True
            pdbx = PDBxFile(filename)
            if top is None:
                self._topology = Topology.from_openmm(pdbx.topology)
            else:
                self._topology = top
            # Stack one (n_atoms, 3) array per model into (n_frames, n_atoms, 3).
            positions = [pdbx.getPositions(asNumpy=True, frame=i).value_in_unit(nanometers) for i in range(pdbx.getNumFrames())]
            self._positions = np.array(positions)
            vectors = pdbx.topology.getPeriodicBoxVectors()
            if vectors is not None:
                # Convert box vectors to the (lengths, angles) representation.
                vectors = [np.array(v.value_in_unit(nanometers)) for v in vectors]
                l1, l2, l3, alpha, beta, gamma = box_vectors_to_lengths_and_angles(*vectors)
                self._unitcell_lengths = (l1, l2, l3)
                self._unitcell_angles = (alpha, beta, gamma)
            else:
                self._unitcell_lengths = None
                self._unitcell_angles = None
        elif mode == 'w':
            # Write mode: the header is deferred until the first write() call,
            # because it needs the topology.
            self._open = True
            self._next_model = 0
            self._file = open_maybe_zipped(filename, 'w', force_overwrite)
        else:
            raise ValueError("invalid mode: %s" % mode)
    def write(self, positions, topology, unitcell_lengths=None, unitcell_angles=None):
        """Write one frame of a molecular dynamics trajectory to disk in PDBx/mmCIF format.
        Parameters
        ----------
        positions : array_like
            The list of atomic positions to write.
        topology : mdtraj.Topology
            The Topology defining the model to write.
        unitcell_lengths : {tuple, None}
            Lengths of the three unit cell vectors, or None for a non-periodic system
        unitcell_angles : {tuple, None}
            Angles between the three unit cell vectors, or None for a non-periodic system
        """
        if not self._mode == 'w':
            raise ValueError('file not opened for writing')
        from openmm.app import PDBxFile
        from openmm.unit import nanometers
        if self._next_model == 0:
            # First write: emit the file header and remember the topology.
            self._openmm_topology = topology.to_openmm()
            if unitcell_lengths is None:
                self._openmm_topology.setPeriodicBoxVectors(None)
            else:
                # NOTE(review): indexing [0] means the cell parameters are
                # expected per-frame (shape (n_frames, 3)) and only the first
                # frame's cell is written -- confirm callers pass that shape.
                vectors = lengths_and_angles_to_box_vectors(*unitcell_lengths[0], *unitcell_angles[0])
                self._openmm_topology.setPeriodicBoxVectors(vectors*nanometers)
            PDBxFile.writeHeader(self._openmm_topology, self._file)
            self._next_model = 1
        if len(positions.shape) == 3:
            # Accept (n_frames, n_atoms, 3) input but write only the first frame.
            positions = positions[0]
        PDBxFile.writeModel(self._openmm_topology, positions*nanometers, self._file, self._next_model)
        self._next_model += 1
    @property
    def positions(self):
        """The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'"""
        return self._positions
    @property
    def topology(self):
        """The topology from this PDBx/mmCIF file. Available when a file is opened in mode='r'"""
        return self._topology
    @property
    def unitcell_lengths(self):
        """The unitcell lengths (3-tuple) in this PDBx/mmCIF file. May be None"""
        return self._unitcell_lengths
    @property
    def unitcell_angles(self):
        """The unitcell angles (3-tuple) in this PDBx/mmCIF file. May be None"""
        return self._unitcell_angles
    @property
    def closed(self):
        """Whether the file is closed"""
        return not self._open
    def close(self):
        """Close the PDBx/mmCIF file"""
        # Only write mode holds an OS-level file handle; read mode just flips the flag.
        if self._mode == 'w' and self._open:
            self._file.close()
        self._open = False
    def __del__(self):
        # Best-effort cleanup; prefer using the context-manager protocol below.
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
    def __len__(self):
        "Number of frames in the file"
        if str(self._mode) != 'r':
            raise NotImplementedError('len() only available in mode="r" currently')
        return len(self._positions)
| mdtraj/mdtraj | mdtraj/formats/pdbx.py | pdbx.py | py | 10,601 | python | en | code | 505 | github-code | 13 |
34389914982 | # Python program for two pointers technique
# Find if there is a pair [A0..N-1] with given sum
def isPairSum(A, N, X):
    """Return True if the sorted array A (first N elements) has a pair summing to X.

    Classic two-pointer scan: one cursor at each end. When the current sum is
    too small, only moving the left cursor right can help; when too large,
    only moving the right cursor left can. Runs in O(N).
    """
    lo, hi = 0, N - 1
    while lo < hi:
        pair_sum = A[lo] + A[hi]
        if pair_sum == X:
            return True
        if pair_sum < X:
            lo += 1   # need a larger value
        else:
            hi -= 1   # need a smaller value
    return False
# Array declaration
arr = [2, 3, 5, 8, 9, 10, 11]
# value to search
val = 18
print(isPairSum(arr, len(arr), val))
| NijazK/Two_Pointers_LeetCode | isPairSum.py | isPairSum.py | py | 801 | python | en | code | 0 | github-code | 13 |
10747206392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from forcepho.postprocess import Samples, Residuals
from prospect.plotting.corner import allcorner, scatter, marginal, corner, get_spans, prettify_axes
def multispan(parsets):
    """Merge the per-parameter plotting spans of several chains into one.

    Runs prospect's get_spans on each parameter array, then takes the
    elementwise min of the lower bounds and max of the upper bounds so one
    axis range covers every chain.
    """
    spans = []
    for x in parsets:
        spans.append(get_spans(None, x, weights=None))
    spans = np.array(spans)  # shape: (n_chains, n_params, 2)
    span = spans[:, :, 0].min(axis=0), spans[:, :, 1].max(axis=0)
    span = tuple(np.array(span).T)  # one (lo, hi) pair per parameter
    return span
def plot_corner(patchnames, config, band="CLEAR", smooth=0.05):
    """Overlay posterior corner plots for several S/N runs of one source.

    NOTE(review): the `smooth` parameter is currently unused in this body.
    """
    legends = [f"S/N={s:.0f}" for s in config.snrlist]
    colors = ["slateblue", "darkorange", "firebrick", "grey", "cornflowerblue"]
    labels = ["Flux", r'R$_{half}$ (")', r"$n_{\rm sersic}$", r"$\sqrt{b/a}$", r"PA (radians)"]
    show = [band, "rhalf", "sersic", "q", "pa"]
    print(show)
    xx = []
    for name in patchnames:
        s = Samples(name)
        # Chain of the first (only) source, one row per column in `show`.
        x = np.array([s.chaincat[c][0] for c in show])
        xx.append(x)
        # Loop leftover: the tuning length of the *last* patch is used below
        # for every chain.
        n_tune = s.n_tune
    # Common axis limits, computed over post-tuning samples only.
    span = multispan([x[:, n_tune:] for x in xx])
    kwargs = dict(hist_kwargs=dict(alpha=0.65, histtype="stepfilled"))
    # First sample of the first run's chain, used as the "truth" marker.
    truths = np.atleast_2d(xx[0][:, 0]).T
    fig, axes = pl.subplots(len(labels), len(labels), figsize=(12, 12))
    for x, color in zip(xx, colors[:len(config.snrlist)]):
        axes = corner(x[:, n_tune:], axes, span=span, color=color, **kwargs)
    scatter(truths, axes, zorder=20, marker="o", color="k", edgecolor="k")
    prettify_axes(axes, labels, label_kwargs=dict(fontsize=12), tick_kwargs=dict(labelsize=10))
    # Dotted truth line on each diagonal (marginal) panel.
    [ax.axvline(t, linestyle=":", color="k") for ax, t in zip(np.diag(axes), truths[:, 0])]
    from matplotlib.patches import Patch
    artists = [Patch(color=color, alpha=0.6) for color in colors]
    fig.legend(artists, legends, loc='upper right', bbox_to_anchor=(0.8, 0.8),
               frameon=True, fontsize=14)
    return fig, axes
def plot_residual(patchname, vmin=-1, vmax=5, rfig=None, raxes=None):
    """Show chi images (data, residual, model) for one patch.

    Returns (figure, axes, last image handle, final-sample catalog).
    """
    s = Samples(patchname)
    r = Residuals(patchname.replace("samples", "residuals"))
    data, _, _ = r.make_exp(value="data")
    delta, _, _ = r.make_exp(value="residual")
    ierr, _, _ = r.make_exp(value="ierr")
    if raxes is None:
        # Top row: thin colorbar axes; bottom row: the three images.
        rfig, raxes = pl.subplots(2, 3, gridspec_kw=dict(height_ratios=[1, 40]))
    kw = dict(origin="lower", vmin=vmin, vmax=vmax)
    # Multiplying by ierr converts each panel to units of sigma (chi).
    cb = raxes[1, 0].imshow((data * ierr).T, **kw)
    cb = raxes[1, 1].imshow((delta * ierr).T, **kw)
    cb = raxes[1, 2].imshow(((data-delta) * ierr).T, **kw)
    # `cb` is rebound each time, so all three colorbars share the mapping of
    # the last image (all panels use the same vmin/vmax, so scales agree).
    [pl.colorbar(cb, label=r"$\chi$", cax=ax, orientation="horizontal")
     for ax in raxes[0, :]]
    val = s.get_sample_cat(-1)  # catalog row for the final posterior sample
    return rfig, raxes, cb, val
def plot_traces(patchname, fig=None, axes=None):
    """Plot the HMC parameter traces for one patch, shading the tuning phase."""
    s = Samples(patchname)
    if axes is None:
        fig, axes = pl.subplots(7, 1, sharex=True)
    truth = s.get_sample_cat(0)  # initial sample serves as the reference value
    s.show_chain(axes=axes, truth=truth, bandlist=["CLEAR"])
    # Clip the flux panel's y-range to an extremely wide central percentile
    # interval, which trims only far outliers.
    span = 0.999999426697
    q = 100 * np.array([0.5 - 0.5 * span, 0.5 + 0.5 * span])
    lim = np.percentile(s.chaincat["CLEAR"], list(q))
    axes[0].set_ylim(*lim)
    labels = [r"Flux", r"RA", r"Dec", r"$\sqrt{b/a}$", r"PA (radians)", r"$n_{\rm sersic}$", r'R$_{half}$ (")']
    for i, ax in enumerate(axes):
        ax.set_ylabel(labels[i])
        y = ax.get_ylim()
        # Grey band over the tuning (warm-up) iterations.
        ax.fill_betweenx(y, [0, 0], [s.n_tune, s.n_tune], alpha=0.3, color="gray")
        ax.set_xlim(0, s.chain.shape[0])
    # Loop leftover: labels only the bottom panel (shared x-axis).
    ax.set_xlabel("HMC iteration")
    return fig, axes
if __name__ == "__main__":
# parser
parser = argparse.ArgumentParser()
parser.add_argument("--snrlist", type=float, nargs="*", default=[10, 30, 100])
config = parser.parse_args()
patchnames = [f"./output/v1/patches/patch_single_snr{s:03.0f}_samples.h5"
for s in config.snrlist]
fig, axes = plot_corner(patchnames, config)
fig.savefig("corner_snr.png", dpi=300)
pl.close(fig)
fig, axes, cb, val = plot_residual(patchnames[1])
fig.suptitle("S/N="+'{:.0f}'.format(config.snrlist[1]))
fig.savefig("residuals.png", dpi=300)
pl.close(fig)
fig, axes = pl.subplots(7, 1, sharex=True, figsize=(5, 8))
fig, axes = plot_traces(patchnames[1], fig=fig, axes=axes)
fig.suptitle("S/N="+'{:.0f}'.format(config.snrlist[1]))
fig.savefig("trace.png", dpi=300)
pl.close(fig)
| bd-j/forcepho | demo/demo_snr/single_plot.py | single_plot.py | py | 4,429 | python | en | code | 13 | github-code | 13 |
class Cake:
    """Simple record describing one bakery product."""

    def __init__(self, name, kind, taste, additives, filling):
        self.name = name            # display name, e.g. 'apple pie'
        self.kind = kind            # product category, e.g. 'cake' or 'meringue'
        self.taste = taste          # flavour description
        self.additives = additives  # list of extra ingredients
        self.filling = filling      # filling description ('' if none)

    def __repr__(self):
        # Debug-friendly representation; the original class had none.
        return (f"{type(self).__name__}(name={self.name!r}, kind={self.kind!r}, "
                f"taste={self.taste!r}, additives={self.additives!r}, "
                f"filling={self.filling!r})")
cake1 = Cake('apple pie','cake','apple',['apple'],'')
cake2 = Cake('strawberry pie','cake','strawberry',['strawberry'],'')
cake3 = Cake('Super Sweet Maringue','meringue', 'very sweet', [], '')
bakery_offer=[cake1,cake2,cake3]
print("Today in our offer:")
for cake in bakery_offer:
print(f"{cake.name}") | rzemien94/Python_courses | PythonSrednioZaawansowany/lesson82classess.py | lesson82classess.py | py | 522 | python | en | code | 0 | github-code | 13 |
5718606576 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'geecode@outlook.com'
__version__ = '1.0'
import sys
import math
from io import StringIO
import token
import tokenize
import argparse
import json
class CosineDiff(object):
    """Cosine similarity between Python snippets, using token frequencies.

    A snippet is reduced to a bag of NAME/NUMBER/STRING tokens (comments and
    docstrings excluded); two snippets are then compared via the cosine of
    their frequency vectors.
    """

    @staticmethod
    def __token_frequency(source):
        """Return {"<toktype>.<text>": count} for the NAME/NUMBER/STRING
        tokens of `source` (docstrings and comments are skipped)."""
        io_obj = StringIO(u'' + source)
        tf = {}
        prev_toktype = token.INDENT
        last_lineno = -1
        last_col = 0
        tokgen = tokenize.generate_tokens(io_obj.readline)
        for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
            if slineno > last_lineno:
                last_col = 0
            if scol > last_col:
                # out += (" " * (scol - last_col))
                pass
            # A STRING right after an INDENT is treated as a docstring and
            # excluded from the bag of tokens.
            if toktype == token.STRING and prev_toktype == token.INDENT:
                # Docstring
                pass
            elif toktype == tokenize.COMMENT:
                # Comment
                pass
            elif toktype == tokenize.NAME or toktype == tokenize.NUMBER or toktype == tokenize.STRING:
                if ttext.strip():
                    key = str(toktype) + '.' + ttext  # add token type as prefix
                    if tf.get(key):
                        tf[key] = tf.get(key) + 1
                    else:
                        tf[key] = 1
            prev_toktype = toktype
            last_col = ecol
            last_lineno = elineno
        return tf

    @staticmethod
    def __quadratic_sum(number_list):
        """Return the sum of squares of `number_list`."""
        result = 0
        for x in number_list:
            result += x * x
        return result

    @staticmethod
    def __get_cosine(a_frequency, b_frequency):
        """Cosine of the two frequency vectors.

        Raises ZeroDivisionError if either dict is empty (zero vector).
        """
        up = 0.0
        for key in a_frequency.keys():
            if b_frequency.get(key):
                up += a_frequency[key] * b_frequency[key]
        a = CosineDiff.__quadratic_sum(a_frequency.values())
        b = CosineDiff.__quadratic_sum(b_frequency.values())
        return up / math.sqrt(a * b)

    @staticmethod
    def normalize(code_str_list):
        """Return [(index, token_frequency_dict), ...] for each snippet."""
        tf_list = []
        for index, code_str in enumerate(code_str_list):
            tf = CosineDiff.__token_frequency(code_str)
            tf_list.append((index, tf))
        return tf_list

    @staticmethod
    def similarity(a_code, b_code):
        """Return the cosine similarity (0..1) of two normalized
        token-frequency dicts produced by `normalize`."""
        assert a_code is not None
        # Bug fix: the original asserted `a_code is not None` twice and never
        # checked b_code.
        assert b_code is not None
        return CosineDiff.__get_cosine(a_code, b_code)
def detect(code_str_list, diff_method=CosineDiff):
    """Compare the first snippet against all the others.

    Returns [(original_index, similarity), ...] for every snippet after the
    first, in their original order; [] if fewer than two snippets are given.
    """
    if len(code_str_list) < 2:
        return []
    normalized = diff_method.normalize(code_str_list)
    (_, base_tf) = normalized[0]
    return [(idx, diff_method.similarity(base_tf, tf))
            for idx, tf in normalized[1:]]
def find_similar(similarity_threshold, code_list, limit):
    """Return up to `limit` snippets (beyond the first) whose similarity to
    code_list[0] strictly exceeds `similarity_threshold`, most similar first."""
    if len(code_list) < 2:
        return []
    ranked = sorted(detect(code_list), key=lambda pair: pair[1], reverse=True)
    matches = []
    for idx, score in ranked:
        # ranked is sorted descending, so the first miss ends the scan.
        if len(matches) >= limit or score <= similarity_threshold:
            break
        matches.append(code_list[idx])
    return matches
def getSimilarExample(code_list, similarity_threshold=0.0, limit=1):
    """Return the best-matching example snippet(s) for code_list[0].

    Generalized: the threshold and result count were hard-coded (0.0 / 1);
    they are now keyword parameters whose defaults reproduce the original
    behavior, so existing callers are unaffected.
    """
    return find_similar(similarity_threshold, code_list, limit)
# def run():
# """
# The console_scripts Entry Point in setup.py
# """
# def get_file(value):
# return open(value, 'r')
# parser = argparse.ArgumentParser(description='A simple example finder, read files from stdin as '
# 'json array or file list')
# parser.add_argument('-t', metavar='threshold', nargs='?', type=float, default=0.5,
# help='similarity threshold, 0.5 by default')
# parser.add_argument('-n', metavar='limit', nargs='?', type=int, default=1, help='result size, 1 by default')
# parser.add_argument('-f', metavar='file', nargs='+', type=get_file, help='the base & examples source files')
# args = parser.parse_args()
# similarity_threshold = args.t
# limit = args.n
# if args.f:
# code_list = [f.read() for f in args.f]
# else:
# code_list = [item for item in json.load(sys.stdin)]
# examples = find_similar(similarity_threshold, code_list, limit)
# print(json.dumps(examples, separators=(',', ':')))
# if __name__ == '__main__':
# run()
| qiuxfeng1985/geecode-sublime-plugin | geecode_similar.py | geecode_similar.py | py | 5,034 | python | en | code | 0 | github-code | 13 |
70838629459 | import tensorflow as tf
class IOU(tf.keras.metrics.Metric):
    """Streaming mean intersection-over-union for axis-aligned boxes.

    Boxes are encoded as [x1, y1, x2, y2] along the last axis. Each call to
    `update_state` adds one batch's mean IOU to a running total; `result`
    reports total / number-of-batches.
    """

    def __init__(self, **kwargs):
        super(IOU, self).__init__(**kwargs)
        # `iou` mirrors the running mean so external code reading
        # `metric.iou` keeps working; the real accumulators follow.
        self.iou = self.add_weight(name="iou", initializer="zeros")
        self.total_iou = self.add_weight(name="total_iou", initializer="zeros")
        self.num_ex = self.add_weight(name="num_ex", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        def get_box(y):
            # Unpack the [x1, y1, x2, y2] columns.
            x1, y1, x2, y2 = y[:, 0], y[:, 1], y[:, 2], y[:, 3]
            return x1, y1, x2, y2

        def get_area(x1, y1, x2, y2):
            return tf.math.abs(x2 - x1) * tf.math.abs(y2 - y1)

        gt_x1, gt_y1, gt_x2, gt_y2 = get_box(y_true)
        p_x1, p_y1, p_x2, p_y2 = get_box(y_pred)
        # Intersection rectangle corners.
        i_x1 = tf.maximum(gt_x1, p_x1)
        i_y1 = tf.maximum(gt_y1, p_y1)
        i_x2 = tf.minimum(gt_x2, p_x2)
        i_y2 = tf.minimum(gt_y2, p_y2)
        i_area = get_area(i_x1, i_y1, i_x2, i_y2)
        # NOTE(review): because get_area takes abs(), non-overlapping boxes
        # yield a positive "intersection" instead of 0; consider clamping the
        # widths at 0. Left as-is to keep the metric's historical values.
        u_area = (
            get_area(gt_x1, gt_y1, gt_x2, gt_y2)
            + get_area(p_x1, p_y1, p_x2, p_y2)
            - i_area
        )
        iou = tf.math.divide(i_area, u_area)
        self.num_ex.assign_add(1)
        self.total_iou.assign_add(tf.reduce_mean(iou))
        # Bug fix: assign into the variable instead of rebinding the
        # attribute to a fresh tensor on every step.
        self.iou.assign(tf.math.divide(self.total_iou, self.num_ex))

    def result(self):
        return self.iou

    def reset_state(self):
        # Called at the end of each epoch. Bug fix: the original called
        # add_weight() again here, creating brand-new variables every epoch
        # instead of zeroing the existing accumulators.
        self.iou.assign(0.0)
        self.total_iou.assign(0.0)
        self.num_ex.assign(0.0)
37170479213 | from itertools import takewhile
cCLnv=len
cCLnV=float
cCLnQ=int
cCLni=range
cCLnK=enumerate
cCLnR=list
cCLnr=max
cCLnF=min
from typing import NamedTuple
from p2.src.algorithm_api import Algorithm
from p2.src.data_api import Instance,Solution,Schedule,Task
cCLnG=1
cCLnA=0
def cCLne(cCLnu, cCLnE, cCLnP, cCLnb, cCLnJ, enumerated, cCLnz):
    """Greedily assign ready tasks to free machines at time `cCLnz`.

    De-obfuscated from minified code; the function and parameter names are
    kept for backward compatibility.
      cCLnu      -- free machines (MST namedtuples; field cCLnI is the speed factor)
      cCLnE      -- ready [task_id, Task] pairs
      cCLnP      -- accumulator of scheduled task ids
      cCLnb      -- full machine-state list, indexed by machine.index
      cCLnJ      -- per-machine schedules (lists of task ids)
      enumerated -- global pending [task_id, Task] list
      cCLnz      -- current simulation time
    Machines are taken fastest-first (smallest duration multiplier) and each
    receives the shortest remaining ready task (SPT rule).
    """
    cCLnu.sort(key=lambda machine: machine.cCLnI)      # ascending duration multiplier
    cCLnE.sort(key=lambda pair: pair[1].duration)      # shortest task first
    for machine in cCLnu:
        if not cCLnE:
            break  # no ready task left for the remaining machines
        entry = cCLnE.pop(0)
        task_id, task = entry[0], entry[1]
        # Completion time: duration scaled by this machine's speed factor.
        finish = cCLnz + task.duration * machine.cCLnI
        cCLnP.append(task_id)
        cCLnb[machine.index] = cCLnb[machine.index]._replace(t=finish)
        cCLnJ[machine.index].append(task_id)
        enumerated.remove(entry)  # drop the exact pending entry just scheduled
class Algorithm136715(Algorithm):
    """List-scheduling heuristic minimizing mean flow time.

    De-obfuscated from minified code; public names, the run() signature, and
    the MST field names (used by the sibling helper `cCLne`) are unchanged.
    At each time step the shortest ready task goes to the fastest free
    machine; when nothing can happen, time jumps to the next relevant event.
    """

    def run(self, cCLno: Instance) -> Solution:
        class MST(NamedTuple):
            # Machine STate. Field names must stay: `cCLne` reads them.
            cCLnI: float  # speed factor (duration multiplier; smaller = faster)
            t: float      # time at which the machine becomes free
            index: int    # position in the machine list

        now: float = 0
        scheduled_ids = []  # ids of tasks assigned so far
        per_machine = [[] for _ in range(cCLno.no_machines)]
        machines = [MST(speed, 0, i) for i, speed in enumerate(cCLno.machine_speeds)]
        tasks = cCLno.tasks
        tasks.sort(key=lambda task: task.ready)  # pending list stays ready-sorted
        pending = [[i, val] for i, val in enumerate(tasks, start=1)]  # 1-based ids
        while len(scheduled_ids) < cCLno.no_tasks:
            free = [m for m in machines if m.t <= now]
            # Pending is sorted by ready time, so takewhile yields every ready task.
            ready = list(takewhile(lambda pair: pair[1].ready <= now, pending))
            if free and ready:
                cCLne(free, ready, scheduled_ids, machines, per_machine, pending, now)
                now += 1
            elif not free:
                # Jump to the earliest machine-release time (never backwards).
                now = max(min(machines, key=lambda m: m.t).t, now)
            elif not ready:
                # Jump to the next task's ready time.
                now = min(pending, key=lambda pair: pair[1].ready)[1].ready
            else:
                now += 1  # unreachable guard, kept from the original
        # Replay each machine's schedule to accumulate the total flow time.
        total_flow = 0
        for machine_idx in range(cCLno.no_machines):
            now = 0
            for task_id in per_machine[machine_idx]:
                # Wait for the task to become ready, then run it at this
                # machine's speed.
                now += max(cCLno.tasks[task_id - 1].ready - now, 0)
                now += cCLno.machine_speeds[machine_idx] * cCLno.tasks[task_id - 1].duration
                total_flow += now - cCLno.tasks[task_id - 1].ready
        mean_flow = total_flow / cCLno.no_tasks
        schedule = Schedule(cCLno.no_tasks, cCLno.no_machines, per_machine)
        return Solution(mean_flow, schedule)
| KamilPiechowiak/ptsz | p2/src/id136715/algorithm.py | algorithm.py | py | 1,913 | python | en | code | 0 | github-code | 13 |
14739268805 | #Uses python3
import sys
def dfs(adj, used, order, x):
    """Depth-first visit from vertex ``x``; append vertices post-order.

    ``adj`` maps vertex -> list of successors, ``used`` is the visited
    flags list, ``order`` collects vertices after all descendants.
    """
    used[x] = True
    for w in adj[x]:
        if not used[w]:
            dfs(adj, used, order, w)
    # Post-order append: x is recorded only after everything reachable
    # from it, so reversing the list gives a topological order.
    order.append(x)


def toposort(adj):
    """Return a topological ordering of the DAG ``adj`` (vertex -> successors).

    Fixes the original, which appended the undefined return value of
    ``dfs`` (a NameError on ``v``), revisited already-used vertices, and
    never reversed the post-order.
    """
    used = [False] * len(adj)
    order = []
    for x in adj:
        if not used[x]:
            dfs(adj, used, order, x)
    order.reverse()
    return order
if __name__ == '__main__':
    # Reads the graph from a local file instead of stdin (the stdin line is
    # kept commented out below for the grader).
    file1 = open("01top.txt", "r")
    # NOTE(review): shadows the builtin `input`, and `file1` is never
    # closed — a `with` block would be safer.
    input = file1.read()
    #input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]          # vertex count, edge count
    data = data[2:]
    # Flat int stream -> list of 1-based (a, b) edge pairs.
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    adj = dict([(i, []) for i in range(n)]) #[[] for _ in range(n)]
    for (a, b) in edges:
        adj[a - 1].append(b - 1)   # store 0-based adjacency
    print(adj)
    order = toposort(adj)
    # Print the ordering back in 1-based vertex labels.
    for x in order:
        print(x + 1, end=' ')
| price-dj/Algorithms_On_Graphs | Week2/workspace/pset2/toposortv5.py | toposortv5.py | py | 853 | python | en | code | 0 | github-code | 13 |
23249081116 | #!/usr/bin/env python3
# encoding: utf-8
import random
from typing import List
class Solution:
    """Find the k-th largest element via randomized quickselect (LeetCode 215)."""

    def _pivot(self, nums: List[int], start: int, end: int) -> int:
        """Partition ``nums[start:end]`` around ``nums[start]``.

        On return, values at ``[start, idx)`` are <= the pivot, values at
        ``(idx, end)`` are greater, and the pivot sits at ``idx`` (its
        sorted position within the slice), which is returned.
        """
        assert start < end
        if start + 1 == end:
            return start
        pivot_val = nums[start]
        # `boundary` is the first slot holding a value greater than the pivot.
        boundary = start + 1
        while boundary < end and nums[boundary] <= pivot_val:
            boundary += 1
        # Sweep the remainder, pulling every <=-pivot value back to the boundary.
        for scan in range(boundary + 1, end):
            if nums[scan] <= pivot_val:
                nums[boundary], nums[scan] = nums[scan], nums[boundary]
                boundary += 1
        pivot_idx = boundary - 1
        # Drop the pivot into its final place.
        nums[start], nums[pivot_idx] = nums[pivot_idx], pivot_val
        return pivot_idx

    def _find_kth(self, nums: List[int], k: int, start: int, end: int) -> int:
        """Return the value that would occupy index ``k`` were ``nums`` sorted."""
        while True:
            # Randomized pivot choice: swap a random element to the front.
            swap_idx = random.randrange(start, end)
            nums[start], nums[swap_idx] = nums[swap_idx], nums[start]
            idx = self._pivot(nums, start, end)
            if k > idx:
                start = idx + 1
            elif k < idx:
                end = idx
            else:
                return nums[k]

    def findKthLargest(self, nums: List[int], k: int) -> int:
        """The k-th largest element is the (n - k)-th smallest (0-based)."""
        return self._find_kth(nums, len(nums) - k, 0, len(nums))
| misaka-10032/leetcode | coding/00215-kth-largest-element-in-array/solution.py | solution.py | py | 1,529 | python | en | code | 1 | github-code | 13 |
2487537375 | import spaco as spaco
import importlib
importlib.reload(spaco)
import numpy as np
import pandas as pd
import copy
def dataGen(I, T, J, q, rate, s=3, K0=3, SNR1=1.0, SNR2=3.0):
    """Simulate a sparse functional data set for SPACO-style experiments.

    I subjects observed over T time points on J features, with q auxiliary
    covariates of which the first min(s, q) influence the subject scores.
    ``rate`` is the fraction of time points kept observed per subject.

    Returns (Xcomplete, signal_complete, Xobs, Obs, T0, Phi0, V0, U,
    PhiV0, Z, beta); ``Xobs`` equals ``Xcomplete`` with unobserved time
    points set to NaN and ``Obs`` is the matching 0/1 int mask.
    """
    grid = np.arange(T)
    # Time basis: constant, quarter-circle and cosine components.
    phi = np.zeros((T, K0))
    phi[:, 0] = 1.0
    phi[:, 1] = grid / T
    phi[:, 1] = np.sqrt(1 - phi[:, 1] ** 2)
    phi[:, 2] = np.cos(grid / T * 4 * np.pi)
    # Normalise each column to unit RMS, then apply the signal scale.
    scale = (np.log(J) + np.log(T)) / np.sqrt(I * T * rate) * SNR1
    for k in range(K0):
        phi[:, k] = phi[:, k] / np.sqrt(np.mean(phi[:, k] ** 2)) * scale
    vload = np.random.normal(size=(J, K0)) / np.sqrt(J)   # feature loadings
    covar = np.random.normal(size=(I, q))                  # auxiliary covariates
    subj = np.random.normal(size=(I, K0))                  # subject scores
    coef = np.zeros((q, K0))
    n_active = min(s, q)
    for k in range(K0):
        if q > 0:
            # Sparse covariate effect on component k.
            coef[:n_active, k] = (np.random.normal(size=n_active)
                                  * np.sqrt(np.log(q) / I) * SNR2)
            subj[:, k] = subj[:, k] + covar @ coef[:, k]
        # Standardise the scores of each component.
        subj[:, k] = (subj[:, k] - np.mean(subj[:, k])) / np.std(subj[:, k])
    x_full = np.random.normal(size=(I, T, J))
    signal = np.zeros(x_full.shape)
    phi_v = np.zeros((T, J, K0))
    for k in range(K0):
        phi_v[:, :, k] = np.outer(phi[:, k], vload[:, k])
    for i in range(I):
        for k in range(K0):
            signal[i] += phi_v[:, :, k] * subj[i, k]
        x_full[i] += signal[i]
    # Hide a random subset of time points per subject.
    obs_mask = np.ones(x_full.shape, dtype=int)
    x_obs = x_full.copy()
    n_hidden = T - int(rate * T)
    for i in range(I):
        hidden = np.random.choice(grid, replace=False, size=n_hidden)
        obs_mask[i, hidden, :] = 0
        x_obs[i, hidden, :] = np.nan
    return x_full, signal, x_obs, obs_mask, grid, phi, vload, subj, phi_v, covar, coef
# Simulation settings: seed, problem sizes and signal-to-noise ratios.
it = 101
I = 100; T = 30; J = 10; q = 100;
SNR2 = 10.0; SNR1 = 1.0; rate = 0.1
spaco.seed_everything(seed=it)
data = dataGen(I=I, T=T, J=J, q=q, rate = rate, s=3, K0 = 3, SNR1 = SNR1, SNR2 = SNR2)
# Cross-validated rank selection on the observed data:
# data[2]=Xobs, data[3]=Obs mask, data[9]=Z covariates, data[4]=time grid.
ranks = np.arange(1,11)
negliks = spaco.rank_selection_function(X = data[2], O = data[3], Z = data[9],
                                        time_stamps = data[4], ranks=ranks, early_stop = True,
                                        max_iter = 30, cv_iter = 5, add_std = 0.0)
means = negliks.mean(axis = 0)
# One-half-standard-error band around the mean curve.
means_std = means+negliks.std(axis = 0)/np.sqrt(I)*0.5
# NOTE(review): means and means_std are each filtered by their own NaN
# mask; if the NaN positions ever differ, indices against `ranks` below
# would misalign — verify against rank_selection_function's contract.
means=means[~np.isnan(means)]
means_std =means_std[~np.isnan(means_std)]
idx_min = np.argmin(means)
rank_min = ranks[idx_min]          # rank minimising the mean neg. likelihood
rank_std= ranks[np.where(means<=means_std[idx_min])][0]  # 1-SE-style rule
print(rank_min)
print(rank_std)
| LeyingGuan/SPACO | tests/example_spaco_RankSelection.py | example_spaco_RankSelection.py | py | 2,557 | python | en | code | 0 | github-code | 13 |
4534334079 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 11 13:26:13 2021
@author: asus
"""
import numpy as np
import matplotlib.pyplot as plt
# Lid-driven-cavity post-processing: plot centreline velocity profiles
# from precomputed LBM velocity fields stored as text matrices.
N = 256       # grid resolution
Re = 400      # Reynolds number (used only to build the file names)
title_u = "Re=" + str(Re) + "_N=" + str(N) + "_u.txt"
title_v = "Re=" + str(Re) + "_N=" + str(N) + "_v.txt"
u = np.loadtxt(title_u)
v = np.loadtxt(title_v)
# Unit-square domain coordinates.
xmin = 0
xmax = 1
ymin = 0
ymax = 1
xs = np.linspace(xmin, xmax, N)
ys = np.linspace(ymin, ymax, N)
x, y = np.meshgrid(xs, ys)
half = int(np.floor(N/2))
vertical = u[:,half]        # u along the vertical centreline x = 0.5
horizontal = v[half,:]      # v along the horizontal centreline y = 0.5
if N%2 == 0:
    # Even grids have no exact centre column/row: average the two middle ones.
    vertical = (u[:,half] + u[:,half-1])/2
    horizontal = (v[half,:] + v[half-1,:])/2
# vertical centerline at num half
#"""
plt.figure(dpi = 800)
plt.plot(ys, vertical, 'r')
plt.plot(ys, vertical, 'k.')
plt.title(r"$u$ along the the vertical centerline $x=0.5$")
plt.xlabel(r"$y$")
plt.ylabel(r"$x$-velocity: $u$")
plt.show()
print(f"u min is {min(vertical)}")
plt.figure(dpi = 800)
plt.plot(xs, horizontal, 'r')
plt.plot(xs, horizontal, 'k.')
plt.title(r"$v$ along the the horizontal centerline $y=0.5$")
plt.xlabel(r"$x$")
plt.ylabel(r"$y$-velocity: $v$")
plt.show()
#"""
print(f"v min is {min(horizontal)}")
print(f"v max is {max(horizontal)}")
# NOTE(review): the *2.5 factor presumably rescales velocities to the
# reference (e.g. Ghia et al.) normalisation — confirm.
print(f"\nu min is {min(vertical*2.5)}")
print(f"v min is {min(horizontal*2.5)}")
print(f"v max is {max(horizontal*2.5)}")
14802809103 | import tkinter
from tkinter import filedialog, CENTER, NW
from PIL import ImageTk, Image
from gender_recognition_ai import gendernn
# Tkinter front-end for the gender-recognition model.
root = tkinter.Tk()
root.geometry("600x400+0+0")
root.title("Gender Recognition AI v1.0")
root.iconbitmap("gender.ico")


def open_picture(path="bg.png"):
    """Load `path`, scale it to 600 px wide (keeping aspect), resize and
    re-centre the main window to fit, and return the PIL image."""
    width = 600
    img = Image.open(path)
    w_percent = (width / float(img.size[0]))
    h_size = int((float(img.size[1]) * float(w_percent)))
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; on newer
    # Pillow this needs Image.LANCZOS — confirm the pinned Pillow version.
    img = img.resize((width, h_size), Image.ANTIALIAS)
    root.geometry("%dx%d+%d+%d" %
                  (width, h_size, (root.winfo_screenwidth() - width)/2, (root.winfo_screenheight() - h_size*1.25)/2))
    return img


picture = ImageTk.PhotoImage(open_picture())
pic_label = tkinter.Label(image=picture)
pic_label.grid(row=0, column=0, columnspan=3)
gender_label = tkinter.Label(text="Gender: none")
gender_label.config(font=("Arial", 20))
gender_label.place(relx=0, rely=0, anchor=NW)


# On button press, prompts the user to select a file then predicts the gender
def predict_pressed():
    root.filename = filedialog.askopenfilename(
        title="Select an image", filetypes=(("all files", "*.*"), ("png files", "*.png"), ("jpg files", "*.jpg")))
    new_picture = ImageTk.PhotoImage(open_picture(root.filename))
    pic_label.configure(image=new_picture)
    # Keep a reference on the widget so Tk's image is not garbage-collected.
    pic_label.image = new_picture
    prediction = gendernn.make_prediction(root.filename)
    gender_label["text"] = "Gender: %s" % prediction


button_predict = tkinter.Button(root, text="Open Picture", command=predict_pressed)
button_predict.config(font=("Arial", 14))
button_predict.place(relx=0.5, rely=0.94, anchor=CENTER)
root.mainloop()
| AnakinTrotter/gender-recognition-ai | gender_recognition_ai/GUI.py | GUI.py | py | 1,650 | python | en | code | 4 | github-code | 13 |
3597331657 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
# Определение функции Матиоша
def matyas(x):
    """Matyas function f(a, b) = 0.26(a^2 + b^2) - 0.48ab for x = [a, b];
    its global minimum is 0 at the origin."""
    a, b = x[0], x[1]
    return 0.26 * (a * a + b * b) - 0.48 * a * b
# Функция для отслеживания значений функции в каждой итерации
def track_convergence(result):
    """Return a per-iteration objective trace for a SciPy minimize result.

    ``result['nit']`` is the *number* of iterations (an int), not an
    iterable — the original ``for iteration in result['nit']`` raised
    ``TypeError``.  SciPy's ``OptimizeResult`` keeps only the final
    objective value, so the best available proxy is that value repeated
    once per iteration (a true trace would require a ``callback``).
    """
    return [result['fun']] * result['nit']
# Nelder-Mead (derivative-free simplex) method
def nelder_mead():
    return minimize(matyas, [0, 0], method='Nelder-Mead', options={'disp': True})


# BFGS (quasi-Newton) method
def bfgs():
    return minimize(matyas, [0, 0], method='BFGS', options={'disp': True})


# L-BFGS-B (limited-memory, bound-constrained) method
# NOTE(review): all three start at [0, 0], which is already the Matyas
# global minimum, so they converge essentially immediately.
def l_bfgs_b():
    return minimize(matyas, [0, 0], method='L-BFGS-B', options={'disp': True})
# Run each optimiser and collect its convergence data.
nelder_mead_result = nelder_mead()
bfgs_result = bfgs()
l_bfgs_b_result = l_bfgs_b()

nelder_mead_convergence = track_convergence(nelder_mead_result)
bfgs_convergence = track_convergence(bfgs_result)
l_bfgs_b_convergence = track_convergence(l_bfgs_b_result)

# Plot the convergence curves of the three methods side by side.
plt.plot(nelder_mead_convergence, label='Nelder-Mead')
plt.plot(bfgs_convergence, label='BFGS')
plt.plot(l_bfgs_b_convergence, label='L-BFGS-B')
plt.xlabel('Iteration')
plt.ylabel('Objective Function Value')
plt.legend()
plt.show()
# В этой секции мы будем минимизировать с помощью стандартного SGD алгоритма с моментом и уточнением Нестерова
def matyas(x, y):
    """Matyas test function 0.26(x^2 + y^2) - 0.48xy; minimum 0 at (0, 0).

    Two-argument variant (shadows the earlier list-based definition).
    """
    return 0.26 * (x * x + y * y) - 0.48 * x * y
def grad_matyas(x, y):
    """Analytic gradient of the Matyas function, returned as (df/dx, df/dy)."""
    return 0.52 * x - 0.48 * y, 0.52 * y - 0.48 * x
def sgd_momentum_nesterov(lr, momentum, nesterov, num_epochs):
    """Minimise the 2-D Matyas function with momentum SGD from the origin.

    Returns the list of objective values recorded after each update.
    NOTE(review): when ``nesterov`` is True, the velocity from the plain
    momentum step is immediately recomputed using the look-ahead gradient,
    so momentum multiplies an already-updated velocity and the look-ahead
    gradient is a second evaluation per step.  This deviates from the
    textbook Nesterov update — confirm against the intended formulation.
    """
    x = 0
    y = 0
    velocity_x = 0
    velocity_y = 0
    trajectory = []
    for epoch in range(num_epochs):
        grad_x, grad_y = grad_matyas(x, y)
        # Plain momentum step.
        velocity_x = momentum * velocity_x - lr * grad_x
        velocity_y = momentum * velocity_y - lr * grad_y
        if nesterov:
            # Look-ahead position, then replace the velocity using the
            # gradient evaluated there.
            x_tilde = x + momentum * velocity_x
            y_tilde = y + momentum * velocity_y
            grad_x_tilde, grad_y_tilde = grad_matyas(x_tilde, y_tilde)
            velocity_x = momentum * velocity_x - lr * grad_x_tilde
            velocity_y = momentum * velocity_y - lr * grad_y_tilde
        x += velocity_x
        y += velocity_y
        trajectory.append(matyas(x, y))
    return trajectory
# Hyper-parameters for the momentum-SGD run on the Matyas function.
lr = 0.1
momentum = 0.9
nesterov = True
num_epochs = 100

trajectory = sgd_momentum_nesterov(lr, momentum, nesterov, num_epochs)

# Plot the objective value after every update.
plt.plot(trajectory)
plt.xlabel('Iteration')
plt.ylabel('Objective Function Value')
plt.title('SGD with Momentum and Nesterov')
plt.show()
# In this section we minimize the function again; note that the code below actually uses SciPy's Nelder-Mead method, not Adam as the original comment claimed.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def matyas(x):
    """Matyas function for a 2-vector x (also broadcasts over NumPy arrays):
    0.26(x0^2 + x1^2) - 0.48*x0*x1, minimum 0 at the origin."""
    x0, x1 = x[0], x[1]
    return 0.26 * (x0 * x0 + x1 * x1) - 0.48 * x0 * x1
# Set the initial point.
x0 = [0, 0]

# Minimize the function (method='Nelder-Mead'; the surrounding section
# originally claimed Adam, which SciPy's minimize does not provide).
result = minimize(matyas, x0, method='Nelder-Mead', options={'disp': True})

# Extract the optimal point.
x_opt = result.x

# Print the optimal values (output text kept in the original language).
print("Оптимальные значения:")
print(f"x: {x_opt[0]}, y: {x_opt[1]}")

# Plot the Matyas function surface; matyas([X, Y]) broadcasts over the grid.
x = np.linspace(-10, 10, 100)
y = np.linspace(-10, 10, 100)
X, Y = np.meshgrid(x, y)
Z = matyas([X, Y])

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f(x, y)')
plt.title('Matyas Function')
plt.show()
| tigersing/dmytro.kocherzhenko | practice03.py | practice03.py | py | 4,046 | python | ru | code | 0 | github-code | 13 |
22478705465 | import os
import shutil
import random
from PIL import Image
from collections import Counter
def statistic_images(path):
    '''
    Print the mean and the mode of the image sizes found under ``path``.
    (Original docstring: 统计图片size.)  Assumes the directory contains at
    least one readable image.
    '''
    trainset = os.listdir(path)
    result = []
    for filename in trainset:
        # Context manager guarantees the file handle is released even if
        # reading the size fails (the original only closed on success).
        with Image.open(path + "/" + filename) as image:
            result.append(image.size)
    x = [item[0] for item in result]
    y = [item[1] for item in result]
    print("均值:", (sum(x) / len(x), sum(y) / len(y)))
    # most_common(1)[0] is the actual mode (size, count).  The original
    # list(Counter(result).items())[0] returned the *first-seen* size,
    # which equals the mode only by accident.
    print("众数:", Counter(result).most_common(1)[0])
def resize_images(paths=(), size=20):
    '''
    Resize every image in each directory of ``paths`` to ``size`` x ``size``
    and save it as PNG into a sibling directory named ``<dir>-s<size>/``.
    Files Pillow cannot convert are skipped.  (Original docstring:
    更改图片尺寸.)  The default is now an immutable tuple instead of a
    shared mutable list.
    '''
    for directory in paths:
        out_dir = "{}-s{}/".format(directory, size)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        for filename in os.listdir(directory):
            # Context manager releases the source file handle in all cases.
            with Image.open(directory + "/" + filename) as image:
                try:
                    # Swap the 3-char extension for "png".
                    image.resize((size, size)).save(out_dir + filename[:-3] + "png")
                except Exception:
                    # Best-effort conversion: skip files that fail, but do not
                    # swallow KeyboardInterrupt/SystemExit like the original
                    # bare `except:` did.
                    pass
def rename_images(paths=[]):
    '''
    Give every image across all source directories a unique running index
    by renaming it into ./images-renamed/.  (Original docstring:
    保证图片序号唯一 — needed because `process` may be run several times.)
    NOTE(review): mutable default argument and a hard-coded destination;
    assumes file names contain exactly two dashes ("header-body1-body2").
    '''
    roots = paths
    root = "./images-renamed/"
    cur = 0   # global running index across all directories
    for root1 in roots:
        filenames = os.listdir(root1)
        cnt = 0
        total = len(filenames)
        for filename in filenames:
            cnt += 1
            cur += 1
            # Progress indicator on a single console line.
            print("{}/{}/{}".format(cnt, total, cur), end="\r")
            [header, body1, body2] = filename.split("-")
            # Replace the old header with the new unique index.
            os.rename(root1+filename, root+"{}-{}-{}".format(cur, body1, body2))
    print()
def count_male_female(path):
    '''
    Print the total number of files under ``path`` and the male/female
    split.  The gender flag is the first character of the third
    "-"-separated part of each file name: "0" = female, "1" = male.
    (Original docstring: 统计图片的男女比例.)
    '''
    filenames = os.listdir(path)
    print("total:{}".format(len(filenames)))
    counts = {"0": 0, "1": 0}
    for filename in filenames:
        flag = filename.split("-")[2][0]
        if flag in counts:
            counts[flag] += 1
    print("male:{}, female:{}".format(counts["1"], counts["0"]))
def split_train_test(root, n_test):
    '''
    Move the images under ``root`` into ./trainset/ and ./testset/,
    putting ``n_test`` gender-alternating images into the test set and
    everything else into the training set, then remove ``root``.
    (Original docstring: 拆分训练集和测试集.)
    '''
    if not os.path.exists("./trainset/"):
        os.mkdir("./trainset/")
    if not os.path.exists("./testset/"):
        os.mkdir("./testset/")
    filenames = os.listdir(root)
    random.shuffle(filenames)
    trainset = []
    testset = []
    # `flag` alternates "0"/"1" so the test set takes female and male
    # images strictly in turn, keeping it gender-balanced.
    flag = "0"
    cnt = 0
    for filename in filenames:
        # Gender label: last "-"-separated token after stripping ".png".
        gender = filename[:-4].split("-")[-1]
        if cnt < n_test and gender == "0" and flag == "0":
            testset.append(filename)
            flag = "1"
            cnt += 1
        elif cnt < n_test and gender == "1" and flag == "1":
            testset.append(filename)
            flag = "0"
            cnt += 1
        else:
            trainset.append(filename)
    for name in trainset:
        os.rename(root+name, "./trainset/"+name)
    for name in testset:
        os.rename(root+name, "./testset/"+name)
    # Source directory is expected to be empty after the moves.
    os.rmdir(root)
def resplit_images():
    # Carve a small balanced subset (500+500 train, 50+50 test) out of the
    # big trainset-9000/testset-1000 splits by *copying* files into
    # ./trainset/ and ./testset/.  Gender flag is the last "-"-separated
    # token of the name with ".png" stripped ("0" female, "1" male).
    train_filenames = os.listdir("./trainset-9000/")
    test_filenames = os.listdir("./testset-1000/")
    female_cnt = 0
    male_cnt = 0
    for filename in train_filenames:
        gender = filename[:-4].split("-")[-1]
        if gender == "0" and female_cnt < 500:
            shutil.copyfile("./trainset-9000/"+filename, "./trainset/"+filename)
            female_cnt += 1
        elif gender == "1" and male_cnt < 500:
            shutil.copyfile("./trainset-9000/"+filename, "./trainset/"+filename)
            male_cnt += 1
    # Reset counters and repeat for the (smaller) test split.
    female_cnt = 0
    male_cnt = 0
    for filename in test_filenames:
        gender = filename[:-4].split("-")[-1]
        if gender == "0" and female_cnt < 50:
            shutil.copyfile("./testset-1000/"+filename, "./testset/"+filename)
            female_cnt += 1
        elif gender == "1" and male_cnt < 50:
            shutil.copyfile("./testset-1000/"+filename, "./testset/"+filename)
            male_cnt += 1
# Pipeline entry: shrink the raw images, then split them into train/test.
resize_images(["images-1100"], 100)
split_train_test("images-1100-s100/", 100)
# count_male_female('images-1100')  # determine the positive-class threshold for gender classification
# statistic_images("images-1100")  # report image size statistics
# rename_images(['images-1100'])  # ensure unique image indices (needed after running `process` several times)
# resplit_images()  # carve a small dataset out of the big one (temporary use)
| NICE-FUTURE/predict-gender-and-age-from-camera | data/utils.py | utils.py | py | 4,306 | python | en | code | 33 | github-code | 13 |
16808508174 | import sys
from collections import namedtuple
from hypothesis.strategies import (
binary,
booleans,
builds,
complex_numbers,
decimals,
dictionaries,
fixed_dictionaries,
floats,
fractions,
frozensets,
integers,
just,
lists,
none,
one_of,
randoms,
recursive,
sampled_from,
sets,
text,
tuples,
)
from tests.common.debug import TIME_INCREMENT
__all__ = ["standard_types", "OrderedPair", "TIME_INCREMENT"]
OrderedPair = namedtuple("OrderedPair", ("left", "right"))

# Strategy for pairs with left <= right: draw the right endpoint, then
# subtract a non-negative length to obtain the left one.
ordered_pair = integers().flatmap(
    lambda right: integers(min_value=0).map(
        lambda length: OrderedPair(right - length, right)
    )
)


def constant_list(strat):
    # Lists whose elements are all equal to a single value drawn from `strat`.
    return strat.flatmap(lambda v: lists(just(v)))


ABC = namedtuple("ABC", ("a", "b", "c"))


def abc(x, y, z):
    # Strategy building ABC triples from the three component strategies.
    return builds(ABC, x, y, z)
# Broad collection of strategies shared as fixtures across the test suite.
# Deliberately includes empty containers, duplicates and extreme
# parametrisations to exercise many shrink/generate paths.
standard_types = [
    lists(none(), max_size=0),
    tuples(),
    sets(none(), max_size=0),
    frozensets(none(), max_size=0),
    fixed_dictionaries({}),
    abc(booleans(), booleans(), booleans()),
    abc(booleans(), booleans(), integers()),
    fixed_dictionaries({"a": integers(), "b": booleans()}),
    dictionaries(booleans(), integers()),
    dictionaries(text(), booleans()),
    one_of(integers(), tuples(booleans())),
    sampled_from(range(10)),
    one_of(just("a"), just("b"), just("c")),
    sampled_from(("a", "b", "c")),
    integers(),
    integers(min_value=3),
    integers(min_value=(-(2**32)), max_value=(2**64)),
    floats(),
    floats(min_value=-2.0, max_value=3.0),
    floats(),
    floats(min_value=-2.0),
    floats(),
    floats(max_value=-0.0),
    floats(),
    floats(min_value=0.0),
    floats(min_value=3.14, max_value=3.14),
    text(),
    binary(),
    booleans(),
    tuples(booleans(), booleans()),
    frozensets(integers()),
    sets(frozensets(booleans())),
    complex_numbers(),
    fractions(),
    decimals(),
    lists(lists(booleans())),
    lists(floats(0.0, 0.0)),
    ordered_pair,
    constant_list(integers()),
    integers().filter(lambda x: abs(x) > 100),
    floats(min_value=-sys.float_info.max, max_value=sys.float_info.max),
    none(),
    randoms(use_true_random=True),
    booleans().flatmap(lambda x: booleans() if x else complex_numbers()),
    recursive(base=booleans(), extend=lambda x: lists(x, max_size=3), max_leaves=10),
]
| HypothesisWorks/hypothesis | hypothesis-python/tests/common/__init__.py | __init__.py | py | 2,369 | python | en | code | 7,035 | github-code | 13 |
17922281055 | import os
import pandas as pd
from pyMetricBenchmark.matplot import boxplot, liniendiagramm
from pyMetricBenchmark.matplot import balkenplot
from pyMetricBenchmark.datei import download
from pyMetricBenchmark.fatjar import subfatjar
# Funktionen um die Daten der Performace csv in eigenständige Dataframm zu ändern
# Im column steht die jar file namen
def vrefDataframm(data, csvdata, jarsname):
    """Append one row to ``data`` with the ``methods.vref`` metric per jar.

    :param data: DataFrame whose columns are jar file names; one row added.
    :param csvdata: performance-report DataFrame with ``File`` and
        ``methods.vref`` columns (default RangeIndex assumed).
    :param jarsname: jar file names whose metrics should be collected.
    """
    row = {}
    for i in range(len(csvdata)):
        file = csvdata.at[i, "File"]
        # Skip the benchmark framework's own warm-up entry.
        if file == "<framework init> ":
            continue
        # The File column holds a path; the jar name is its last component.
        jarname = file.rsplit('/').pop()
        if jarname in jarsname:
            row[jarname] = csvdata.at[i, "methods.vref"]
    # len(data) is the next positional label on a default RangeIndex.  The
    # original `len(data.index + 1)` evaluated to the same number but read
    # like an off-by-one.
    data.loc[len(data)] = pd.Series(row)
def loopDataframm(data, csvdata, jarsname):
    """Append one row to ``data`` with the ``methods.loop`` metric per jar.

    Same shape as :func:`vrefDataframm`, but reading the ``methods.loop``
    column of the performance report.
    """
    row = {}
    for i in range(len(csvdata)):
        file = csvdata.at[i, "File"]
        # Skip the benchmark framework's own warm-up entry.
        if file == "<framework init> ":
            continue
        jarname = file.rsplit('/').pop()
        if jarname in jarsname:
            row[jarname] = csvdata.at[i, "methods.loop"]
    # Next positional label; equivalent to the original len(data.index + 1).
    data.loc[len(data)] = pd.Series(row)
def wmcDataframm(data, csvdata, jarsname):
    """Append one row to ``data`` with the ``wmc`` metric per jar.

    Same shape as :func:`vrefDataframm`, but reading the ``wmc``
    (weighted methods per class) column of the performance report.
    """
    row = {}
    for i in range(len(csvdata)):
        file = csvdata.at[i, "File"]
        # Skip the benchmark framework's own warm-up entry.
        if file == "<framework init> ":
            continue
        jarname = file.rsplit('/').pop()
        if jarname in jarsname:
            row[jarname] = csvdata.at[i, "wmc"]
    # Next positional label; equivalent to the original len(data.index + 1).
    data.loc[len(data)] = pd.Series(row)
def vdDataframm(data, csvdata, jarsname):
    """Append one row to ``data`` with ``VariablesDeclared.count`` per jar.

    Same shape as :func:`vrefDataframm`.  The local accumulator is named
    ``row`` — the original shadowed the builtin ``dict``.
    """
    row = {}
    for i in range(len(csvdata)):
        file = csvdata.at[i, "File"]
        # Skip the benchmark framework's own warm-up entry.
        if file == "<framework init> ":
            continue
        jarname = file.rsplit('/').pop()
        if jarname in jarsname:
            row[jarname] = csvdata.at[i, "VariablesDeclared.count"]
    # Next positional label; equivalent to the original len(data.index + 1).
    data.loc[len(data)] = pd.Series(row)
def inStDataframm(data, csvdata, jarsname):
    """Append one row to ``data`` with the ``Internal Stability`` metric per jar.

    Same shape as :func:`vrefDataframm`.  The local accumulator is named
    ``row`` — the original shadowed the builtin ``dict``.
    """
    row = {}
    for i in range(len(csvdata)):
        file = csvdata.at[i, "File"]
        # Skip the benchmark framework's own warm-up entry.
        if file == "<framework init> ":
            continue
        jarname = file.rsplit('/').pop()
        if jarname in jarsname:
            row[jarname] = csvdata.at[i, "Internal Stability"]
    # Next positional label; equivalent to the original len(data.index + 1).
    data.loc[len(data)] = pd.Series(row)
def benchmarkGroup5(fatjar, home, messungen):
    """Run the full group-5 metric benchmark and render all plots.

    :param fatjar: path to the analysis fat-jar executed via ``subfatjar``.
    :param home: base directory containing the jar sets and the
        ``ergebniss`` output folder (plots/CSVs are written there).
    :param messungen: number of repeated measurement runs per benchmark.
    Side effects: changes the working directory, reads
    ``performance-report.csv`` after each run, and writes plot files.
    """
    # Directory names of the four jar collections.
    globeljarsname = "globaljars"
    group5 = "group5"
    multifileg = "multifileguava"
    multifilesp = "multifilespring"
    # Absolute paths to the jar collections.
    globeljars = os.path.join(home, globeljarsname)
    group5jars = os.path.join(home, group5)
    multifileguava = os.path.join(home, multifileg)
    multifilespring = os.path.join(home, multifilesp)
    # Change into the results directory: every run of the fat-jar drops its
    # performance-report.csv there and all plots are saved there too.
    csvfile = "ergebniss"
    os.chdir(os.path.join(home, csvfile))
    # --- shared (global) benchmark ------------------------------------
    allglobaljarsname = []
    for filename in os.listdir(globeljars):
        allglobaljarsname.append(filename)
    globaljargroesse = download.datei_groesse(globeljars)
    # One DataFrame per metric; each measurement run appends one row.
    vrefglobal = pd.DataFrame(columns=allglobaljarsname)
    vdglobal = pd.DataFrame(columns=allglobaljarsname)
    wmcglobal = pd.DataFrame(columns=allglobaljarsname)
    loopglobal = pd.DataFrame(columns=allglobaljarsname)
    print("---------Beginne Auswertung des gemeinsamen Benchmark-------------")
    for i in range(messungen):
        subfatjar.runAllSingle(fatjar, globeljars)
        df = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefglobal, df, allglobaljarsname)
        wmcDataframm(wmcglobal, df, allglobaljarsname)
        loopDataframm(loopglobal, df, allglobaljarsname)
        vdDataframm(vdglobal, df, allglobaljarsname)
    # Box plots, line plots and regression plots per metric.
    boxplot.boxplotD(vrefglobal, allglobaljarsname, "vrefglobalboxplot", messungen, "Vref Boxplot Gemeinsamer Benchmark")
    boxplot.boxplotD(wmcglobal, allglobaljarsname, "wmcglobalboxplot", messungen, "Wmc Boxplot Gemeinsamer Benchmark")
    boxplot.boxplotD(loopglobal, allglobaljarsname, "loopglobalboxplot", messungen, "Loop Boxplot Gemeinsamer Benchmark")
    boxplot.boxplotD(vdglobal, allglobaljarsname, "vdecglobalboxplot", messungen, "Vdec BoxplotGemeinsamer Benchmark")
    liniendiagramm.simpleline(vrefglobal, allglobaljarsname, "vrefgloballinien", messungen, globaljargroesse, "Vref Gemeinsamer Benchmark")
    liniendiagramm.scatterdiagramm(vrefglobal, allglobaljarsname, "vrefglobalregression", messungen, globaljargroesse,
                                   "Vref Gemeinsamer Benchmark")
    liniendiagramm.simpleline(vdglobal, allglobaljarsname, "vdecgloballinien", messungen, globaljargroesse, "Vdec Gemeinsamer Benchmark")
    liniendiagramm.scatterdiagramm(vdglobal, allglobaljarsname, "vdecglobalregression", messungen, globaljargroesse,
                                   "Vdec Gemeinsamer Benchmark")
    liniendiagramm.simpleline(wmcglobal, allglobaljarsname, "wmcgloballinien", messungen, globaljargroesse, "Wmc Gemeinsamer Benchmark")
    liniendiagramm.scatterdiagramm(wmcglobal, allglobaljarsname, "wmcglobalregression", messungen, globaljargroesse,
                                   "Wmc Gemeinsamer Benchmark")
    liniendiagramm.simpleline(loopglobal, allglobaljarsname, "loopgloballinien", messungen, globaljargroesse, "Loop Gemeinsamer Benchmark")
    liniendiagramm.scatterdiagramm(loopglobal, allglobaljarsname, "loopglobalregression", messungen, globaljargroesse,
                                   "Loop Gemeinsamer Benchmark")
    liniendiagramm.allbenchmarklinie(vrefglobal, vdglobal, wmcglobal, loopglobal, allglobaljarsname, "Global-Benchmark",
                                     messungen, globaljargroesse, "Diagramm aller Metriken der Gruppe 5")
    boxplot.boxplotallmetrics(vrefglobal, wmcglobal, loopglobal, vdglobal, allglobaljarsname, "boxplotallmetrics", messungen, "Boxplot aller Metrics Gemeinsamer Benchmark")
    # --- vref with special CLI arguments on the global benchmark -------
    print("------------Beginne auswertung mit speziellen argumente auf dem gemeinsamen Benchmark---------------------")
    vrefglobalStandL = pd.DataFrame(columns=allglobaljarsname)
    vrefglobalVa = pd.DataFrame(columns=allglobaljarsname)
    vrefglobalAllArg = pd.DataFrame(columns=allglobaljarsname)
    for i in range(messungen):
        # StoreAndLoad argument only.
        subfatjar.runSingleStoreAndLoad(fatjar, globeljars)
        df = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefglobalStandL, df, allglobaljarsname)
        # Info argument only.
        subfatjar.runSingleInfo(fatjar, globeljars)
        df2 = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefglobalVa, df2, allglobaljarsname)
        # All arguments combined.
        subfatjar.runAllArgument(fatjar, globeljars)
        df3 = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefglobalAllArg, df3, allglobaljarsname)
    liniendiagramm.scatterdiagramm(vrefglobalAllArg, allglobaljarsname, "vrefargsglobalregression", messungen,
                                   globaljargroesse,
                                   "Vref all Argumente")
    liniendiagramm.scatterdiagramm(vrefglobalVa, allglobaljarsname, "vrefinfoglobalregression", messungen,
                                   globaljargroesse,
                                   "Vref Argument Info")
    liniendiagramm.scatterdiagramm(vrefglobalStandL, allglobaljarsname, "vrefstandlglobalregression", messungen,
                                   globaljargroesse,
                                   "Vref Argument StoreAndLoad")
    liniendiagramm.allvreflinie(vrefglobal, vrefglobalVa, vrefglobalStandL, vrefglobalAllArg, allglobaljarsname,
                                "vrefglobalvergleich", messungen, globaljargroesse,
                                "Vref mit und ohne Argumente Vergleich auf dem gemeinsamen Benchmark")
    balkenplot.balkenVref(vrefglobal, vrefglobalVa, vrefglobalStandL, vrefglobalAllArg, allglobaljarsname,
                          "vrefglobalvergleichbalken", messungen,"Vref mit und ohne Argumente Vergleich")
    # --- multi-file analysis (guava vs spring) -------------------------
    print("------------Beginne Multifile Analysis---------------------")
    guavajarsname = []
    for filename in os.listdir(multifileguava):
        guavajarsname.append(filename)
    springjarsname = []
    for filename in os.listdir(multifilespring):
        springjarsname.append(filename)
    guavagrosse = download.datei_groesse(multifileguava)
    springgroesse = download.datei_groesse(multifilespring)
    guava = pd.DataFrame(columns=guavajarsname)
    spring = pd.DataFrame(columns=springjarsname)
    for i in range(messungen):
        subfatjar.runMultiFile(fatjar, multifileguava)
        df = pd.read_csv('performance-report.csv')
        inStDataframm(guava, df, guavajarsname)
        subfatjar.runMultiFile(fatjar, multifilespring)
        df2 = pd.read_csv('performance-report.csv')
        inStDataframm(spring, df2, springjarsname)
    liniendiagramm.multifilelinie(guava, spring, guavajarsname, springjarsname, "multifile", messungen, guavagrosse, springgroesse, "Multifile Analysis")
    # --- group-5 benchmark (same pipeline as the global one) -----------
    print("-----------------------Beginne Auswerung des Benchmarks der Gruppe-------------------------------")
    gruppe5jarsname = []
    for filename in os.listdir(group5jars):
        gruppe5jarsname.append(filename)
    gruppe5jargroesse = download.datei_groesse(group5jars)
    vrefgruppe5 = pd.DataFrame(columns=gruppe5jarsname)
    vdgruppe5 = pd.DataFrame(columns=gruppe5jarsname)
    wmcgruppe5 = pd.DataFrame(columns=gruppe5jarsname)
    loopgruppe5 = pd.DataFrame(columns=gruppe5jarsname)
    for i in range(messungen):
        subfatjar.runAllSingle(fatjar, group5jars)
        df = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefgruppe5, df, gruppe5jarsname)
        wmcDataframm(wmcgruppe5, df, gruppe5jarsname)
        loopDataframm(loopgruppe5, df, gruppe5jarsname)
        vdDataframm(vdgruppe5, df, gruppe5jarsname)
    boxplot.boxplotD(vrefgruppe5, gruppe5jarsname, "vrefgruppe5boxplot", messungen, "Vref Boxplot Gruppe5")
    boxplot.boxplotD(wmcgruppe5, gruppe5jarsname, "wmcgruppe5boxplot", messungen, "Wmc Boxplot Gruppe5")
    boxplot.boxplotD(loopgruppe5, gruppe5jarsname, "loopgruppe5boxplot", messungen, "Loop Boxplot Gruppe5")
    boxplot.boxplotD(vdgruppe5, gruppe5jarsname, "vdecgruppe5boxplot", messungen, "Vdec Boxplot Gruppe5")
    liniendiagramm.simpleline(vrefgruppe5, gruppe5jarsname, "vrefgruppe5linien", messungen, gruppe5jargroesse, "Vref Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(vrefgruppe5, gruppe5jarsname, "vrefgruppe5regression", messungen, gruppe5jargroesse,
                                   "Vref Gruppe5 Benchmark")
    liniendiagramm.simpleline(vdgruppe5, gruppe5jarsname, "vdecgruppe5linien", messungen, gruppe5jargroesse, "Vdec Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(vdgruppe5, gruppe5jarsname, "vdecgruppe5regression", messungen, gruppe5jargroesse,
                                   "Vdec Gruppe5 Benchmark")
    liniendiagramm.simpleline(wmcgruppe5, gruppe5jarsname, "wmcgruppe5linien", messungen, gruppe5jargroesse, "Wmc Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(wmcgruppe5, gruppe5jarsname, "wmcgruppe5regression", messungen, gruppe5jargroesse,
                                   "Wmc Gruppe5 Benchmark")
    liniendiagramm.simpleline(loopgruppe5, gruppe5jarsname, "loopgruppe5linien", messungen, gruppe5jargroesse, "Loop Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(loopgruppe5, gruppe5jarsname, "loopgruppe5regression", messungen, gruppe5jargroesse,
                                   "Loop Gruppe5 Benchmark")
    liniendiagramm.allbenchmarklinie(vrefgruppe5, vdgruppe5, wmcgruppe5, loopgruppe5, gruppe5jarsname,
                                     "Gruppe5-Benchmark",
                                     messungen, gruppe5jargroesse, "Diagramm aller Metriken der Gruppe 5")
    boxplot.boxplotallmetrics(vrefgruppe5, wmcgruppe5, loopgruppe5, vdgruppe5, gruppe5jarsname, "boxplotallmetricsgruppe5", messungen, "Boxplot aller Metrics Gruppe5 Benchmark")
    # --- vref with special CLI arguments on the group-5 benchmark ------
    print("------------Beginne auswertung mit speziellen Argumenten auf dem Gruppe5 Benchmark---------------------")
    vrefgruppe5StandL = pd.DataFrame(columns=gruppe5jarsname)
    vrefgruppe5Va = pd.DataFrame(columns=gruppe5jarsname)
    vrefgruppe5AllArg = pd.DataFrame(columns=gruppe5jarsname)
    for i in range(messungen):
        subfatjar.runSingleStoreAndLoad(fatjar, group5jars)
        df = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefgruppe5StandL, df, gruppe5jarsname)
        subfatjar.runSingleInfo(fatjar, group5jars)
        df2 = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefgruppe5Va, df2, gruppe5jarsname)
        subfatjar.runAllArgument(fatjar, group5jars)
        df3 = pd.read_csv('performance-report.csv')
        vrefDataframm(vrefgruppe5AllArg, df3, gruppe5jarsname)
    liniendiagramm.scatterdiagramm(vrefgruppe5AllArg, gruppe5jarsname, "vrefargsgruppe5regression", messungen,
                                   gruppe5jargroesse,
                                   "Vref all Argumente Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(vrefgruppe5Va, gruppe5jarsname, "vrefinfogruppe5regression", messungen,
                                   gruppe5jargroesse,
                                   "Vref Argument Info Gruppe5 Benchmark")
    liniendiagramm.scatterdiagramm(vrefgruppe5StandL, gruppe5jarsname, "vrefstandlgruppe5regression", messungen,
                                   gruppe5jargroesse,
                                   "Vref Argument StoreAndLoad Gruppe5 Benchmark")
    liniendiagramm.allvreflinie(vrefgruppe5, vrefgruppe5Va, vrefgruppe5StandL, vrefgruppe5AllArg, gruppe5jarsname,
                                "vrefgruppe5vergleich", messungen, gruppe5jargroesse,
                                "Vref mit und ohne Argumente Vergleich auf dem Gruppe5 Benchmark")
    balkenplot.balkenVref(vrefgruppe5, vrefgruppe5Va, vrefgruppe5StandL, vrefgruppe5AllArg, gruppe5jarsname,
                          "vrefgruppe5vergleichbalken", messungen, "Vref mit und ohne Argumente Vergleich Gruppe5 Benchmark")
    print("----------------------Auswertung beendet-----------------------------------------------------\n")
    print("Die Graphen sind im Ordner Ergebnisse im Verzeichniss:" + home)
| skyfly18/pyMetricBenchmark | src/pyMetricBenchmark/benchmarkGroup5.py | benchmarkGroup5.py | py | 14,815 | python | de | code | 0 | github-code | 13 |
1902900652 | import cocos
from math import sin,cos,radians, atan2, pi, degrees
import pyglet
from cocos.actions import *
from time import sleep
from time import sleep
import threading
from classchar import Char
import cocos.collision_model as cm
import cocos.euclid as eu
from cocos.scenes.transitions import *
from random import randint
#from app.menu import MainMenu
from cocos.particle_systems import *
from cocos.particle import ParticleSystem, Color
from cocos.euclid import Point2
from bullet import *
from weapon import *
from weapons import *
class Man(Char):
    # Player character: a Char with a pistol, directional sprites and
    # mouse/keyboard controls (cocos2d/pyglet event handlers).
    def __init__(self,img):
        Char.__init__(self,img)
        self.speed=10
        self.weapon = Pistol(self)
        self.r = 0   # last facing angle in degrees (0..360)
        # One sprite per facing direction; exactly one is visible at a time.
        self.directions = (
            cocos.sprite.Sprite('char/1.png'), #down
            cocos.sprite.Sprite('char/2.png'), #left
            cocos.sprite.Sprite('char/3.png'), #right
            cocos.sprite.Sprite('char/4.png'), #top
            cocos.sprite.Sprite('char/5.png'), #leftdown
            cocos.sprite.Sprite('char/6.png'), #rightdown
            cocos.sprite.Sprite('char/1.png'), #allother
        )
        for i in self.directions: self.add(i,255)
        self.weapon_sprite = cocos.sprite.Sprite('char/weapon.png')
        self.add(self.weapon_sprite,254)
        self.add(cocos.sprite.Sprite('char/body.png'),253)
        #self.parent.get('hud').health.element.text=str('100%')
        #self.collision_type=0
        #self.shape.collision_type=3
        #self.parent.parent.get('hud').health.element.text=str(self.health)+'%'

    def hurt(self,damage=False):
        # Apply damage via Char.hurt, then mirror the health in the HUD label.
        super(Man,self).hurt(damage)
        self.parent.parent.get('hud').health.element.text=str(self.health)+'%'

    def die(self):
        # Delegate game-over handling to the owning layer.
        self.parent.gamover()

    def shoot(self):
        self.weapon.shoot()

    def on_mouse_motion(self,x,y,dx,dy):
        # Face the mouse cursor: pick the direction sprite matching the
        # angle and rotate the weapon sprite accordingly.
        #print "m"
        #print self.position
        a = list(self.position)
        b = list([x+dx,y+dy])
        # atan2 over (dx, dy) shifted into [0, 360).
        r = degrees(atan2(a[0] - b[0], a[1] - b[1]) )+180
        #print r
        for i in self.directions: i.opacity = 0
        # NOTE(review): the sector boundaries below are uneven (e.g. the
        # first branch also matches r > 292) — presumably tuned by hand.
        if (0 <=r<67.5 or r > 292): self.directions[3].opacity=255# print "1"
        elif 67.5 <=r<112.5: self.directions[2].opacity=255 # |
        elif 112.5 <=r<157.5: self.directions[5].opacity=255
        elif 157.5 <=r<202.5: self.directions[0].opacity=255
        elif 202.5 <=r<247.5: self.directions[4].opacity=255 #
        elif 247.5 <r<292.5: self.directions[1].opacity=255
        self.weapon_sprite.rotation = r+180
        #self.move(False)
        self.r = r

    def on_mouse_press(self, x,y,button,modifiers):
        self.weapon.on_mouse_press(x,y,button,modifiers)
        #print button
        # Button 4 (right mouse button in pyglet) behaves like pressing 'w'.
        if button==4:
            #self.schedule(self.on_mouse_motion, x,y, False,False)
            self.on_key_press(119)

    def on_mouse_release(self,x,y,button,modifiers):
        self.weapon.on_mouse_release(x,y,button,modifiers)
        if button==4:
            self.on_key_release(119)

    def on_key_press(self, key):
        # Key codes: 97='a' (turn left), 100='d' (turn right),
        # 119='w' (move forward), 65507=Left Ctrl (shoot).
        self.weapon.on_key_press(key)
        d = False
        if key == 97: d=-5
        if key == 100: d=5
        if d: self.schedule(self.turn, d)
        if key == 119: self.schedule(self.move)
        if key == 65507: self.shoot()

    def on_key_release(self, key):
        self.weapon.on_key_release(key)
        if key in (97,100): self.unschedule(self.turn)
        if key == 119: self.unschedule(self.move)
| silago/gametest | classman.py | classman.py | py | 3,136 | python | en | code | 1 | github-code | 13 |
6584767835 | # Input data split indexes.
IP_ADDR = 0
REMOTE_USER = 1
TIME_LOCAL = 2
HTTP_METHOD = 3
RESOURCE_URL = 4
HTTP_VERSION = 5
STATUS = 6
BYTES_SENT = 7
HTTP_REFERER = 8
USER_AGENT = 9


def parse_line(line):
    """Parse one tab-separated access-log line.

    Returns a 10-tuple ordered by the index constants above; STATUS and
    BYTES_SENT are converted to int, every other field stays a string.

    :param line: {str} access log
    :return: {tuple}
    """
    fields = line.split("\t")
    order = (IP_ADDR, REMOTE_USER, TIME_LOCAL, HTTP_METHOD, RESOURCE_URL,
             HTTP_VERSION, STATUS, BYTES_SENT, HTTP_REFERER, USER_AGENT)
    numeric = (STATUS, BYTES_SENT)
    return tuple(int(fields[i]) if i in numeric else str(fields[i])
                 for i in order)
| yasinmiran/big-data-gcw | utils/common.py | common.py | py | 781 | python | en | code | 1 | github-code | 13 |
42095167313 | import os
import spotipy
import json
import calendar
import datetime
from dotenv import load_dotenv
import pytz
def spotipy_token(scope, username):
    """Prompt for (or refresh) a Spotify OAuth token for *username* with *scope*.
    Credentials are loaded from a .env file via python-dotenv before the
    spotipy prompt runs.
    """
    # NOTE(review): machine-specific Windows path; env_path already ends in
    # '.env', and another '.env' is joined onto it below, producing
    # '<...>/.env/.env' -- confirm which file is really intended.
    env_path = r'D:/Users/john/Documents/python_files/SpotifyAPI/.env'
    project_folder = os.path.expanduser(env_path)  # adjust as appropriate
    load_dotenv(os.path.join(project_folder, '.env'))
    token = spotipy.util.prompt_for_user_token(username, scope)
    return token
def show_tracks(results, sp, playlist_name, total_tracks_list, playlist_id):
    """Append URIs of album/EP tracks found in *results* to *total_tracks_list*.

    Singles are skipped: a track is kept when its release is of type
    "album" or when the album name contains ' EP'.  The list is mutated
    in place and also returned.  (playlist_name and playlist_id are kept
    for interface compatibility but are unused.)
    """
    for item in results['items']:
        track = item['track']
        album = sp.album(show_tracks_album_uri(track))
        from_album = album['album_type'] == "album"
        from_ep = str.upper(album['name']).find(' EP') > 0
        if from_album or from_ep:
            total_tracks_list.append(track['uri'])
    return total_tracks_list
def show_tracks_album_uri(track):
    """Return the Spotify URI of the album the given track belongs to."""
    return track['album']['uri']
def playlist_name():
    """Return this week's playlist name, e.g. 'New Music Playlist 05/12/2023'.

    The date is the most recent Friday (today, if today is a Friday) in US
    Eastern time, matching Spotify's Friday release schedule.
    """
    # stdlib zoneinfo (Python 3.9+) replaces the third-party pytz dependency;
    # the resulting aware datetime behaves identically for now()/arithmetic.
    from zoneinfo import ZoneInfo
    eastern_now = datetime.datetime.now(ZoneInfo('America/New_York'))
    one_day = datetime.timedelta(days=1)
    # Walk backwards, one day at a time, until we land on a Friday.
    while eastern_now.weekday() != calendar.FRIDAY:
        eastern_now -= one_day
    return "New Music Playlist " + eastern_now.strftime("%m/%d/%Y")
def create_new_songs_playlist(sp, user_id, new_playlist_name):
    """Create a new public playlist for *user_id* and return its URI."""
    created = sp.user_playlist_create(user_id, new_playlist_name, public=True)
    return created['uri']
# Source playlists scanned for new album/EP tracks.
NEW_MUSIC_PLAYLISTS_LIST = ['spotify:playlist:37i9dQZF1DX4JAvHpjipBk',
                            'spotify:playlist:6y4wz0Gmh2nMlBMjxduLCi',
                            'spotify:playlist:5X8lN5fZSrLnXzFtDEUwb9']


def get_release_radar(sp):
    """Append the user's personal Release Radar to the source-playlist list.

    Only the top search hit is considered, and only when it is the genuine
    Spotify-owned 'Release Radar' playlist.
    """
    found = sp.search(q='Release Radar', limit=1, offset=0,
                      type="playlist", market=None)
    top_hit = found['playlists']['items'][0]
    if top_hit['owner']['id'] == 'spotify' and \
            top_hit['name'] == 'Release Radar':
        NEW_MUSIC_PLAYLISTS_LIST.append(top_hit['uri'])
def get_new_music_playlist_id(sp, new_playlist_name, user_id):
    """Return the URI of this week's playlist, creating it when missing.

    Relies on the new-music playlist staying at the top of the user's
    playlists if the script is re-run within the same week.
    """
    top = sp.current_user_playlists(limit=1)['items'][0]
    if top['name'] == new_playlist_name:
        return top['uri']
    return create_new_songs_playlist(sp, user_id, new_playlist_name)
def get_fields():
    """Return the comma-separated field filter passed to sp.playlist()."""
    return ",".join((
        "tracks.items.track.uri",
        "tracks.items.track.album.type",
        "tracks.items.track.album.uri",
        "tracks.items.track.album.name",
        "next",
    ))
def add_tracks_already_on_playlist(sp, playlist_id):
    """Return the URIs of tracks currently on the given playlist."""
    response = sp.playlist_tracks(playlist_id, fields='items.track.uri,total')
    if not response['total']:
        return []
    return [entry['track']['uri'] for entry in response['items']]
def remove_already_on_tracks(total_tracks_set, already_on_tracks_list):
    """Drop URIs that are already on the playlist from the candidate set.

    The set is mutated in place and also returned.
    """
    total_tracks_set.difference_update(already_on_tracks_list)
    return total_tracks_set
def add_new_music_playlist_details(sp, user_id, playlist_uri):
    """Set a descriptive blurb on the new-music playlist.

    The slice below strips the 17-character 'spotify:playlist:' prefix
    from the URI to obtain the bare playlist id.
    """
    playlist_id = playlist_uri[17:]
    # The embedded newlines/indentation inside these triple-quoted strings
    # end up verbatim in the playlist description.
    playlist_desc1 = '''This is a new music playlist created from code
    by John Wilson (bonjohh on spotify). '''
    playlist_desc2 = '''It was created by taking the featured album or EP
    tracks (excluding singles tracks) '''
    playlist_desc3 = '''from these 4 playlists: New Music Friday by Spotify,
    The Alternative New Music Friday by getalternative, '''
    playlist_desc4 = '''NPR Music's New Music Friday by NPR Music,
    Release Radar by Spotify'''
    playlist_desc = playlist_desc1 + playlist_desc2 + \
                    playlist_desc3 + playlist_desc4
    sp.user_playlist_change_details(user_id, playlist_id,
                                    description=playlist_desc)
def main(user_id):
    """End-to-end run: authenticate, (re)create this week's playlist, and
    fill it with album/EP tracks from the configured source playlists."""
    scope = 'playlist-modify-public playlist-read-private'
    token = spotipy_token(scope, user_id)
    # get the spotify authorization token
    sp = spotipy.Spotify(auth=token)
    # get the spotify authorization object
    new_playlist_name = playlist_name()
    # get the dynamic new music playlist name
    playlist_id = get_new_music_playlist_id(sp, new_playlist_name, user_id)
    # get the new music playlist id if created or else create
    # the new music playlist and get the id
    get_release_radar(sp)  # add personal spotify release radar to
    # the list of new music playlists to pull from
    add_new_music_playlist_details(sp, user_id, playlist_id)
    # add new music playlist description
    total_tracks_list = []  # initialize the total tracks list
    already_on_tracks_list = add_tracks_already_on_playlist(sp, playlist_id)
    # add tracks already on the new music playlist
    for playlist_uri in NEW_MUSIC_PLAYLISTS_LIST:
        # loop through the uris in the new music playlists list
        fields = get_fields()  # get the fields for the playlist search below
        new_music_playlist = sp.playlist(playlist_uri, fields=fields)
        # search for the new music playlist
        new_playlist_tracks = new_music_playlist['tracks']
        # get the tracks from the new music playlist
        total_tracks_list = show_tracks(new_playlist_tracks, sp,
                                        new_playlist_name,
                                        total_tracks_list, playlist_id)
        # call the show tracks function on the looped uri and append
        # the returned tracks to the total tracks list
    total_tracks_set = set(total_tracks_list)
    # convert the list to a set to remove duplicates
    total_tracks_set = remove_already_on_tracks(total_tracks_set,
                                                already_on_tracks_list)
    # remove tracks already on the new music playlist
    if len(total_tracks_set) > 0:
        # if there are any songs to add
        sp.user_playlist_add_tracks(user_id, playlist_id, total_tracks_set)
        # add the tracks from the total tracks list to the new music playlist
| bonjohh/SpotifyAPI | create_new_music_playlist/create_new_music_playlist.py | create_new_music_playlist.py | py | 6,597 | python | en | code | 0 | github-code | 13 |
559436834 | #!/bin/python3
import math
import os
import random
import re
import sys
def primality(n):
    """Classify *n* by trial division: return "Prime" or "Not prime".

    Uses math.isqrt so the square-root bound stays exact for arbitrarily
    large integers; the original math.ceil(math.sqrt(n)) relies on float
    sqrt, which loses precision for n beyond ~2**52.
    """
    if n == 1:
        return "Not prime"
    if n == 2:
        return "Prime"
    if n % 2 == 0:
        return "Not prime"
    # Only odd divisors up to the integer square root need checking.
    for divisor in range(3, math.isqrt(n) + 1, 2):
        if n % divisor == 0:
            return "Not prime"
    return "Prime"
if __name__ == '__main__':
    # HackerRank harness: read the query count, then one integer per line,
    # and write one primality verdict per query to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    p = int(input().strip())
    for p_itr in range(p):
        n = int(input().strip())
        result = primality(n)
        fptr.write(result + '\n')
    fptr.close()
| Quasar0007/Competitive_Programming | Primality.py | Primality.py | py | 637 | python | en | code | 0 | github-code | 13 |
35920431023 | """
Project:
This program is use to read table data from a pdf file.The create_folder function will create a folder name called'csv'
in the current working directory.
Author: <Hashimabdulla> <hashimabdulla69@gmail.com> , April 18 2020
Version: 0.1
Module: Pdf table data extractor.
"""
import os
import shutil
import camelot
"""This module create folder to save csv files."""
def create_folder(foldername):
    """Ensure an *empty* folder named *foldername* exists under the CWD.

    An existing folder is deleted and recreated, so the result is always
    empty.  Returns a status message describing which path was taken
    (return strings kept for backward compatibility).
    """
    # os.path.join replaces manual '/'-concatenation; `not exists` replaces
    # the `foldercheck == False` anti-idiom.
    target = os.path.join(os.getcwd(), foldername)
    if not os.path.exists(target):
        os.mkdir(foldername)
        return "new folder, {} created.".format(foldername)
    # Wipe and recreate so the caller always starts from an empty folder.
    shutil.rmtree(target)
    os.mkdir(foldername)
    return "folder already exist"
"""This module extract data from tables from each pages of input pdf file."""
def pdf_table_reader(file):
    """Extract every table from *file* with camelot and write one CSV per
    table into ./csv (recreated empty by create_folder).  Returns the list
    of file names found in the csv folder afterwards.
    """
    create_folder("csv")
    pathaddress = os.getcwd()
    csv_folder = pathaddress + '/csv'
    # camelot scans all pages; tables.n is the number of tables found.
    tables=camelot.read_pdf(file,pages='all')
    for i in range(tables.n):
        tables[i].to_csv('{}/table_{}.csv'.format(csv_folder,i))
    index = os.listdir(pathaddress + "/csv")
    return index
if __name__ == '__main__':
    # Guarded so importing this module no longer runs (and fails on) the
    # placeholder path; replace the argument with a real PDF path.
    pdf_table_reader("replace here with your pdffile path.")
21690505687 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated initial Django migration: creates the VueFederation
    # model, linking a federation Institution and its federation number to
    # the Titre roles whose holders may access the data.
    dependencies = [
        ('mandats', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='VueFederation',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('numero_de_federation', models.CharField(max_length=255, help_text="C'est le numéro de la fédé, par exemple '78'.")),
                ('federation', models.ForeignKey(to='mandats.Institution')),
                ('titres', models.ManyToManyField(help_text="Ce sont les titres qui permettent d'accéder aux données, par exemple 'secrétaire' et 'président'.", to='mandats.Titre')),
            ],
            options={
                # Custom permission used by the views to gate mandate management.
                'permissions': (('gere_les_mandats', 'gère les mandats'),),
            },
        ),
    ]
| Brachamul/elan-democrate | datascope/migrations/0001_initial.py | 0001_initial.py | py | 979 | python | fr | code | 2 | github-code | 13 |
21264105816 | from collections import defaultdict
import heapq
from typing import List
class Solution:
    """LeetCode 1514: maximum success probability path, found with a
    Dijkstra-style search driven by a max-heap."""

    def build_adjList(self, edges, succProb):
        """Build an undirected adjacency list: node -> [(neighbor, probability)]."""
        graph = defaultdict(list)
        for (a, b), prob in zip(edges, succProb):
            graph[a].append((b, prob))
            graph[b].append((a, prob))
        return graph

    def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
        """Return the highest product of edge probabilities on any start->end path."""
        graph = self.build_adjList(edges, succProb)
        best = [0] * n
        best[start] = 1
        # heapq is a min-heap, so probabilities are stored negated.
        frontier = [(-best[start], start)]
        while frontier:
            neg_prob, node = heapq.heappop(frontier)
            prob = -neg_prob
            if node == end:
                return prob
            for neighbor, edge_prob in graph[node]:
                candidate = prob * edge_prob
                if best[neighbor] < candidate:
                    best[neighbor] = candidate
                    heapq.heappush(frontier, (-candidate, neighbor))
        # Unreachable end node: best[end] is still 0.
        return best[end]
# Ad-hoc smoke test; the best route here should be 3 -> 0 -> 4
# (0.23 * 0.93, roughly 0.2139).
n = 5
edges = [[1, 4], [2, 4], [0, 4], [0, 3], [0, 2], [2, 3]]
succProb = [0.37, 0.17, 0.93, 0.23, 0.39, 0.04]
start = 3
end = 4
s = Solution()
t = s.maxProbability(n, edges, succProb, start, end)
print(t)
def addHSG2VertexReconstruction(d3pdalg,
                                quadruplet_key="QuadrupletCandidates",
                                vertex_key="QuadrupletVertexCandidates",
                                electron_target="el_",
                                muid_target="mu_muid_",
                                staco_target="mu_staco_",
                                calo_target="mu_calo_",
                                muon_target="mu_muon_"):
    """Attach the HSG2 quadruplet-vertex D3PD objects to *d3pdalg*.

    Adds the refitted quadruplet vertices and then the quadruplet
    candidates themselves, wired up with index associations back to the
    vertex, electron and muon containers via the given prefixes.
    """
    from TrackD3PDMaker.VertexD3PDObject import PrimaryVertexD3PDObject
    from HiggsD3PDMaker.HSG2QuadrupletD3PDObject import HSG2QuadrupletD3PDObject

    # Vertex information goes in first so the quadruplet block can
    # index into it through VertexIndex_target below.
    d3pdalg += PrimaryVertexD3PDObject(1, sgkey=vertex_key,
                                       prefix="quad_vertex_")
    d3pdalg += HSG2QuadrupletD3PDObject(10, sgkey=quadruplet_key,
                                        prefix="quad_",
                                        VertexIndex_target="quad_vertex_",
                                        ElectronIndex_target=electron_target,
                                        MuonMuidIndex_target=muid_target,
                                        MuonStacoIndex_target=staco_target,
                                        MuonCaloIndex_target=calo_target,
                                        MuonIndex_target=muon_target)
    return
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/D3PDMaker/HiggsD3PDMaker/python/HSG2VertexReconstruction.py | HSG2VertexReconstruction.py | py | 1,482 | python | en | code | 1 | github-code | 13 |
39814383480 | """Mass-balance models"""
# Built ins
# External libs
import numpy as np
import pandas as pd
import netCDF4
from scipy.interpolate import interp1d
from scipy import optimize as optimization
# Locals
import oggm.cfg as cfg
from oggm.cfg import SEC_IN_YEAR, SEC_IN_MONTH
from oggm.utils import (SuperclassMeta, lazy_property, floatyear_to_date,
date_to_floatyear, monthly_timeseries)
class MassBalanceModel(object, metaclass=SuperclassMeta):
    """Common logic for the mass balance models.
    All mass-balance models should implement this interface.
    """
    def __init__(self):
        """ Initialize."""
        # Additive temperature bias (K), exposed via the temp_bias property.
        self._temp_bias = 0
        # [min, max] altitude range (m) used by get_ela(); subclasses must
        # set it before get_ela() can be called.
        self.valid_bounds = None
    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self._temp_bias
    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        self._temp_bias = value
    def get_monthly_mb(self, heights, year=None):
        """Monthly mass-balance at given altitude(s) for a moment in time.
        Units: [m s-1], or meters of ice per second
        Note: `year` is optional because some simpler models have no time
        component.
        Parameters
        ----------
        heights: ndarray
            the atitudes at which the mass-balance will be computed
        year: float, optional
            the time (in the "hydrological floating year" convention)
        Returns
        -------
        the mass-balance (same dim as `heights`) (units: [m s-1])
        """
        raise NotImplementedError()
    def get_annual_mb(self, heights, year=None):
        """Like `self.get_monthly_mb()`, but for annual MB.
        For some simpler mass-balance models ``get_monthly_mb()` and
        `get_annual_mb()`` can be equivalent.
        Units: [m s-1], or meters of ice per second
        Note: `year` is optional because some simpler models have no time
        component.
        Parameters
        ----------
        heights: ndarray
            the atitudes at which the mass-balance will be computed
        year: float, optional
            the time (in the "floating year" convention)
        Returns
        -------
        the mass-balance (same dim as `heights`) (units: [m s-1])
        """
        raise NotImplementedError()
    def get_specific_mb(self, heights, widths, year=None):
        """Specific mb for this year and a specific glacier geometry.
        Units: [mm w.e. yr-1], or millimeter water equivalent per year
        Parameters
        ----------
        heights: ndarray
            the atitudes at which the mass-balance will be computed
        widths: ndarray
            the widths of the flowline (necessary for the weighted average)
        year: float, optional
            the time (in the "hydrological floating year" convention)
        Returns
        -------
        the specific mass-balance (units: mm w.e. yr-1)
        """
        # Vectorize over a sequence of years by recursing per year.
        if len(np.atleast_1d(year)) > 1:
            out = [self.get_specific_mb(heights, widths, year=yr)
                   for yr in year]
            return np.asarray(out)
        # Convert [m ice s-1] to [mm w.e. yr-1], then average weighted by
        # the flowline widths.
        mbs = self.get_annual_mb(heights, year=year) * SEC_IN_YEAR * cfg.RHO
        return np.average(mbs, weights=widths)
    def get_ela(self, year=None):
        """Compute the equilibrium line altitude for this year
        Parameters
        ----------
        year: float, optional
            the time (in the "hydrological floating year" convention)
        Returns
        -------
        the equilibrium line altitude (ELA, units: m)
        """
        if len(np.atleast_1d(year)) > 1:
            return np.asarray([self.get_ela(year=yr) for yr in year])
        if self.valid_bounds is None:
            raise ValueError('attribute `valid_bounds` needs to be '
                             'set for the ELA computation.')
        # Check for invalid ELAs
        # (non-finite MB at the bounds, or an MB profile that does not
        # cross zero within the valid altitude range)
        b0, b1 = self.valid_bounds
        if (np.any(~np.isfinite(self.get_annual_mb([b0, b1], year=year))) or
                (self.get_annual_mb([b0], year=year)[0] > 0) or
                (self.get_annual_mb([b1], year=year)[0] < 0)):
            return np.NaN
        def to_minimize(x):
            # Annual MB in [mm w.e. yr-1] at altitude x; the ELA is its root.
            o = self.get_annual_mb([x], year=year)[0] * SEC_IN_YEAR * cfg.RHO
            return o
        return optimization.brentq(to_minimize, *self.valid_bounds, xtol=0.1)
class LinearMassBalance(MassBalanceModel):
    """Constant mass-balance as a linear function of altitude.

    The "temperature bias" doesn't make much sense in this context, but a
    simple empirical rule is implemented: +1 K shifts the ELA up by 150 m.
    """

    def __init__(self, ela_h, grad=3., max_mb=None):
        """Initialize.

        Parameters
        ----------
        ela_h: float
            Equilibrium line altitude (units: [m])
        grad: float
            Mass-balance gradient (unit: [mm w.e. yr-1 m-1])
        max_mb: float
            Cap the mass balance to a certain value (unit: [mm w.e. yr-1])
        """
        super(LinearMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # altitude search range, in m
        # The original ELA is kept so the bias always shifts from it.
        self.orig_ela_h = ela_h
        self.ela_h = ela_h
        self.grad = grad
        self.max_mb = max_mb

    @MassBalanceModel.temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias: +1 K raises the ELA by 150 m."""
        self.ela_h = self.orig_ela_h + value * 150
        self._temp_bias = value

    def get_monthly_mb(self, heights, year=None):
        # Linear profile around the (possibly bias-shifted) ELA, optionally
        # capped, converted from mm w.e. yr-1 to m ice s-1.
        rate = (np.asarray(heights) - self.ela_h) * self.grad
        if self.max_mb is not None:
            rate = rate.clip(None, self.max_mb)
        return rate / SEC_IN_YEAR / cfg.RHO

    def get_annual_mb(self, heights, year=None):
        # Time-invariant model: annual rate equals the monthly rate.
        return self.get_monthly_mb(heights, year=year)
class PastMassBalance(MassBalanceModel):
    """Mass balance during the climate data period."""
    def __init__(self, gdir, mu_star=None, bias=None, prcp_fac=None,
                 filename='climate_monthly', input_filesuffix=''):
        """Initialize.
        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mustar you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *substracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        prcp_fac : float, optional
            set to the alternative value of the precipitation factor
            you want to use (the default is to use the calibrated value)
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        """
        super(PastMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # in m
        # Missing parameters fall back to the calibrated values stored in
        # the glacier directory's 'local_mustar' file.
        if mu_star is None:
            df = pd.read_csv(gdir.get_filepath('local_mustar'))
            mu_star = df['mu_star'][0]
        if bias is None:
            if cfg.PARAMS['use_bias_for_run']:
                df = pd.read_csv(gdir.get_filepath('local_mustar'))
                bias = df['bias'][0]
            else:
                bias = 0.
        if prcp_fac is None:
            df = pd.read_csv(gdir.get_filepath('local_mustar'))
            prcp_fac = df['prcp_fac'][0]
        self.mu_star = mu_star
        self.bias = bias
        # Parameters
        self.t_solid = cfg.PARAMS['temp_all_solid']
        self.t_liq = cfg.PARAMS['temp_all_liq']
        self.t_melt = cfg.PARAMS['temp_melt']
        # Public attrs
        self.temp_bias = 0.
        # Read file
        fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)
        with netCDF4.Dataset(fpath, mode='r') as nc:
            # time
            time = nc.variables['time']
            time = netCDF4.num2date(time[:], time.units)
            ny, r = divmod(len(time), 12)
            if r != 0:
                raise ValueError('Climate data should be N full years')
            # This is where we switch to hydro float year format
            # Last year gives the tone of the hydro year
            self.years = np.repeat(np.arange(time[-1].year-ny+1,
                                             time[-1].year+1), 12)
            self.months = np.tile(np.arange(1, 13), ny)
            # Read timeseries
            # prcp is stored pre-multiplied with the precipitation factor.
            self.temp = nc.variables['temp'][:]
            self.prcp = nc.variables['prcp'][:] * prcp_fac
            self.grad = nc.variables['grad'][:]
            self.ref_hgt = nc.ref_hgt
    def get_monthly_climate(self, heights, year=None):
        """Monthly climate information at given heights.
        Note that prcp is corrected with the precipitation factor.
        Returns
        -------
        (temp, tempformelt, prcp, prcpsol)
        """
        y, m = floatyear_to_date(year)
        pok = np.where((self.years == y) & (self.months == m))[0][0]
        # Read timeseries
        itemp = self.temp[pok] + self.temp_bias
        iprcp = self.prcp[pok]
        igrad = self.grad[pok]
        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        npix = len(heights)
        temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)
        tempformelt = temp - self.t_melt
        # Melt energy only above the melt threshold: clip negatives to zero.
        tempformelt[:] = np.clip(tempformelt, 0, tempformelt.max())
        # Compute solid precipitation from total precipitation
        # Linear solid fraction: 1 below t_solid, 0 above t_liq.
        prcp = np.ones(npix) * iprcp
        fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)
        prcpsol = prcp * np.clip(fac, 0, 1)
        return temp, tempformelt, prcp, prcpsol
    def get_monthly_mb(self, heights, year=None):
        _, tmelt, _, prcpsol = self.get_monthly_climate(heights, year=year)
        y, m = floatyear_to_date(year)
        # Accumulation minus temperature-index melt, minus the monthly share
        # of the annual calibration bias; converted to [m ice s-1].
        mb_month = prcpsol - self.mu_star * tmelt
        mb_month -= self.bias * SEC_IN_MONTH / SEC_IN_YEAR
        return mb_month / SEC_IN_MONTH / cfg.RHO
    def get_annual_mb(self, heights, year=None):
        year = np.floor(year)
        pok = np.where(self.years == year)[0]
        if len(pok) < 1:
            raise ValueError('Year {} not in record'.format(int(year)))
        # Read timeseries
        itemp = self.temp[pok] + self.temp_bias
        iprcp = self.prcp[pok]
        igrad = self.grad[pok]
        # For each height pixel:
        # Compute temp and tempformelt (temperature above melting threshold)
        # The 2D arrays below are (npix, 12): one row per height, one
        # column per month of the selected hydrological year.
        heights = np.asarray(heights)
        npix = len(heights)
        grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
        grad_temp *= (heights.repeat(12).reshape(grad_temp.shape) -
                      self.ref_hgt)
        temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
        temp2dformelt = temp2d - self.t_melt
        temp2dformelt[:] = np.clip(temp2dformelt, 0, temp2dformelt.max())
        # Compute solid precipitation from total precipitation
        prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
        fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)
        fac = np.clip(fac, 0, 1)
        prcpsol *= fac
        # Sum the monthly balances, subtract the annual bias, convert to
        # [m ice s-1].
        mb_annual = np.sum(prcpsol - self.mu_star * temp2dformelt, axis=1)
        return (mb_annual - self.bias) / SEC_IN_YEAR / cfg.RHO
class ConstantMassBalance(MassBalanceModel):
    """Constant mass-balance during a chosen period.
    This is useful for equilibrium experiments.
    """
    def __init__(self, gdir, mu_star=None, bias=None, prcp_fac=None,
                 y0=None, halfsize=15):
        """Initialize
        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mustar you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the annual bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
        prcp_fac : float, optional
            set to the alternative value of the precipitation factor
            you want to use (the default is to use the calibrated value)
        y0 : int, optional, default: tstar
            the year at the center of the period of interest. The default
            is to use tstar as center.
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        """
        super(ConstantMassBalance, self).__init__()
        # The actual climate/MB computation is delegated to the
        # past-climate model; this class only averages it over the period.
        self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
                                     prcp_fac=prcp_fac)
        if y0 is None:
            df = pd.read_csv(gdir.get_filepath('local_mustar'))
            y0 = df['t_star'][0]
        # This is a quick'n dirty optimisation
        try:
            fls = gdir.read_pickle('model_flowlines')
            h = []
            for fl in fls:
                # We use bed because of overdeepenings
                h = np.append(h, fl.bed_h)
                h = np.append(h, fl.surface_h)
            zminmax = np.round([np.min(h)-50, np.max(h)+2000])
        except FileNotFoundError:
            # in case we don't have them
            with netCDF4.Dataset(gdir.get_filepath('gridded_data')) as nc:
                zminmax = [nc.min_h_dem-250, nc.max_h_dem+1500]
        # 10 m altitude bins over which the period-averaged MB is
        # computed and interpolated.
        self.hbins = np.arange(*zminmax, step=10)
        self.valid_bounds = self.hbins[[0, -1]]
        self.y0 = y0
        self.halfsize = halfsize
        self.years = np.arange(y0-halfsize, y0+halfsize+1)
    @MassBalanceModel.temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # Invalidate the lazily-cached interpolants, which depend on the bias.
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.temp_bias = value
        self._temp_bias = value
    @lazy_property
    def interp_yr(self):
        # annual MB
        # Interpolant of the annual MB averaged over the whole period.
        mb_on_h = self.hbins*0.
        for yr in self.years:
            mb_on_h += self.mbmod.get_annual_mb(self.hbins, year=yr)
        return interp1d(self.hbins, mb_on_h / len(self.years))
    @lazy_property
    def interp_m(self):
        # monthly MB
        # One interpolant per calendar month, each averaged over the period.
        months = np.arange(12)+1
        interp_m = []
        for m in months:
            mb_on_h = self.hbins*0.
            for yr in self.years:
                yr = date_to_floatyear(yr, m)
                mb_on_h += self.mbmod.get_monthly_mb(self.hbins, year=yr)
            interp_m.append(interp1d(self.hbins, mb_on_h / len(self.years)))
        return interp_m
    def get_climate(self, heights, year=None):
        """Average climate information at given heights.
        Note that prcp is corrected with the precipitation factor.
        Returns
        -------
        (temp, tempformelt, prcp, prcpsol)
        """
        yrs = monthly_timeseries(self.years[0], self.years[-1],
                                 include_last_year=True)
        heights = np.atleast_1d(heights)
        nh = len(heights)
        shape = (len(yrs), nh)
        temp = np.zeros(shape)
        tempformelt = np.zeros(shape)
        prcp = np.zeros(shape)
        prcpsol = np.zeros(shape)
        for i, yr in enumerate(yrs):
            t, tm, p, ps = self.mbmod.get_monthly_climate(heights, year=yr)
            temp[i, :] = t
            tempformelt[i, :] = tm
            prcp[i, :] = p
            prcpsol[i, :] = ps
        # Note that we do not weight for number of days per month - bad
        # The *12 converts monthly means back to annual sums.
        return (np.mean(temp, axis=0),
                np.mean(tempformelt, axis=0) * 12,
                np.mean(prcp, axis=0) * 12,
                np.mean(prcpsol, axis=0) * 12)
    def get_monthly_mb(self, heights, year=None):
        # Only the month matters: the climatology is constant across years.
        yr, m = floatyear_to_date(year)
        return self.interp_m[m-1](heights)
    def get_annual_mb(self, heights, year=None):
        return self.interp_yr(heights)
class RandomMassBalance(MassBalanceModel):
    """Random shuffle of all MB years within a given time period.
    This is useful for finding a possible past glacier state or for sensitivity
    experiments.
    Note that this is going to be sensitive to extreme years in certain
    periods, but it is by far more physically reasonable than other
    approaches based on gaussian assumptions.
    """
    def __init__(self, gdir, mu_star=None, bias=None, prcp_fac=None,
                 y0=None, halfsize=15, seed=None):
        """Initialize.
        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mustar you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *substracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        prcp_fac : float, optional
            set to the alternative value of the precipitation factor
            you want to use (the default is to use the calibrated value)
        y0 : int, optional, default: tstar
            the year at the center of the period of interest. The default
            is to use tstar as center.
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        seed : int, optional
            Random seed used to initialize the pseudo-random number generator.
        """
        super(RandomMassBalance, self).__init__()
        self.valid_bounds = [-1e4, 2e4]  # in m
        self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
                                     prcp_fac=prcp_fac)
        if y0 is None:
            df = pd.read_csv(gdir.get_filepath('local_mustar'))
            y0 = df['t_star'][0]
        # Climate period
        self.years = np.arange(y0-halfsize, y0+halfsize+1)
        self.yr_range = (y0-halfsize, y0+halfsize+1)
        self.ny = len(self.years)
        # RandomState
        # Seeded generator plus a memo dict so that each simulation year
        # maps to a fixed random climate year (reproducible for one seed).
        self.rng = np.random.RandomState(seed)
        self._state_yr = dict()
    @MassBalanceModel.temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        self.mbmod.temp_bias = value
        self._temp_bias = value
    def get_state_yr(self, year=None):
        """For a given year, get the random year associated to it."""
        year = int(year)
        # Draw from the RNG only on first request so repeated queries for
        # the same year always return the same climate year.
        if year not in self._state_yr:
            self._state_yr[year] = self.rng.randint(*self.yr_range)
        return self._state_yr[year]
    def get_monthly_mb(self, heights, year=None):
        # Keep the month, swap the year for its random counterpart.
        ryr, m = floatyear_to_date(year)
        ryr = date_to_floatyear(self.get_state_yr(ryr), m)
        return self.mbmod.get_monthly_mb(heights, year=ryr)
    def get_annual_mb(self, heights, year=None):
        ryr = self.get_state_yr(int(year))
        return self.mbmod.get_annual_mb(heights, year=ryr)
| Chris35Wills/oggm | oggm/core/massbalance.py | massbalance.py | py | 19,317 | python | en | code | null | github-code | 13 |
39984916026 | from django.http import HttpResponse
import json
# pylint: disable=attribute-defined-outside-init
class JSONMixin(object):
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
self.request = request
self.args = args
self.kwargs = kwargs
# If the request wants JSON, return handler get_json or post_json, etc.
# Expect the method to return a dict that can be passed to json.dumps
if request.prefer_json and request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower() + '_json', handler)
json_data = handler(request, *args, **kwargs)
return HttpResponse(json.dumps(json_data), content_type='application/json')
return handler(request, *args, **kwargs)
| TelemarkAlpint/slingsby | slingsby/general/mixins.py | mixins.py | py | 1,200 | python | en | code | 3 | github-code | 13 |
23442005224 | import torch
import transforms as T
from pollination_model import get_model_instance_segmentation
from PIL import Image
def get_transform(train):
    """Build the dataset transform pipeline.

    Always converts to a tensor; adds a 50% random horizontal flip when
    *train* is true.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
def main(path_to_image):
    """Run the trained instance-segmentation model on one image and show
    the (transformed) input plus the summed predicted masks."""
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    num_classes=2
    model = get_model_instance_segmentation(num_classes)
    # move model to the right device
    model.to(device)
    model.load_state_dict(torch.load('weights.pth'))
    model.eval()
    im = Image.open(path_to_image)
    transform = get_transform(train=False)
    img,_ = transform(im,im)
    with torch.no_grad():
        prediction = model([img.to(device)])
    # Show the tensor converted back to an HxWxC uint8 image.
    Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy()).show()
    masks = prediction[0]['masks']
    n,_,_,_=masks.shape
    x=[]
    for i in range(0,n):
        x.append(masks[i,0].mul(255).byte().cpu().numpy())
    # NOTE(review): summing per-instance uint8 masks can overflow where
    # instances overlap, and sum([]) == 0 would crash fromarray when no
    # instances are detected -- confirm whether a max-merge was intended.
    Image.fromarray(sum(x)).show()
if __name__ == '__main__':
    import argparse

    # CLI: --p points at the input image to run segmentation on.
    parser = argparse.ArgumentParser(description='Get image path')
    parser.add_argument('--p', type=str, default='input.png', help='path to image')
    args = parser.parse_args()
    main(args.p)
786844052 | # To Plot and analyze different results obtained by the model
import os
import matplotlib.pyplot as plt
import pickle
import numpy as np
from utils import provide_shuffle_idx
from io_args import args
# Compare predicted per-frame velocities against ground truth, dump them to
# text files, print MSE figures and plot the curves.
pkl_filename = args.gt_pkl_filename

# Per-frame velocities, squeezed to 1-D arrays; `with` guarantees the pickle
# file handles are closed (the originals were leaked).
with open(pkl_filename, 'rb') as pkl:
    gt_labels = np.squeeze(pickle.load(pkl))
with open('results.pkl', 'rb') as pkl:
    pred_labels = np.squeeze(pickle.load(pkl))
with open('test_results.pkl', 'rb') as pkl:
    test_set_predictions = np.squeeze(pickle.load(pkl))

# Raw test-set predictions, one value per line.
with open('test.txt', 'w') as f:
    for elem in test_set_predictions:
        f.write("%7f\n" % elem)

# Recursive smoothing: each value is averaged with the previous
# `mov_avg_idx` already-smoothed values (presumably intentional -- confirm
# if a plain moving average over the raw values was meant).
new_labels = pred_labels.copy()
mov_avg_idx = 5
for idx in range(new_labels.shape[0]):
    if idx > mov_avg_idx:
        # float() replaces np.float, which was removed in NumPy 1.24.
        new_labels[idx] = (new_labels[idx] +
                           np.sum(new_labels[idx - mov_avg_idx:idx])) / float(mov_avg_idx + 1)

with open('train.txt', 'w') as f2:
    for elem in new_labels:
        f2.write("%7f\n" % elem)

# MSE on the full set and on the held-out test split, raw and smoothed.
test_idx = provide_shuffle_idx(pred_labels.shape[0], ratio=0.75, data_mode='test')
test_gt = gt_labels[test_idx]
test_pred = pred_labels[test_idx]
new_test_pred = new_labels[test_idx]
print(np.mean((gt_labels - pred_labels) ** 2))
print(np.mean((gt_labels - new_labels) ** 2))
print(np.mean((test_pred - test_gt) ** 2))
print(np.mean((new_test_pred - test_gt) ** 2))

# Figure 1: smoothed predictions vs ground truth over the whole video.
plt.figure(1)
plt.grid()
plt.plot(new_labels, c='g', label='Prediction')
plt.plot(gt_labels, c='r', label='Ground Truth')
plt.title('Results on Train+Val (extracted from a single video)')
plt.xlabel('Frame- ID')
plt.ylabel('Velocity of the Car')
plt.legend(loc='best')

# Figure 2: raw test-set predictions.
plt.figure(2)
plt.grid()
plt.plot(test_set_predictions, c='b')
plt.show()
13317913063 | from typing import Callable, Union
import operator
# Type aliases describing the interpreter's data model.
Operand = Union[str, int]
Register = str
Position = int
Registers = dict[Register, int]
# An instruction handler: (register file, instruction pointer, two operands)
# -> next instruction pointer.
Modification = Callable[[Registers, Position, Operand, Operand], Position]


def modify_register(modification: Callable[[int, int], int]) -> Modification:
    """Lift a binary int operation into an instruction handler.

    The returned handler applies *modification* to the named register's
    current value and the evaluated operand, stores the result back, and
    advances the instruction pointer by one.
    """
    def handler(registers: Registers, position: Position, register: str, value: Operand) -> Position:
        current = registers[register]
        registers[register] = modification(current, eval_operand(value, registers))
        return position + 1

    return handler


def eval_operand(operand: Operand, registers: Registers) -> int:
    """Resolve an operand: numeric literals become ints, anything else is
    treated as a register name and looked up in *registers*."""
    try:
        return int(operand)
    except ValueError:
        return registers[operand]
def perform_jump(registers: Registers, position: Position, condition: Operand, offset: Operand, predicate: Callable[[int], bool]):
    """Conditional jump: move by *offset* when *predicate* holds for the
    evaluated condition, otherwise fall through to the next instruction."""
    if predicate(eval_operand(condition, registers)):
        return position + eval_operand(offset, registers)
    return position + 1


def perform_jgz(registers: Registers, position: Position, condition: Operand, offset: Operand):
    """'jgz' -- jump when the condition evaluates to a positive value."""
    return perform_jump(registers, position, condition, offset, lambda c: c > 0)


def perform_jnz(registers: Registers, position: Position, condition: Operand, offset: Operand):
    """'jnz' -- jump when the condition evaluates to a non-zero value."""
    return perform_jump(registers, position, condition, offset, lambda c: c != 0)
# Dispatch table mapping instruction mnemonics to their handlers.  Register
# ops are lifted via modify_register; jumps use the dedicated jump handlers.
handlers = {
    'set': modify_register(lambda old, new: new),
    'add': modify_register(operator.add),
    'mul': modify_register(operator.mul),
    'mod': modify_register(operator.mod),
    'sub': modify_register(operator.sub),
    'jgz': perform_jgz,
    'jnz': perform_jnz
} | takemyoxygen/advent-of-code | 2017/common/registers.py | registers.py | py | 1,609 | python | en | code | 1 | github-code | 13 |
4923125156 | import numpy
#Global Variables
ciphertext = ""  # upper-cased cipher text read from the input file
keymatrix = []  # key matrix entered via the key file (filled by getkeymatrix)
plaintextmatrix = []  # decrypted plain-text matrix
ciphertextmatrix = []  # cipher text reshaped into rows of length keysize
#Function to calculate multiplicative inverse
def multiplicativemodinverse(base):
    """Return the multiplicative inverse of ``base`` modulo 26, or -1 when
    no inverse exists (``base`` shares a factor with 26)."""
    residue = base % 26
    for candidate in range(1, 26):
        if (residue * candidate) % 26 == 1:
            return candidate
    return -1
def getkeymatrix ():
    """Fill the global ``keymatrix`` (keysize x keysize) from the lines of
    the global ``keyfile``, consuming one number per line, and print it.

    Relies on module-level state: ``keysize``, ``keyfile`` and ``keymatrix``
    must already exist; entries are appended to ``keymatrix`` in row order.
    """
    k = -1;
    #Get Key Matrix as User Input
    for i in range (keysize):
        temp = []
        for j in range (keysize):
            k = k + 1
            # Each line of the key file holds one integer matrix entry.
            element = int(keyfile[k])
            temp.append (element)
        keymatrix.append (temp)
    print("Keymatrix :")
    print (keymatrix)
#Function to Calculate Modular Multiplicative Inverse of Key Matrix
def matrixinverse() :
    """Return the inverse of the global ``keymatrix`` modulo 26.

    Uses the classical formula inv(K) mod 26 = adj(K) * det(K)^-1 mod 26,
    where the adjugate is recovered as inv(K) * det(K) in floating point.
    Relies on the module-level globals ``determinant`` and ``keymatrix``.

    NOTE(review): the result is a float array; entries are only rounded to
    integers later (in the caller's chr() loop) -- confirm precision is
    sufficient for larger key matrices.
    """
    #Find the Multiplicative Inverse Modulo m of Determinant
    detinverse = multiplicativemodinverse(determinant)
    # print("detinverse :")
    # print(detinverse)
    #Find the Cofactor of Key Matrix
    adjointmatrix = numpy.linalg.inv(keymatrix) * determinant
    # cofactormatrix = cofactormatrix.astype(int)
    # print("adjoint matrix :")
    # print(adjointmatrix)
    #Multiply Cofactor Matrix with Determinant Inverse
    invkeymatrix = adjointmatrix * detinverse
    # print(invkeymatrix)
    invkeymatrix = numpy.mod(invkeymatrix, 26)
    return(invkeymatrix)
#Function to Perform Encryption
def decryption ():
    """Decrypt the global ``ciphertext`` with the global ``inversekeymatrix``.

    Maps each letter to 0..25 (ASCII - 65, so input must be upper-case A-Z),
    reshapes into rows of length ``keysize``, multiplies each row by the
    inverse key matrix and reduces modulo 26.

    Returns:
        A numpy array of plain-text letter codes (floats in [0, 26)).
    """
    cipherarray = []
    ptmatrix = []
    for i in ciphertext:
        cipherarray.append(int(ord(i))-65)
    # One cipher block per row; -1 lets numpy infer the number of rows.
    ciphertextmatrix = numpy.reshape(cipherarray,(-1, keysize))
    # print("ciphertextmatrix :")
    # print(ciphertextmatrix)
    for i in ciphertextmatrix:
        temprow = numpy.dot(inversekeymatrix, i)
        ptmatrix.append(temprow)
    # print("ptmatrix without mod :")
    # print(ptmatrix)
    ptmatrix = numpy.mod(ptmatrix,26)
    # print("plaintextmatrix :")
    # print(ptmatrix)
    return(ptmatrix)
# --- Interactive driver: read cipher text and key matrix, then decrypt. ---
ctfilename = input("Enter name of cipher text input file: ")
inputFile = open(ctfilename, "r")
ciphertext = inputFile.read();
keysize = int (input ("Enter Size of Key Matrix (Order) : "))
#Function to Get Key Matrix
kmfile = input("Enter key matrix input file: ")
IF = open(kmfile,"r")
keyfile = IF.readlines();
#Get Plain Text as User Input
#ciphertext = input ("Enter Cipher Text : ")
ciphertext = ciphertext.replace(" ","")
ciphertext = ciphertext.upper()
#Get Size of Key Matrix
#keysize = int (input ("Enter Size of Key Matrix (Order) : "))
#Check if Dummy Character is Needed to Append at last of Plain Text
# Pad with 'Z' so the text length is a multiple of the block size.
appendsize = len(ciphertext)%keysize
if appendsize != 0 :
    appendsize = keysize - appendsize
# print(appendsize)
for i in range(appendsize):
    ciphertext = ciphertext + "Z"
print("Ciphertext is :")
print(ciphertext)
#Call Function to Get Key Matrix
getkeymatrix()
#Check if Key Matrix is valid or not by calculating determinant
determinant = int(numpy.linalg.det (keymatrix))
#print("determinant :")
#print(determinant)
# NOTE(review): a singular matrix triggers a single re-read of the same key
# file (appending to the same global keymatrix) rather than a retry loop --
# confirm this recovery path is intended.
if determinant == 0:
    print ("Key Matrix is invalid, please enter again")
    getkeymatrix()
else:
    print("Key Matrix is Valid")
inversekeymatrix = matrixinverse()
#print("inversekeymatrix :")
#print(inversekeymatrix)
print("Decrypting Cipher Text......")
plaintextmatrix = decryption()
stringarray = plaintextmatrix.ravel()
#print(stringarray)
plaintext = ""
for i in stringarray:
    # Round the float letter code and map back to an upper-case letter.
    j = round(i + 65)
    # print(j)
    plaintext = plaintext + chr(j)
#Print the Output
print("Plain Text is :")
print(plaintext)
#====== END OF PROGRAM ========== | snutesh/Cryptography_and_Computer_Security | Assignment_1/HillCipher_Decryption2.py | HillCipher_Decryption2.py | py | 3,452 | python | en | code | 0 | github-code | 13 |
44000658211 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 10 16:49:09 2016
@author: Zhian Wang
GWID: G33419803
Analyzing sereal data files by puting them into a dataframe,
compute the total births, seclect top 5 names, plot a graph, etc.
"""
import time
import pandas as pd
def getData():
    """
    Reads the yearly 'yobYYYY.txt' baby-name files (1880-2015) and returns
    their combined contents in a pandas dataframe.
    Args:
        None:
            Prompts interactively for the directory holding the name files
            (the path must end with a separator, e.g. '/').
    Returns:
        A pandas DataFrame with columns Name, Sex, Count and Year
        (Year is stored as a string).
    """
    start_time = time.time()
    #get path name, ending with /
    pathname = input("Please provide the path for the name files ...")
    # Seed frame fixes the column order (and the float dtype of Count,
    # matching the original behaviour).
    dfAll=pd.DataFrame({'Name' : [],'Sex' : [],'Count' : [],'Year' : []})
    print ('Started ...')
    # Collect one frame per year and concatenate ONCE at the end; calling
    # pd.concat inside the loop is quadratic in the total number of rows.
    frames = []
    for year in range(1880,2016):
        filename = 'yob'+str(year)+'.txt'
        filepath = pathname + filename
        # Read a new file into a dataframe
        df = pd.read_csv(filepath, header=None)
        df.columns = ['Name', 'Sex', 'Count']
        df['Year'] = str(year)
        frames.append(df)
    dfAll = pd.concat([dfAll] + frames)
    print('Done...')
    print ('It took', round(time.time()-start_time,4), 'seconds to read all the data into a dataframe.')
    return (dfAll)
#Part1
def q1(myDF):
    """
    Compute total number of births for each year and provide a formatted
    printout (one right-aligned row per year).
    Args:
        myDF: the pandas dataframe with all data (needs 'Year' and 'Count')
    Returns:
        Nothing
    """
    # Total births per year; groupby sorts the Year keys ascending.
    dfCount = myDF['Count'].groupby(myDF['Year']).sum()
    s = '{:>5}'.format('Year')
    s = s + '{:>10}'.format('Births')
    print(s)
    # Series.iteritems() was removed in pandas 2.0; items() is the supported
    # equivalent spelling.
    for myIndex, myValue in dfCount.items():
        s = '{:>5}'.format(myIndex)
        s = s + '{:>10}'.format(str(int(myValue)))
        print (s)
#Part2
def q2(myDF):
    """
    Compute the total births each year (from 1990 to 2014) for males and females
    and provide a bar plot of the two totals.
    Args:
        myDF: the pandas dataframe with all data
    Returns:
        Nothing
    """
    import matplotlib # import the libraries to plot
    matplotlib.style.use('ggplot') # set the plot style to ggplot type
    # Exceute the condition provided in the assignment
    # NOTE: Year is stored as a string, so these are lexicographic
    # comparisons; they coincide with numeric order for 4-digit years.
    dfSubset = myDF[ (myDF['Year'] >= '1990') & (myDF['Year'] <= '2014') ]
    # Subset by sex and sum the variable of interest
    dfCountBySex = dfSubset['Count'].groupby(dfSubset['Sex']).sum()
    # dfCountBySex # Display the data frame
    dfCountBySex.plot.bar() # Draw the bar plot
# Part3
def q3(myDF):
    """
    Print the top 5 names for each year from 1950 to 1953, one row per year.
    Args:
        myDF: the pandas dataframe with all data
    Returns:
        Nothing
    """
    # Header row: 'Year' plus five right-aligned name columns.
    header = '{:>5}'.format('Year')
    for col in range(1, 6):
        header += '{:>10}'.format('Name ' + str(col))
    print(header)
    # NOTE(review): only 1950-1953 are reported (matching the original
    # implementation), although the prose says "starting 1950".
    for year in range(1950, 1954):
        yearly = myDF[(myDF['Year'] == str(year))]
        # Keep the five rows with the highest counts for this year.
        top5 = yearly.sort_values('Count', ascending=False).head(5)
        row = '{:>5}'.format(str(year))
        for _, record in top5.iterrows():
            row += '{:>10}'.format(record["Name"])
        print(row)
# Part4
def q4(dfAll):
    """
    Find the top 3 female and top 3 male names for years 2010 and up,
    print them in a table, and plot total counts by gender.
    Args:
        dfAll: the pandas dataframe with all data
    Returns:
        Nothing
    """
    # Prepare header
    s = ''
    s = '{:>5}'.format('Year')
    s = s + '{:>10}'.format('Female1')
    s = s + '{:>10}'.format('Female2')
    s = s + '{:>10}'.format('Female3')
    s = s + '{:>10}'.format('Male1')
    s = s + '{:>10}'.format('Male2')
    s = s + '{:>10}'.format('Male3')
    # Print header
    print (s)
    # creat a dataframe for concatanating the data of each year
    totalgraph=pd.DataFrame({'Count' : [],'Name' : [],'Sex' : [],'Year': []})
    # Now go through all the years for the report
    for i in range(2010,2016):
        #rearrange the data by year and sex
        fnFemale = dfAll[(dfAll['Year'] == str(i)) & (dfAll['Sex'] == 'F')]
        fnMale = dfAll[(dfAll['Year'] == str(i)) & (dfAll['Sex'] == 'M')]
        # sort by Count and retain the top three
        fn1 = fnFemale.sort_values('Count', ascending=False).head(3)
        fn2 = fnMale.sort_values('Count', ascending=False).head(3)
        # concatanate the female data and male data
        FandM = pd.concat([fn1,fn2])
        #concatanate the data of each year in the new blank dataframe
        totalgraph = pd.concat([totalgraph,FandM])
        #print the table
        s = ''
        s = s + '{:>5}'.format(str(i))
        for idx, row in FandM.iterrows():
            s = s + '{:>10}'.format(row["Name"])
        print(s)
    import matplotlib # import the libraries to plot
    matplotlib.style.use('ggplot') # set the plot style to ggplot type
    # Totals cover only the top-3 names per year/sex collected above.
    dfCountBySex = totalgraph['Count'].groupby(totalgraph['Sex']).sum()
    # dfCountBySex # Display the data frame
    dfCountBySex.plot.bar() # Draw the bar plot
# Part5
def q5(dfAll):
    """
    Plot the trend of the names 'John', 'Harry', 'Mary' and 'Marilyn' over
    all of the years of the data set.
    a. Stack 4 plots one over the other (figures 1-4)
    b. Plot all four trends in one plot (figure 5)
    Args:
        dfAll: the pandas dataframe with all data
    Returns:
        Nothing
    """
    #extract the name accordingly
    # For each name: total count per year (male + female rows combined).
    FnJ = dfAll[(dfAll['Name'] == 'John')]
    FnJ = FnJ['Count'].groupby(FnJ['Year']).sum()
    FnH = dfAll[(dfAll['Name'] == 'Harry')]
    FnH = FnH['Count'].groupby(FnH['Year']).sum()
    FnM = dfAll[(dfAll['Name'] == 'Mary')]
    FnM = FnM['Count'].groupby(FnM['Year']).sum()
    FnMln = dfAll[(dfAll['Name'] == 'Marilyn')]
    FnMln = FnMln['Count'].groupby(FnMln['Year']).sum()
    import matplotlib.pyplot as plt
    #draw the plot using the data provided above
    plt.figure(1)
    #define the x and y axis limit
    #plt.xlim([1880,2020])
    #plt.ylim([0,90000])
    # The .text(...) calls place a hard-coded legend-like label on each axes.
    FnJ.plot().text(110,80000,'---John',color='r',fontsize=15)
    plt.figure(2)
    FnH.plot().text(110,9000,'---Harry',color='r',fontsize=15)
    plt.figure(3)
    FnM.plot().text(110,70000,'---Mary',color='r',fontsize=15)
    plt.figure(4)
    FnMln.plot().text(90,10500,'---Marilyn',color='r',fontsize=15)
    #plot all trends in one plot
    plt.figure(5)
    plt.plot(FnJ,'r')
    #add the title of each line to clarify
    plt.text(1995,80000,'---John',color='r',fontsize=12)
    plt.plot(FnH,'b')
    plt.text(1995,74000,'---Harry',color='b',fontsize=12)
    plt.plot(FnM,'k')
    plt.text(1995,68000,'---Mary',color='k',fontsize=12)
    plt.plot(FnMln,'g')
    plt.text(1995,62000,'---Marilyn',color='g',fontsize=12)
# Part6
def q6(dfAll):
    """
    Find the ten names that have shown the greatest variation (variance of
    yearly counts) over the years, and plot them as a bar chart.
    Args:
        dfAll: the pandas dataframe with all data
    Returns:
        Nothing
    """
    # Exceute the condition in the assignment
    # Collapse male/female rows: one Count per (Name, Year) pair.
    dfCountByName = dfAll.groupby(['Name','Year']).sum()
    t = dfCountByName.reset_index()
    #compute the variation
    variation = t.groupby('Name').var()
    #sort by Count and retain the top ten
    topten = variation.sort_values('Count', ascending=False).head(10)
    import matplotlib.pyplot as plt
    plt.figure(1)
    topten.plot.bar()
| zhianwang/DNSC-6211-Programming_for_Business_Analytics | Assignment1/A01_G33419803.py | A01_G33419803.py | py | 7,934 | python | en | code | 0 | github-code | 13 |
43006383296 | from copy import copy
def merge_sort(numbers):
    """Return a sorted copy of *numbers*; the number of inversions found
    while sorting is printed as a side effect."""
    result = copy(numbers)
    scratch = [None] * len(numbers)
    inversions = m_sort(result, scratch, 0, len(numbers) - 1)
    print(inversions)
    return result
def m_sort(numbers, temp, left, right):
    """Recursively sort numbers[left:right+1] in place, using *temp* as
    scratch space, and return the number of inversions in that range."""
    if left >= right:
        # Single element: already sorted, no inversions.
        temp[left] = numbers[left]
        return 0
    mid = (left + right) // 2
    inversions = m_sort(numbers, temp, left, mid)
    inversions += m_sort(numbers, temp, mid + 1, right)
    inversions += merge(numbers, temp, left, mid + 1, right)
    return inversions
def merge(numbers, temp, left, mid, right):
    """Merge the sorted runs numbers[left..mid-1] and numbers[mid..right]
    back into *numbers* (via *temp*) and return the number of inversions
    between the two runs.

    An inversion is a pair (a, b) with a in the left run, b in the right run
    and a > b; every left element placed after j right elements accounts for
    exactly j inversions.
    """
    left_end = mid - 1
    tmp_pos = left
    num_elements = right - left + 1
    swap_count = 0
    j = 0  # number of right-run elements placed so far
    while(left <= left_end and mid <= right):
        if numbers[left] <= numbers[mid]:
            temp[tmp_pos] = numbers[left]
            tmp_pos += 1
            left += 1
            # Each right element already placed is smaller than this left
            # element, so it contributes one inversion.
            swap_count += j
        else:
            j += 1
            temp[tmp_pos] = numbers[mid]
            tmp_pos += 1
            mid += 1
    if left <= left_end:
        # Right run exhausted: each remaining left element was preceded by
        # all j right elements -> j inversions apiece.  (The original code
        # dropped these, under-counting inversions, and its slice bounds
        # were one element short.)
        remaining = left_end - left + 1
        swap_count += j * remaining
        temp[tmp_pos: tmp_pos + remaining] = numbers[left: left_end + 1]
        tmp_pos += remaining
    if mid <= right:
        # Left run exhausted: the remaining right elements are larger than
        # everything placed so far and add no inversions.
        temp[tmp_pos: tmp_pos + right - mid + 1] = numbers[mid: right + 1]
    numbers[right - num_elements + 1: right + 1] = temp[right - num_elements + 1: right + 1]
    return swap_count
if __name__ == "__main__":
    import sys
    # Numbers can come either from the command line or from an interactive
    # prompt; merge_sort prints the inversion count before we print the list.
    if len(sys.argv) > 1:
        numbers = list(map(int, sys.argv[1:]))
        sorted_list = merge_sort(numbers)
        print(sorted_list)
    else:
        numbers = input("Please enter space seperated numbers: ")
        numbers = list(map(int, numbers.strip().split(' ')))
        sorted_list = merge_sort(numbers)
print (sorted_list) | atiq1589/algorithms | python/merge_sort.py | merge_sort.py | py | 1,991 | python | en | code | 0 | github-code | 13 |
20101425942 | import os
import shutil
from PIL import Image, ImageStat
import PIL
import glob
import hashlib
def validate_images(input_dir: str, output_dir: str, log_file: str, formatter: str = "07d"):
    """Validate every file under *input_dir* and copy the valid images into
    *output_dir*, renamed to sequential numbers formatted with *formatter*
    (always with a '.jpg' extension).

    Rejected files are logged (one "basename,error_code" line each) to
    *log_file* + '.txt'.  Error codes: 1 wrong extension, 2 file too large,
    3 not readable as an image, 4 mode/size check failed, 5 zero pixel
    variance, 6 duplicate (by target name or exact pixel hash).

    Returns the number of files copied.

    NOTE(review): the glob pattern uses Windows-style back-slashes, so this
    only recurses correctly on Windows -- confirm the intended platform.
    """
    log_file = log_file+".txt"
    input_dir = os.path.abspath(input_dir)
    if not os.path.isdir(input_dir):
        raise ValueError(f"'{input_dir}' must be an existing directory")
    # create a new folder "output_dir" if it does not exist
    os.makedirs(output_dir, exist_ok=True)
    # to clear the old log_file
    f = open(log_file, 'w')
    f.close()
    # find all files in beginning at the given input
    found_files = glob.glob(input_dir + "\\**\\*.*", recursive=True)
    found_files.sort()
    copied_files = []
    num_copiedfiles = 0
    for imag_path in found_files:
        imag_size = os.path.getsize(imag_path)  # get image size
        head, tail = os.path.split(imag_path)
        file_name, ext = os.path.splitext(tail)
        file_basename = os.path.basename(imag_path)
        try:
            with Image.open(imag_path) as im:
                no_image = False
                check_4 = False
                # check the couler scale
                # NOTE(review): the error message below says "or", but this
                # flags only files that are BOTH non-RGB AND smaller than
                # 100px in a dimension -- confirm the intended rule.
                if im.mode != "RGB" and (im.size[0] < 100 or im.size[1] < 100):
                    check_4 = True
                # Varianz variante
                stat = ImageStat.Stat(im)
                variance = stat.var[0]
                # create Hashcode
                # SHA-256 over the raw pixel bytes: detects exact duplicates
                # even when the files differ (e.g. metadata).
                hasher = hashlib.sha256()
                data = im.tobytes()
                hasher.update(data)
                img_hash = hasher.hexdigest()
        except PIL.UnidentifiedImageError as ex:
            no_image = True
        # Create new filename and the new path
        formats = "{:" + formatter + "}"
        new_name = formats.format(num_copiedfiles) + ".jpg"  # .jpg for all copied files
        new_name_path = os.path.join(output_dir, new_name)
        if ext not in {".jpg", ".JPG", ".jpeg", ".JPEG"}:  # 1.
            print("Error 1: wrong file attribute")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},1\n")
        elif imag_size > 250000:  # 2.
            print("Error 2: file is to big")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},2\n")
        elif no_image:  # 3.
            print("Error 3: this file is not read as image")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},3\n")
        elif check_4:  # 4.
            print("Error 4: less Pixel size or wrong colour scale")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},4\n")
        elif variance <= 0:  # 5.
            print("ERROR 5: The Pixelvariance is 0")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},5\n")
        elif os.path.exists(new_name_path) or img_hash in copied_files:  # 6.
            print("ERROR 6: the file already exist")
            with open(log_file, 'a') as f:
                f.write(f"{file_basename},6\n")
        else:  # Copy files
            # Copy under the original name, then rename to the sequential one.
            shutil.copy(imag_path, output_dir)
            os.rename(os.path.join(output_dir, tail), os.path.join(output_dir, new_name))
            copied_files.append(img_hash)
            num_copiedfiles += 1
    return num_copiedfiles
if __name__ == "__main__":
    # Demo run against folders next to the current working directory.
    path_in = os.path.join(os.getcwd(), "Test_errors")
    path_out = os.path.join(os.getcwd(), "TestOut")
| FloGr1234/Python_II | Unit_1/a1_ex2.py | a1_ex2.py | py | 3,535 | python | en | code | 0 | github-code | 13 |
16013147700 | from PyOpenGL.line import LineDDA, LineBres
# from PyOpenGL.curve import Circle, Ellipse
if __name__ == '__main__':
    # Read two line endpoints as four whitespace-separated integers and draw
    # the line with Bresenham's algorithm (the DDA/circle/ellipse variants
    # are kept below, commented out).
    xa, ya, xb, yb = tuple(map(int, input('Enter 2 end points: ').strip().split()))
    # lineDDA = LineDDA(xa, ya, xb, yb)
    # lineDDA.draw()
    lineBres = LineBres(xa, ya, xb, yb)
    lineBres.draw()
    # x_center, y_center, radius = tuple(map(int, input('Enter center and radius: ').strip().split()))
    # circle = Circle(x_center, y_center, radius)
    # circle.draw()
    # rad_x, rad_y, x_center, y_center = tuple(map(int, input('Enter ellipse param: ').strip().split()))
    # ellipse = Ellipse(rad_x, rad_y, x_center, y_center)
    # ellipse.draw()
| sagar-spkt/Learning | main.py | main.py | py | 680 | python | en | code | 0 | github-code | 13 |
31154391159 | from rest_framework import viewsets, response, status
from trackangle.place.api.v1.serializers import PlaceSerializer, CommentSerializer, BudgetSerializer, RatingSerializer
from trackangle.route.api.v1.serializers import RouteSerializer
from trackangle.route.models import RouteHasPlaces
from trackangle.place.models import Place, Comment, Budget, Rating
from django.db import IntegrityError, transaction
from rest_framework.decorators import detail_route,list_route
from rest_framework.permissions import IsAuthenticated
class PlaceViewSet(viewsets.ModelViewSet):
    """REST endpoint for Place objects.

    Besides the standard ModelViewSet actions, exposes detail routes to list
    a place's routes and to set the calling user's comment/budget/rating
    (each an upsert: get_or_create followed by an unconditional overwrite).

    NOTE(review): @detail_route is the pre-DRF-3.9 spelling of
    @action(detail=True) -- this code targets an older rest_framework.
    """
    lookup_field = 'id'
    serializer_class = PlaceSerializer
    queryset = Place.objects.all()
    # def get_permissions(self):
    #     return (True,)
    def get_serializer_context(self):
        # Serializers need the request (e.g. for building absolute URLs).
        return {'request': self.request}
    def list(self, request, *args, **kwargs):
        # Serializes the class-level queryset directly (no pagination/filtering).
        serializer = self.serializer_class(self.queryset, many=True)
        return response.Response(serializer.data)
    def retrieve(self, request, id):
        # Returns the serialized place, or a 200 with null body when lookup fails.
        data = None
        try:
            place = Place.objects.get(pk=id)
            serializer = self.serializer_class(place)
            data = serializer.data
        # NOTE(review): bare except swallows every error (not just
        # Place.DoesNotExist) and still answers 200 -- confirm intended.
        except:
            print("Place does not exist")
        return response.Response(data)
    @detail_route(methods=['get'])
    def get_routes(self, request, id=None, *args, **kwargs):
        """Return every route that passes through this place."""
        place = Place.objects.get(pk=id)
        route_has_places = RouteHasPlaces.objects.filter(place=place)
        print(len(route_has_places))
        routes = []
        for rhp in route_has_places:
            routes.append(rhp.route)
        serializer = RouteSerializer(routes, many=True)
        return response.Response(serializer.data)
    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])
    def set_comment(self, request, id=None, *args, **kwargs):
        """Create or overwrite the authenticated user's comment on this place."""
        serializer = CommentSerializer(data=request.data)
        if serializer.is_valid():
            text = serializer.validated_data.pop('text')
            comment, created = Comment.objects.get_or_create(place_id=id, author=request.user, defaults={"text": text})
            comment.text = text
            comment.save()
            content = {"id": comment.id}
            return response.Response(content, status=status.HTTP_201_CREATED)
        return response.Response(status=status.HTTP_400_BAD_REQUEST)
    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])
    def set_budget(self, request, id=None, *args, **kwargs):
        """Create or overwrite the authenticated user's budget for this place."""
        serializer = BudgetSerializer(data=request.data)
        if serializer.is_valid():
            budgetObj = serializer.validated_data.pop('budget')
            budget, created = Budget.objects.get_or_create(owner=request.user, place_id = id, defaults={"budget":budgetObj})
            budget.budget = budgetObj
            budget.save()
            content = {"id": budget.id}
            return response.Response(content, status=status.HTTP_201_CREATED)
        return response.Response(status=status.HTTP_400_BAD_REQUEST)
    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])
    def set_rating(self, request, id=None, *args, **kwargs):
        """Create or overwrite the authenticated user's rating of this place."""
        serializer = RatingSerializer(data=request.data)
        if serializer.is_valid():
            rate = serializer.validated_data.pop('rate')
            rating, created = Rating.objects.get_or_create(rater=request.user, place_id = id, defaults={"rate":rate})
            rating.rate = rate
            rating.save()
            content = {"id": rating.id}
            return response.Response(content, status=status.HTTP_201_CREATED)
        return response.Response(status=status.HTTP_400_BAD_REQUEST)
| trackangle/trackangle-angular | trackangle/place/api/v1/views.py | views.py | py | 3,687 | python | en | code | 0 | github-code | 13 |
26062065223 | from typing import Dict, Tuple
import pytest
import torch
from torch import nn
import merlin.models.torch as mm
from merlin.models.torch import link
from merlin.models.torch.batch import Batch
from merlin.models.torch.block import Block, ParallelBlock, get_pre, set_pre
from merlin.models.torch.container import BlockContainer, BlockContainerDict
from merlin.models.torch.utils import module_utils
from merlin.schema import Tags
class PlusOne(nn.Module):
    """Test helper module: adds one to its input tensor."""

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        return torch.add(inputs, 1)
class PlusOneDict(nn.Module):
    """Test helper module: adds one to every tensor in an input dict."""

    def forward(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        return {name: tensor + 1 for name, tensor in inputs.items()}
class PlusOneTuple(nn.Module):
    """Test helper module: returns the incremented input twice, as a tuple
    of two independent tensors."""

    def forward(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        return torch.add(inputs, 1), torch.add(inputs, 1)
class TestBlock:
    """Unit tests for merlin's Block container: identity behaviour,
    prepend/append insertion, copying, repetition (with and without a
    residual link) and registry-based construction."""
    def test_identity(self):
        # An empty Block passes its input through unchanged.
        block = Block()
        inputs = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
        outputs = module_utils.module_test(block, inputs, batch=Batch(inputs))
        assert torch.equal(inputs, outputs)
        assert mm.schema.output(block) == mm.schema.output.tensors(inputs)
    def test_insertion(self):
        # prepend/append each add one module; link="residual" wraps the tail.
        block = Block()
        block.prepend(PlusOne())
        block.append(PlusOne())
        inputs = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
        outputs = module_utils.module_test(block, inputs, batch=Batch(inputs))
        assert torch.equal(outputs, inputs + 2)
        block.append(PlusOne(), link="residual")
        assert isinstance(block[-1], link.Residual)
    def test_copy(self):
        # copy() returns an independent object sharing no attribute state.
        block = Block(PlusOne())
        copied = block.copy()
        assert isinstance(copied, Block)
        assert isinstance(copied[0], PlusOne)
        assert copied != block
        copied.some_attribute = "new value"
        assert not hasattr(block, "some_attribute")
    def test_repeat(self):
        block = Block(PlusOne())
        assert isinstance(block.repeat(2), Block)
        assert len(block.repeat(2)) == 2
        with pytest.raises(TypeError, match="n must be an integer"):
            block.repeat("invalid_input")
        with pytest.raises(ValueError, match="n must be greater than 0"):
            block.repeat(0)
    def test_repeat_with_link(self):
        # Repeating with a residual link: second stage adds its own input.
        block = Block(PlusOne())
        repeated = block.repeat(2, link="residual")
        assert isinstance(repeated, Block)
        assert len(repeated) == 2
        assert isinstance(repeated[-1], link.Residual)
        inputs = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
        outputs = module_utils.module_test(repeated, inputs)
        assert torch.equal(outputs, (inputs + 1) + (inputs + 1) + 1)
    def test_from_registry(self):
        # Blocks registered by name can be re-created via Block.parse.
        @Block.registry.register("my_block")
        class MyBlock(Block):
            def forward(self, inputs):
                _inputs = inputs + 1
                return super().forward(_inputs)
        block = Block.parse("my_block")
        assert block.__class__ == MyBlock
        inputs = torch.randn(1, 3)
        assert torch.equal(block(inputs), inputs + 1)
class TestParallelBlock:
    """Unit tests for merlin's ParallelBlock: construction, forward over
    tensors and dicts (including duplicate-output errors), schema tracking,
    pre/post insertion, per-branch insertion and pre-schema propagation."""
    def test_init(self):
        pb = ParallelBlock({"test": PlusOne()})
        assert isinstance(pb, ParallelBlock)
        assert isinstance(pb.pre, BlockContainer)
        assert isinstance(pb.branches, BlockContainerDict)
        assert isinstance(pb.post, BlockContainer)
        assert pb.__repr__().startswith("ParallelBlock")
    def test_init_list_of_dict(self):
        pb = ParallelBlock(({"test": PlusOne()}))
        assert len(pb) == 1
        assert "test" in pb
    def test_forward(self):
        # A tensor input yields a dict keyed by branch name.
        inputs = torch.randn(1, 3)
        pb = ParallelBlock({"test": PlusOne()})
        outputs = module_utils.module_test(pb, inputs)
        assert isinstance(outputs, dict)
        assert "test" in outputs
    def test_forward_dict(self):
        # A dict input keeps its own keys in the merged output.
        inputs = {"a": torch.randn(1, 3)}
        pb = ParallelBlock({"test": PlusOneDict()})
        outputs = module_utils.module_test(pb, inputs)
        assert isinstance(outputs, dict)
        assert "a" in outputs
    def test_forward_dict_duplicate(self):
        # Two branches producing the same output key must be rejected.
        inputs = {"a": torch.randn(1, 3)}
        pb = ParallelBlock({"1": PlusOneDict(), "2": PlusOneDict()})
        with pytest.raises(RuntimeError):
            pb(inputs)
    def test_forward_tensor_duplicate(self):
        class PlusOneKey(nn.Module):
            def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
                return inputs["2"] + 1
        pb = ParallelBlock({"1": PlusOneDict(), "2": PlusOneKey()})
        inputs = {"2": torch.randn(1, 3)}
        with pytest.raises(RuntimeError):
            pb(inputs)
    def test_schema_tracking(self):
        # Tracing records one output column per branch, tagged EMBEDDING.
        pb = ParallelBlock({"a": PlusOne(), "b": PlusOne()})
        inputs = torch.randn(1, 3)
        outputs = mm.schema.trace(pb, inputs)
        schema = mm.schema.output(pb)
        for name in outputs:
            assert name in schema.column_names
            assert schema[name].dtype.name == str(outputs[name].dtype).split(".")[-1]
        assert len(schema.select_by_tag(Tags.EMBEDDING)) == 2
    def test_forward_tuple(self):
        # Tuple-valued branch outputs cannot be merged into a dict.
        inputs = torch.randn(1, 3)
        pb = ParallelBlock({"test": PlusOneTuple()})
        with pytest.raises(RuntimeError):
            module_utils.module_test(pb, inputs)
    def test_append(self):
        module = PlusOneDict()
        pb = ParallelBlock({"test": PlusOne()})
        pb.append(module)
        assert len(pb.post._modules) == 1
        assert pb[-1][0] == module
        assert pb[2][0] == module
        repr = pb.__repr__()
        assert "(post):" in repr
        module_utils.module_test(pb, torch.randn(1, 3))
    def test_prepend(self):
        module = PlusOne()
        pb = ParallelBlock({"test": module})
        pb.prepend(module)
        assert len(pb.pre._modules) == 1
        assert pb[0][0] == module
        repr = pb.__repr__()
        assert "(pre):" in repr
        module_utils.module_test(pb, torch.randn(1, 3))
    def test_append_to(self):
        module = nn.Module()
        pb = ParallelBlock({"test": module})
        pb.append_to("test", module)
        assert len(pb["test"]) == 2
    def test_prepend_to(self):
        module = nn.Module()
        pb = ParallelBlock({"test": module})
        pb.prepend_to("test", module)
        assert len(pb["test"]) == 2
    def test_append_for_each(self):
        # shared=False gives each branch its own copy; shared=True one object.
        module = nn.Module()
        pb = ParallelBlock({"a": module, "b": module})
        pb.append_for_each(module)
        assert len(pb["a"]) == 2
        assert len(pb["b"]) == 2
        assert pb["a"][-1] != pb["b"][-1]
        pb.append_for_each(module, shared=True)
        assert len(pb["a"]) == 3
        assert len(pb["b"]) == 3
        assert pb["a"][-1] == pb["b"][-1]
    def test_prepend_for_each(self):
        module = nn.Module()
        pb = ParallelBlock({"a": module, "b": module})
        pb.prepend_for_each(module)
        assert len(pb["a"]) == 2
        assert len(pb["b"]) == 2
        assert pb["a"][0] != pb["b"][0]
        pb.prepend_for_each(module, shared=True)
        assert len(pb["a"]) == 3
        assert len(pb["b"]) == 3
        assert pb["a"][0] == pb["b"][0]
    def test_getitem(self):
        module = nn.Module()
        pb = ParallelBlock({"test": module})
        assert isinstance(pb["test"], BlockContainer)
        with pytest.raises(IndexError):
            pb["invalid_key"]
    def test_set_pre(self):
        pb = ParallelBlock({"a": PlusOne(), "b": PlusOne()})
        set_pre(pb, PlusOne())
        assert len(pb.pre) == 1
        block = Block(pb)
        assert not get_pre(Block())
        set_pre(block, PlusOne())
        assert len(get_pre(block)) == 1
    def test_input_schema_pre(self):
        # The input schema of a wrapped ParallelBlock follows its pre stage.
        pb = ParallelBlock({"a": PlusOne(), "b": PlusOne()})
        outputs = mm.schema.trace(pb, torch.randn(1, 3))
        input_schema = mm.schema.input(pb)
        assert len(input_schema) == 1
        assert len(mm.schema.output(pb)) == 2
        assert len(outputs) == 2
        pb2 = ParallelBlock({"a": PlusOne(), "b": PlusOne()})
        assert not get_pre(pb2)
        pb2.prepend(pb)
        assert not get_pre(pb2) == pb
        assert get_pre(pb2)[0] == pb
        pb2.append(pb)
        assert input_schema == mm.schema.input(pb2)
        assert mm.schema.output(pb2) == mm.schema.output(pb)
| EJHortala/models-1 | tests/unit/torch/test_block.py | test_block.py | py | 8,439 | python | en | code | null | github-code | 13 |
6791800798 | # ======================================================================================================================
# =========================== Defines and stores environment (biome) information
class Biome:
    """A game environment: its identity, the NPCs that can spawn in it, the
    turn at which it appears, and the events that may trigger there."""

    def __init__(
            self,
            biome_id: int,
            name: str,
            mobs: list,
            turn: int,
            events: list
    ):
        self.biome_id = biome_id  # unique numeric identifier
        self.name = name  # localized display name (from l10n)
        self.mobs = mobs  # NPCs that can appear in this biome
        # First turn the biome is used (the village uses -1 -- presumably
        # "available from the start"; confirm against the game loop).
        self.turn = turn
        self.events = events  # events that may trigger in this biome

    def __repr__(self) -> str:
        # Debug-friendly representation; mobs/events are omitted for brevity.
        return (f"{type(self).__name__}(biome_id={self.biome_id!r}, "
                f"name={self.name!r}, turn={self.turn!r})")
# Biome instances.  NOTE(review): l10n, the npc_* objects and the event_*
# objects are expected to be defined/imported elsewhere in the project.
biome_village = Biome(
    biome_id=0,
    name=l10n.biome.base_village.name,
    mobs=[],
    turn=-1,
    events=[event_biome_neutral]
)
biome_forest = Biome(
    biome_id=1,
    name=l10n.biome.forest.name,
    mobs=[npc_slime, npc_bat, npc_goblin],
    turn=0,
    events=[event_enigmatic_guy, event_biome_neutral, event_bear_attack]
)
biome_caves = Biome(
    biome_id=2,
    name=l10n.biome.caves.name,
    mobs=[npc_rat, npc_smol_goblin, npc_bat, npc_hobgoblin],
    turn=10,
    events=[event_biome_neutral]
)
biome_dumeors_den = Biome(
    biome_id=3,
    name=l10n.biome.dumeors_den.name,
    mobs=[npc_dumeors],
    turn=20,
    events=[event_biome_neutral]
)
# List of all biomes
biome_list = [biome_village, biome_forest, biome_caves, biome_dumeors_den]
# Dictionary of biomes keyed by their appearance turn, in descending order
biome_dict = dict(sorted({biome.turn: biome for biome in biome_list}.items(), reverse=True))
| Dinoxel/tobias_game | old_code/game_data.py | game_data.py | py | 1,487 | python | fr | code | 0 | github-code | 13 |
35806175962 | from Point2D import *
from math import sin, cos
from bazier import vec2d
class Missile:
    """A projectile travelling at a fixed speed of 6 units per update along
    the heading given by ``rad`` (radians); ``player`` records the owner."""

    def __init__(self, x, y, rad, player):
        self.position = vec2d(x, y)
        self.rad = rad
        self.player = player

    def move(self, x, y):
        """Translate the missile by (x, y) in screen coordinates."""
        self.position.x += x
        self.position.y += y

    def update(self):
        """Advance one frame: step 6 units along the heading.  The sine is
        negated because the screen y axis points downward."""
        step_x = cos(self.rad) * 6
        step_y = -sin(self.rad) * 6
        self.move(step_x, step_y)
| AlexVestin/GameJam | Missile.py | Missile.py | py | 426 | python | en | code | 0 | github-code | 13 |
7828564566 | '''
===============================================================================
-- Author: Hamid Doostmohammadi, Azadeh Nazemi
-- Create date: 28/10/2020
-- Description: This code is for skewing or deskewing using perspective
transform based on having 4 coordinate values to address them.
================================================================================
'''
import numpy as np
import cv2
import imutils
import sys
import os
def order_points(pts):
    """Order 4 corner points as [top-left, top-right, bottom-right,
    bottom-left] with the classic sum/difference heuristic: the top-left
    corner has the smallest x+y and the bottom-right the largest, while the
    top-right/bottom-left have the smallest/largest y-x."""
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(coord_sums)]   # top-left
    ordered[2] = pts[np.argmax(coord_sums)]   # bottom-right
    ordered[1] = pts[np.argmin(coord_diffs)]  # top-right
    ordered[3] = pts[np.argmax(coord_diffs)]  # bottom-left
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral *pts* of *image* onto an axis-aligned
    rectangle via a perspective transform and return the warped crop."""
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def _dist(p, q):
        # Euclidean distance between two 2D points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longer of each pair of opposite edges, truncated to
    # whole pixels (int(), matching the original behaviour).
    maxWidth = max(int(_dist(br, bl)), int(_dist(tr, tl)))
    maxHeight = max(int(_dist(tr, br)), int(_dist(tl, bl)))
    # Destination corners in the same tl/tr/br/bl order as `rect`.
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# this part is for skewing the image based on the value of m or n. If m!=0 and n=0 then image will be skewed toward left or right. If n!=0 and m=0 then image will be skewed toward top and bottom. Please modify m and n in line 71 and 72 accordingly.
def trans(image, m, n):
    """Skew *image* by warping a shifted quadrilateral back to a rectangle.

    m shifts the bottom corners horizontally (left/right skew) and n shifts
    the top edge (and bottom-left corner) vertically (top/bottom skew).
    Both values depend heavily on the image size and rotation and must be
    tuned per image.
    """
    h, w = image.shape[:2]
    corners = np.array([(0, n), (w, n), (w-m, h), (m, h-n)], dtype="float32")
    return four_point_transform(image, corners)
fileMode = "jpg"  # NOTE(review): unused, as is `ext` below -- candidates for cleanup
# Walk the directory given as the first CLI argument and skew every file.
for root, dirs, files in os.walk(sys.argv[1]):
    for filename in files:
        ext = filename[filename.rfind("."):].lower()
        fn = os.path.join(root, filename)
        imagePath = fn
        image = cv2.imread(imagePath)
        (h, w) = image.shape[:2]
        # you can resize your image in line 70 if you need to.
        # image = cv2.resize(image, (int(w/10), int(h/10)))
        m = -90
        n = 0
        (h, w) = image.shape[:2]
        # Arguments m ,n should be modified
        warped = trans(image, m, n)
        # Written to the current working directory under the bare filename.
        cv2.imwrite(filename, warped)
| HamidDoost/basic-image-processing-concepts | skewOrDeskewTransform.py | skewOrDeskewTransform.py | py | 2,720 | python | en | code | 0 | github-code | 13 |
72106319699 | ## return two primes a and b whose sum is equal to given even number
def get_primes(num):
    """Sieve all primes up to *num* (inclusive).

    Returns:
        primes: list of primes <= num in increasing order.
        lp: list where lp[k] is the least prime divisor of k
            (lp[0] and lp[1] stay 0).
    """
    lp = [0]*(num+1) ## to store least prime divisors
    primes = []
    for val in range(2,num+1):
        if not lp[val]:
            lp[val] = val ## least divisor of prime is the number itself (ignoring 1)
            primes.append(val)
        for j in primes:
            # Linear-sieve breaks (missing in the original, which scanned
            # every known prime for every val): primes are increasing, so
            # once val*j overflows we can stop, and stopping when j divides
            # val guarantees each composite is marked exactly once, by its
            # least prime factor.  Final lp/primes values are unchanged.
            if val*j > num:
                break
            lp[j*val] = j ## all the multiples of these primes are not prime
            if val % j == 0:
                break
    return primes,lp
def main():
    """Read T test cases; for each even number print one Goldbach pair."""
    cases = int(input())
    for _ in range(cases):
        target = int(input())
        primes, lp = get_primes(target)
        for p in primes:
            complement = target - p
            # lp[x] == x holds exactly when x is prime.
            if lp[complement] == complement:
                print('{} {}'.format(p, complement))
                break


if __name__ == '__main__':
    main()
74638433296 | #Importing
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import time
import pandas as pd
import requests
import csv
# Page that lists the stars whose table rows we scrape.
START_URL = "https://en.wikipedia.org/wiki/List_of_brightest_stars_and_other_record_stars"
# NOTE(review): hard-coded, machine-specific chromedriver path — parameterize
# or rely on PATH/Selenium Manager before sharing this script.
browser = webdriver.Chrome("/Users/apoorvelous/Downloads/chromedriver")
browser.get(START_URL)
time.sleep(11)  # crude fixed wait for the page to finish loading
# function definition to identify specific part of the given webpage to extract data from
def scrape():
    """Scrape list entries across 97 result pages into `starData`.

    Publishes `starData` and `headers` at module scope (the code after this
    function reads both) and writes a CSV snapshot to scrapper_2.csv.
    """
    # Fix: these were function-locals, but the module-level code below the
    # call reads them — without `global` that code raises NameError.
    global headers, starData
    headers = ["Name", "Distance", "Mass", "Radius"]
    starData = []
    for _ in range(0, 97):
        soup = BeautifulSoup(browser.page_source, "html.parser")
        # NOTE(review): attrs here is a *set* literal, not a dict — confirm
        # {"class": "exoplanet"} was not intended.
        for ulTag in soup.find_all("ul", attrs={"class", "exoplanet"}):
            liTags = ulTag.find_all("li")
            tempList = []
            for index, liTag in enumerate(liTags):
                if index == 0:
                    # First cell holds the linked name.
                    tempList.append(liTag.find_all("a")[0].contents[0])
                else:
                    try:
                        tempList.append(liTag.contents[0])
                    except Exception:
                        tempList.append("")
            starData.append(tempList)
        # Advance to the next results page. Selenium 4 removed the
        # find_element_by_* helpers; use the By locator API instead.
        browser.find_element(
            By.XPATH, '//*[@id="primary_column"]/footer/div/div/div/nav/span[2]/a'
        ).click()
    with open("scrapper_2.csv", "w") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(headers)
        csvwriter.writerows(starData)
#function call
scrape()
newStarData = []
def scrapMoreData(hyperlink):
    """Fetch one detail page and append its fact-row values to newStarData.

    On any failure the call sleeps a second and retries itself recursively.
    """
    try:
        page = requests.get(hyperlink)
        soup = BeautifulSoup(page.content, "html.parser")
        tempList = []
        for tr_tag in soup.find_all("tr", attrs={"class": "fact_row"}):
            tdTags = tr_tag.find_all("td")
            for tdTag in tdTags:
                try:
                    # Missing "value" div -> keep the column with an empty cell.
                    tempList.append(tdTag.find_all("div", attrs = {"class" : "value"})[0].contents[0])
                except:
                    tempList.append("")
        newStarData.append(tempList)
    except:
        # NOTE(review): bare except + unbounded recursive retry — a URL that
        # fails permanently recurses until RecursionError; consider a retry
        # cap and a narrower exception type.
        time.sleep(1)
        scrapMoreData(hyperlink)
# NOTE(review): `starData` and `headers` are assigned inside scrape();
# unless scrape() publishes them at module scope this loop raises
# NameError at runtime — verify.
for index, data in enumerate(starData):
    # data[5] is assumed to be the detail-page hyperlink — confirm against
    # the row layout produced by scrape().
    scrapMoreData(data[5])
    print(f"scraping at hyperlink {index+1} is completed.")
# [start, stop]
print(newStarData[0:10])
final_star_data = []
for index, data in enumerate(starData):
    new_star_data_element = newStarData[index]
    new_star_data_element = [elem.replace("\n", "") for elem in new_star_data_element]
    # From the start to the 7th element.
    new_star_data_element = new_star_data_element[:7]
    final_star_data.append(data + new_star_data_element)
# NOTE(review): this writes `starData`, not `final_star_data` — the merged
# detail columns are built but never saved; confirm which list should be
# written.
with open("starData.csv", "w") as f:
    csvwriter = csv.writer(f)
    csvwriter.writerow(headers)
    csvwriter.writerows(starData)
1338942154 | from socket import *
import findDog as FD
import dog_bowl as bowl
import user_setting as usr
from gpiozero import LED
from time import sleep
# flag gates the recv(): while a command is being processed, no new data
# is read from the socket.
flag = False
_led = LED(17)
while True:
    # Communicate with the Android app; a fresh connection per iteration.
    clientSocket = socket(AF_INET, SOCK_STREAM)
    ADDR = (usr.Mobile,5050)
    clientSocket.connect(ADDR)
    print("connect") #socket connection check
    _led.off()
    if not flag :
        data = clientSocket.recv(1024)
        data = data.decode()
        print(data)
    if data=="1\n":
        # NOTE(review): flag is set True here but never reset in this
        # branch, and `past` is never read again — verify intent.
        flag = True
        print("Find!!")
        past = data
        # Perform the "Find" command
        data = FD.findDog(clientSocket)
    elif data == "2\n":
        flag = True
        print("Check!!")
        # Perform the food-bowl check command
        result = bowl.remain_food_check()
        if not result: # fallback so a failed detection does not error out
            result = "Full"
        if result:
            print(result)
            clientSocket.send(result.encode())
        flag = False
    elif data == "3\n":
        flag = True
        _led.on()
        print("Aircon!!")
        flag = False
    clientSocket.close()
| god102104/oh_spaghetti | client_socket.py | client_socket.py | py | 1,055 | python | en | code | 0 | github-code | 13 |
65778793 | import numpy as np
import math
"""
以三硬币模型作为最简单的模拟
"""
class EM:
    """EM algorithm demo for the classic three-coin model.

    Coin A (bias pro_A) decides whether each observation was produced by
    coin B (bias pro_B) or coin C (bias pro_C).
    """
    def __init__(self, prob):
        # prob: initial [pro_A, pro_B, pro_C]
        self.pro_A, self.pro_B, self.pro_C = prob
    # e_step
    def pmf(self, i,data):
        """Posterior probability that observation i came from coin B."""
        pro_1 = self.pro_A * math.pow(self.pro_B, data[i]) * math.pow((1 - self.pro_B), 1 - data[i])
        pro_2 = (1 - self.pro_A) * math.pow(self.pro_C, data[i]) * math.pow((1 - self.pro_C), 1 - data[i])
        return pro_1 / (pro_1 + pro_2)
    # m_step
    def fit(self, data):
        """Generator-driven EM: each send()/next() performs one update step."""
        count = len(data)
        print('init prob:{}, {}, {}'.format(self.pro_A, self.pro_B, self.pro_C))
        for d in range(count):
            _ = yield
            _pmf = [self.pmf(k,data) for k in range(count)] # posterior probs that each observation follows the B distribution
            ## update step
            pro_A = 1 / count * sum(_pmf)
            pro_B = sum([_pmf[k] * data[k] for k in range(count)]) / sum([_pmf[k] for k in range(count)])
            pro_C = sum([(1 - _pmf[k]) * data[k] for k in range(count)]) / sum([(1 - _pmf[k]) for k in range(count)])
            ## report
            print('{}/{} pro_a:{:.3f}, pro_b:{:.3f}, pro_c:{:.3f}'.format(d + 1, count, pro_A, pro_B, pro_C))
            self.pro_A = pro_A
            self.pro_B = pro_B
            self.pro_C = pro_C
def main():
    """Run EM on the three-coin observations and drive two update steps."""
    observations = [1, 1, 0, 1, 0, 0, 1, 0, 1, 1]
    model = EM(prob=[0.5, 0.5, 0.5])
    stepper = model.fit(observations)
    next(stepper)           # prime the generator up to its first yield
    print(stepper.send(1))  # one EM update; the generator yields None
    print(stepper.send(2))


if __name__ == '__main__':
    main()
| HitAgain/Machine-Learning-practice | EM/EM.py | EM.py | py | 1,487 | python | en | code | 2 | github-code | 13 |
2791633720 | #!/usr/bin/python3
import gi
from pathlib import Path
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gtk
ROOT = Path( __file__ ).parent.absolute()
try:
gi.require_version('AyatanaAppIndicator3', '0.1')
from gi.repository import AyatanaAppIndicator3 as AppIndicator
except (ImportError, ValueError):
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
class App:
    """Tray-indicator testcase: flips ACTIVE/ATTENTION status every 5 s."""
    def __init__(self) -> None:
        self.indicator = AppIndicator.Indicator.new(
            "testcase",
            str(ROOT / "emblem-web.svg"),
            AppIndicator.IndicatorCategory.APPLICATION_STATUS
        )
        # Icon used while the indicator is in ATTENTION status.
        self.indicator.set_attention_icon_full(
            str(ROOT / "unread.png"),
            "test"
        )
        self.indicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)
        self.indicator.set_title("test")
        self.menu = Gtk.Menu()
        for i in range(3):
            buf = "Test-undermenu - %d" % i
            menu_items = Gtk.MenuItem(label=buf)
            self.menu.append(menu_items)
            menu_items.show()
        self.indicator.set_menu(self.menu)
        # Counter shown as the indicator label; capped at 99 in tick().
        self.i = 1
        GLib.timeout_add_seconds(5, self.tick, None)
    def tick(self, any):
        # GLib timeout callback: odd counts show the label + ATTENTION icon,
        # even counts clear the label. Returning True keeps the timer alive.
        print(self.i)
        if self.i % 2:
            self.indicator.set_label(str(self.i), "99")
            self.indicator.set_status(AppIndicator.IndicatorStatus.ATTENTION)
        else:
            self.indicator.set_label("", "99")
            self.indicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)
        self.i = min(99, self.i + 1)
        return True
    def main(self):
        # Blocks inside the GTK main loop until the app quits.
        Gtk.main()
        print("Quit")
if __name__ == '__main__':
    App().main()
| nE0sIghT/appindicator-testcase | testcase.py | testcase.py | py | 1,745 | python | en | code | 0 | github-code | 13 |
28252066853 | from django.shortcuts import render,redirect
from django.views import View
from .forms import RegisterForm , LoginForm , ImageForm
from django.contrib.auth import authenticate,login,logout
from .models import CategoryModel,ImageModel
from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from .dl_model.model import classify_image
from django.http import QueryDict
# Create your views here.
def signout_view(request):
    """Log the current user out, then send them back to the landing page."""
    logout(request)
    return redirect('home')
class home_view(View):
    """Landing page: login form for anonymous users, redirect otherwise."""
    def get(self , request):
        # Authenticated users skip the login form entirely.
        if request.user.is_authenticated:
            return redirect('addimage')
        forms = LoginForm()
        context = {'forms': forms}
        return render(request , 'home.html' , context)
    def post(self , request):
        """Authenticate the submitted credentials and start a session."""
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username = username , password = password)
        if user is not None:
            login(request , user)
            return redirect('gallery')
        # Invalid credentials: back to the login page.
        return redirect('home')
class register_view(View):
    """Sign-up page backed by RegisterForm."""
    def get(self , request):
        if request.user.is_authenticated:
            return redirect('gallery')
        return render(request , 'register.html' , {'forms': RegisterForm()})
    def post(self , request):
        bound_form = RegisterForm(request.POST)
        if bound_form.is_valid():
            bound_form.save()
            return redirect('home')
        # Re-render with the bound form so validation errors are shown.
        return render(request , 'register.html' , {'forms': bound_form})
class gallery_view(View):
    """Full gallery: all categories alongside all uploaded images."""
    def get(self , request):
        context = {
            'category': CategoryModel.objects.all(),
            'Images': ImageModel.objects.all(),
        }
        return render(request , 'gallery.html', context)
    def post(self , request):
        # NOTE(review): renders without category/image context — confirm the
        # template tolerates the missing variables.
        return render(request , 'gallery.html')
class Cat_view(View):
    """Gallery filtered down to a single category."""
    def get(self , request , id):
        context = {
            'category': CategoryModel.objects.all(),
            'Images': ImageModel.objects.filter(cat=id),
        }
        return render(request , 'gallery.html', context)
class myupload_view(View) :
    """List only the images uploaded by the logged-in user."""
    def get(self , request):
        own_images = ImageModel.objects.filter(uploaded_by=request.user)
        return render(request , 'myupload.html', {'Images': own_images})
class addimage_view(View):
    """Upload form; on POST, classify the leaf image and store the result."""
    def get(self , request):
        forms = ImageForm()
        context = {'forms':forms}
        return render(request , 'addimage.html',context)
    def post(self , request):
        img = request.FILES['image']
        img.seek(0)
        # convert the file to bytes
        image = img.read()
        # result: ranked predictions, each (species, status, probability).
        result = classify_image(image)
        #Select the top three predictions according to their probabilities
        top1 = '1. Species: %s, Status: %s, Probability: %.4f'%(result[0][0], result[0][1], result[0][2])
        top2 = '2. Species: %s, Status: %s, Probability: %.4f'%(result[1][0], result[1][1], result[1][2])
        top3 = '3. Species: %s, Status: %s, Probability: %.4f'%(result[2][0], result[2][1], result[2][2])
        predictions = [ { 'pred':top1 }, { 'pred':top2 }, { 'pred':top3 } ]
        print(predictions)
        # Rewind so the file can be re-read by the form/storage below.
        img.seek(0)
        context = { 'predictions':predictions }
        # NOTE(review): this assignment is dead — q is rebuilt (mutable) below.
        q = QueryDict()
        d={}
        # Maps a predicted species name to its CategoryModel primary key.
        catmap={"Apple":"4","Blueberry":"5","Cherry":"6","Corn":"7","Grape":"8","Orange":"9","Peach":"10","Pepper,":"11","Potato":"12","Raspberry":"13","Soybean":"14","Squash":"15","Strawberry":"16","Tomato":"17","Corn_(maize)":'18',"Cherry_(including_sour)":"19","Pepper,_bell":"20"}
        d['csrfmiddlewaretoken'] = request.POST['csrfmiddlewaretoken']
        d['title'] = result[0][0] +" "+ result[0][1]
        d['cat'] = catmap[result[0][0]]
        # d['cat'] = request.POST['cat']
        d['desc'] = f'{top1}'
        # Synthesize form data from the top prediction instead of user input.
        q = QueryDict('', mutable=True)
        q.update(d)
        forms = ImageForm(q , request.FILES)
        if forms.errors:
            print(forms.errors)
        if forms.is_valid():
            print(request.POST)
            print(type(request.POST))
            task = forms.save(commit=False)
            task.uploaded_by = request.user
            task.save()
            # NOTE(review): the upload is saved a second time here via
            # FileSystemStorage (the model save already stored it) — confirm
            # the duplicate file is intended for the preview URL.
            fs = FileSystemStorage()
            filename = fs.save(request.FILES['image'].name, request.FILES['image'])
            uploaded_file_url = fs.url(filename)
            context['url'] = uploaded_file_url
            # return redirect('gallery')
            return render(request, 'predict.html', context)
        return render(request , 'addimage.html')
# create view for single view img
def view_image(request,image_id):
    """Render the detail page for a single image.

    NOTE(review): ImageModel.objects.get raises DoesNotExist (HTTP 500) for
    an unknown id — consider get_object_or_404.
    """
    image = ImageModel.objects.get(id=image_id)
    context = {'image': image}
    return render(request, 'image.html', context)
def about_view(request):
    """Render the static About page."""
    return render(request , 'about.html')
| Apeksha2311/LeafDetective | PlantDiseaseApp/views.py | views.py | py | 5,358 | python | en | code | 0 | github-code | 13 |
37410142069 | from functools import reduce
from itertools import product
from random import random
import cv2 as cv
import numpy as np
from scipy import ndimage
from data import uint
class Augmentor:
    """Generates randomly augmented variants of a grayscale image.

    Pipeline: global rotation, global shift, salt noise, independent
    per-connected-component ("local") shifts, perspective jitter, and
    brightness scaling. All *_rng arguments are (low, high) sampling ranges.
    """
    def __init__(self,
                 rotation_rng=(-20, 20),
                 g_shift_x_rng=(-10, 10),
                 g_shift_y_rng=(-10, 10),
                 noise_prob=0.01,
                 l_shift_x_rng=(-10, 10),
                 l_shift_y_rng=(-10, 10),
                 perspective_corners_rng=((5, 5), (5, 5), (5, 5), (5, 5)),
                 light_rng=(.7, 1.1)):
        self._rotation_rng = rotation_rng
        self._g_shift_x_rng = g_shift_x_rng
        self._g_shift_y_rng = g_shift_y_rng
        self._noise_prob = noise_prob
        self._l_shift_x_rng = l_shift_x_rng
        self._l_shift_y_rng = l_shift_y_rng
        self._perspective_corners_rng = perspective_corners_rng
        self._light_rng = light_rng

    def augment(self, img, n):
        """Yield the original image followed by n augmented copies."""
        yield img
        for _ in range(n):
            yield self._generate_from(img)

    def _generate_from(self, img):
        # Apply every augmentation in sequence.
        pipeline = [self._rotate, self._shift, self._noise, self._move, self._change_perspective, self._light]
        return reduce(lambda op, fun: fun(op), pipeline, img)

    def _rotate(self, img):
        return ndimage.rotate(img, np.random.uniform(*self._rotation_rng), reshape=False)

    def _shift(self, img):
        # Whole-image translation by an integer offset.
        dx = int(np.random.uniform(*self._g_shift_x_rng))
        dy = int(np.random.uniform(*self._g_shift_y_rng))
        M = np.float32([[1, 0, dx],
                        [0, 1, dy]])
        return cv.warpAffine(img, M, img.shape[::-1])

    def _noise(self, img):
        # Salt noise: each pixel is replaced by a random intensity with
        # probability noise_prob. Mutates img in place.
        h, w = img.shape
        for x, y in product(range(h), range(w)):
            if random() <= self._noise_prob:
                img[x, y] = 255 * random()
        return img

    def _move(self, img):
        # Shift each connected component independently ("local" shifts).
        binarized = np.where(img > 0, 1, 0).astype(uint)
        nlabels, labels = cv.connectedComponents(binarized, 8, cv.CV_32S)
        # Bug fix: sample from the *local* shift ranges; previously the
        # global ranges were reused and l_shift_*_rng was silently ignored
        # (defaults are equal, so default behavior is unchanged).
        shifts = [(int(np.random.uniform(*self._l_shift_x_rng)),
                   int(np.random.uniform(*self._l_shift_y_rng)))
                  for _ in range(nlabels)]
        h, w = img.shape
        clip_h = lambda v: 0 if v < 0 else min(v, h - 1)
        clip_w = lambda v: 0 if v < 0 else min(v, w - 1)
        moved = np.zeros_like(img, dtype=uint)
        for x, y in product(range(h), range(w)):
            label = labels[x, y]
            if label == 0: continue
            dx, dy = shifts[label]
            moved[clip_h(x + dx), clip_w(y + dy)] = img[x, y]
        return moved

    def _change_perspective(self, img):
        # NOTE(review): uniform(r[0], r[0]) / uniform(r[1], r[1]) have equal
        # bounds, so each corner offset is deterministic — presumably one
        # bound was meant to be negated or distinct; confirm intent before
        # changing behavior.
        ds = [[int(np.random.uniform(r[0], r[0])), int(np.random.uniform(r[1], r[1]))]
              for r in self._perspective_corners_rng]
        h, w = img.shape
        corners = list(product([0, w], [0, h]))
        # Jitter each corner inward/outward while staying inside the image.
        clip_h = lambda v, r: v + r if v + r < h else v - r
        clip_w = lambda v, r: v + r if v + r < w else v - r
        pts1 = np.float32([[clip_w(x, r[0]), clip_h(y, r[1])] for (x, y), r in zip(corners, ds)])
        pts2 = np.float32(corners)
        M = cv.getPerspectiveTransform(pts1, pts2)
        return cv.warpPerspective(img, M, (w, h))

    def _light(self, img):
        # Scale the overall brightness by a random factor.
        return (np.random.uniform(*self._light_rng) * img).astype(uint)
| pmikolajczyk41/retina-matcher | data/augmentor.py | augmentor.py | py | 3,317 | python | en | code | 0 | github-code | 13 |
42659464564 | str = input('Give the string to encrypt\n')
key = int(input('Give the key for encryption\n'))
def enc(c, key) :
    """Caesar-shift a single character forward by `key`.

    Letters wrap within their own case; any other character is returned
    unchanged so that spaces/punctuation survive an encrypt/decrypt round
    trip (previously they were mangled into letters).
    """
    if c.islower() :
        return chr((ord(c) - 97 + key)%26 + 97)
    if c.isupper() :
        return chr((ord(c) - 65 + key)%26 + 65)
    return c  # non-alphabetic: pass through untouched
def dec(c, key) :
    """Caesar-shift a single character backward by `key` (inverse of enc).

    Letters wrap within their own case; any other character is returned
    unchanged, matching enc's pass-through so round trips are lossless.
    """
    if c.islower() :
        return chr((ord(c) - 97 - key + 26)%26 + 97)
    if c.isupper() :
        return chr((ord(c) - 65 - key + 26)%26 + 65)
    return c  # non-alphabetic: pass through untouched
def encrypt(str, key) :
    """Apply the shift cipher character-by-character to `str`."""
    return ''.join(enc(c, key) for c in str)
def decrypt(str, key) :
    """Undo the shift cipher character-by-character on `str`."""
    return ''.join(dec(c, key) for c in str)
# Demo round trip: encrypt, show, decrypt, show.
str1 = encrypt(str, key)
print('The encrypted string -->')
print(str1)
str1 = decrypt(str1, key)
print('Decrypted string -->')
print(str1)
| AatirNadim/Socket-Programming | substitution_cipher/no_socket.py | no_socket.py | py | 727 | python | en | code | 0 | github-code | 13 |
11721024053 | import telebot
from telebot import types
from data import langs, menu, translations # noqa
from settings import DEBUG, managers, token # noqa
# TODO
# /takeaway: add a link to MozoGao
# When delivery is chosen, ask for the location
# When PhonePe is chosen, return a payment link (with the amount?)
# On ordering, repeat the list of items
# Languages and translations!!
# Photos and details for dishes
# Add a veg menu
# Also point users to "drinks" <- show after adding to the cart
# separate data for users
cart = {
# 'user_id': {
# 'cart': {},
# 'order_type': {},
# 'pay_type': {},
# 'comments': {},
# 'last_call': None
# }
}
lang = {
# 'user_id': 'smth'
}
curr_menu = {
# 'user_id': 'smth'
}
menu_hash = {
# hash for link strings woth dictionary
# str( hash( 'Full menu:drinks:...') ): 'Full menu:drinks:...',
}
def get_menu_hash(path):
    """Return a short stable key for a menu path, caching the reverse map.

    Telegram callback_data is length-limited, so buttons carry a hash of the
    full 'Full menu:drinks:...' path; menu_hash resolves it back later.
    """
    global menu_hash
    key = str(hash(path))
    menu_hash.setdefault(key, path)
    return key
# order_type
REST = 'REST'
AWAY = 'AWAY'
DLVR = 'DLVR'
# pay_type
CASH = 'CASH'
PHPE = 'PHNE'
messages = {
# 'user_id': []
}
# Load previously seen users (and their chosen language) from disk.
# File format: user_id::username::lang::comment, one user per line.
known_users = {}
f = open('known_users.txt', 'r')
x = f.readline() # headers
while x:
    x = f.readline()
    if '::' in x:
        x = x.replace('\n', '')
        user_id = int(x.split('::')[0])
        known_users[user_id] = {'username': x.split('::')[1], 'comment': x.split('::')[3]}
        # The literal string 'None' means no language was picked yet.
        lang[user_id] = x.split('::')[2] if x.split('::')[2] != 'None' else None
f.close()
bot = telebot.TeleBot(token)
def logger(message):
    """Print `message` when DEBUG is enabled; drop it otherwise."""
    global DEBUG
    if DEBUG:
        print(message)
    # else: route to a real log sink once one exists
def reset_settings(user_id, soft=False):
    """Drop the user's cart; unless soft, also forget language and menu spot."""
    global lang, curr_menu, cart
    if not soft:
        lang[user_id] = None
        curr_menu[user_id] = None
    cart.pop(user_id, None)
def get_translation(s, user_id):
    """Translate UI string `s` into the user's language (English = identity).

    Unknown languages or missing entries fall back to the English original.
    """
    global lang, translations
    language = lang.get(user_id, 'eng')
    if language == 'eng':
        return s
    return translations.get(language, {}).get(s, s)
def get_concrete_data(crnt, default=menu):
    """Resolve a colon-separated path like 'Full menu:drinks' inside `default`.

    A `crnt` of None returns `default` itself (the whole menu by default).
    """
    if crnt is None:
        return default
    if ':' in crnt:
        head, rest = crnt.split(':', 1)
        return get_concrete_data(rest, default[head])
    return default[crnt]
def track_and_clear_messages(message, and_clear=True):
    """Register `message` in the per-chat history, optionally wiping old ones.

    When and_clear is truthy, every previously tracked message in the chat
    is deleted from Telegram and dropped from the history before `message`
    is tracked. (Some call sites pass the string 'track_only', which is
    truthy and therefore behaves like True — verify that is intended.)
    """
    global messages
    if message.chat.id not in messages:
        messages[message.chat.id] = []
    not_inserted = True
    for m in messages[message.chat.id]:
        if m.id == message.id:
            not_inserted = False
        if and_clear:
            try:
                bot.delete_message(m.chat.id, m.id)
            except Exception as e:
                logger(
                    'EXCEPTION WARNING while deleting message "{}" ({}): {}'.format(
                        m.text, m.id, e
                    )
                )
            # NOTE(review): removing from the list while iterating it skips
            # every other element, so some entries can survive a clear.
            messages[message.chat.id].remove(m)
    logger(
        'track message "{}" ({}), already there (={}): [{}]'.format(
            message.text,
            message.id,
            'no' if not_inserted else 'yes',
            [(m.text, m.id) for m in messages[message.chat.id]],
        )
    )
    if not_inserted:
        messages[message.chat.id].append(message)
def get_current_cart(user_id):
    """Fetch the user's order state, creating a fresh one on first access."""
    global cart, REST
    return cart.setdefault(user_id, {
        'cart': {},
        'order_type': REST,
        'pay_type': None,
        'comments': [],
        'last_call': None,
    })
def check_lang(user_id):
    """Show the language-picker keyboard if the user has no language yet.

    Returns the sent Message (truthy) when the picker was shown, or False
    when the user already has a language set.
    """
    global lang, langs
    m_ = False
    current_language = lang.get(user_id)
    logger(f'current_language is {current_language}')
    if not current_language:
        keyboard = types.InlineKeyboardMarkup()
        # One button per supported language; `call` is its callback_data.
        for lang_name, call in langs:
            lang_key = types.InlineKeyboardButton(text=lang_name, callback_data=call)
            keyboard.add(lang_key)
        question = '?'
        m_ = bot.send_message(user_id, text=question, reply_markup=keyboard)
        track_and_clear_messages(m_, False)
    logger(
        'Language check for {}: {} ({})'.format(user_id, True if m_ else False, lang.get(user_id))
    )
    return m_
def update_langs():
    """Rewrite known_users.txt from the in-memory user and language tables."""
    global lang, known_users
    with open('known_users.txt', 'w') as f:
        f.write('#user_id::username::lang::comment\n')
        for uid, info in known_users.items():
            f.write('{}::{}::{}::{}\n'.format(
                uid, info['username'], lang[uid], info['comment']
            ))
def show_menu(message, show='menu'):
    """Render one of three screens as an inline keyboard.

    show='menu': the user's current submenu; 'cart': cart content with
    remove buttons; 'product': a single item card. Previously tracked
    messages in the chat are cleared before the new screen is sent.
    """
    logger('showing menu, type ' + show)
    global lang, curr_menu, menu, cart
    messages_stack = []
    current_cart = get_current_cart(message.chat.id)
    def make_keyboard(current):
        # Build the keyboard for the submenu at path `current` (None = top).
        keyboard = types.InlineKeyboardMarkup()
        def add_menu_buttons(submenu_data, prev_path):
            for i in submenu_data:
                path = get_menu_hash(prev_path + i)
                if isinstance(submenu_data[i], list):
                    # Leaf item: [description, image_url, price]; button
                    # orders it directly.
                    template_text = '{} / {}'
                    name = get_translation(i, message.chat.id)
                    text_ = template_text.format(name, submenu_data[i][2])
                    callback_ = 'open_item_' + path # for showing product info
                    callback_ = 'order_' + path
                else:
                    text_ = get_translation(i, message.chat.id)
                    callback_ = 'open_menu_' + path
                item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
                logger(f'::adding "{text_}", callback "{callback_}"')
                keyboard.add(item_key)
            if current:
                text_ = get_translation('Go to top menu', message.chat.id)
                callback_ = 'open_menu'
                logger(f'::adding "{text_}", callback "{callback_}"')
                keyboard.add(types.InlineKeyboardButton(text=text_, callback_data=callback_))
        data_ = get_concrete_data(current)
        if isinstance(data_, dict):
            add_menu_buttons(data_, '' if not current else (current + ':'))
        else:
            logger('WARNING!! for path {current} not possible make menu (data type is not dict)')
        return keyboard
    if show == 'cart':
        # show cart content: one "name * qty = total" remove-button per item
        keyboard = types.InlineKeyboardMarkup()
        for c in current_cart['cart']:
            template_text = get_translation('{} * {} = {} rs.', message.chat.id)
            name = get_translation(c, message.chat.id)
            amount = str(current_cart['cart'][c][3])
            total = str(int(current_cart['cart'][c][2]) * int(current_cart['cart'][c][3]))
            text_ = template_text.format(name, amount, total)
            callback_ = 'remove_order_{}'.format(get_menu_hash(current_cart['cart'][c][4]))
            logger(f'::adding "{text_}", callback "{callback_}"')
            item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
            keyboard.add(item_key)
        text_ = get_translation('Proceed to order', message.chat.id)
        callback_ = 'order_proceed_2'
        logger(f'::adding "{text_}", callback "{callback_}"')
        item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
        keyboard.add(item_key)
    elif show == 'product':
        # show info about product and order buttons
        data = get_concrete_data(curr_menu.get(message.chat.id))
        text = '***___{}___***</b>\n{}, {} rs.'.format(
            curr_menu.get(message.chat.id).split(':')[-2], data[0], data[2]
        )
        messages_stack.append(text)
        # send picture with url data_[1]
        # m_ = bot.send_message(message.chat.id, text=question)
        # track_and_clear_messages(m_, False)
        # track_and_clear_messages(product_description, 'track_only')
        # track_and_clear_messages(product_description, 'track_only')
        # todo need to keep item's messages while changing count of items
        keyboard = types.InlineKeyboardMarkup()
        text_ = get_translation('Add 1', message.chat.id)
        callback_ = 'order_' + curr_menu.get(message.chat.id)
        logger(f'::adding "{text_}", callback "{callback_}"')
        item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
        keyboard.add(item_key)
        # Offer "Remove 1" only if this product is already in the cart.
        for i in current_cart['cart']:
            if current_cart['cart'][i][0] == data[0] and current_cart['cart'][i][2] == data[2]:
                if current_cart['cart'][i][3] > 0:
                    text_ = get_translation('Remove 1', message.chat.id)
                    callback_ = 'remove_order_' + get_menu_hash(curr_menu.get(message.chat.id))
                    logger(f'::adding "{text_}", callback "{callback_}"')
                    item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
                    keyboard.add(item_key)
    else: # if show == 'menu':
        # show current menu
        keyboard = make_keyboard(curr_menu.get(message.chat.id))
    # Non-empty cart: append a summary button that opens the cart screen.
    if current_cart['cart'] and show != 'cart':
        cart_items = 0
        cart_price = 0
        for c in current_cart['cart']:
            cart_items += int(current_cart['cart'][c][3])
            cart_price += int(current_cart['cart'][c][3]) * int(current_cart['cart'][c][2])
        text_ = get_translation('Cart: {} items = {} rs.', message.chat.id).format(
            cart_items, cart_price
        )
        callback_ = 'order_proceed'
        logger(f'::adding "{text_}", callback "{callback_}"')
        item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
        keyboard.add(item_key)
    question = get_translation('Please select ', message.chat.id)
    # "Back" button, or jump straight to the full menu
    if curr_menu.get(message.chat.id):
        text_ = get_translation('<< back', message.chat.id)
        callback_ = 'go_back' if show == 'menu' else 'open_menu_'
        logger(f'::adding "{text_}", callback "{callback_}"')
        item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
        keyboard.add(item_key)
        question = ' > '.join(
            [
                get_translation(s, message.chat.id)
                for s in curr_menu.get(message.chat.id).split(':')
            ]
        )
    if show == 'cart':
        question = get_translation(
            'Select positions to delete or proceed to order', message.chat.id
        )
    track_and_clear_messages(message)
    for m in messages_stack:
        m_ = bot.send_message(message.chat.id, text=question)
        track_and_clear_messages(m_, False)
        question = m
    m_ = bot.send_message(message.chat.id, text=question, reply_markup=keyboard)
    track_and_clear_messages(m_)
@bot.message_handler(content_types=['text']) # ['text', 'document', 'audio']
def get_text_messages(message):
    """Handle plain-text messages: register new users and process commands.

    /menu resets the menu position, /clear wipes all user state; any other
    text is stored as an order comment. The menu is then (re)shown unless
    the language picker had to be sent first.
    """
    logger('message received')
    global DEBUG, curr_menu
    track_and_clear_messages(message)
    if message.chat.id not in known_users:
        # First contact: persist the user; alert the managers when the
        # account has no username (it cannot be @-mentioned later).
        f = open('known_users.txt', 'a')
        comment = 'Auto added'
        f.write(
            '{}::{}::{}::{}\n'.format(
                message.chat.id, message.chat.username, lang.get(message.chat.id, None), comment
            )
        )
        if not message.chat.username:
            bot.forward_message(managers[0], message.chat.id, message.id)
            bot.send_message(managers[0], text=f'Forwarded from {message.chat.id} {message.chat}')
            bot.forward_message(managers[1], message.chat.id, message.id)
            bot.send_message(managers[1], text=f'Forwarded from {message.chat.id} {message.chat}')
        known_users[message.chat.id] = {'username': message.chat.username, 'comment': comment}
        f.close()
    if message.text == '/menu':
        curr_menu[message.chat.id] = None
    elif message.text == '/clear':
        reset_settings(message.chat.id)
    # elif message.text == '/feedback':
    # todo receive a message from client and send it to manager
    else:
        current_cart = get_current_cart(message.chat.id)
        current_cart['comments'].append(message.text)
    if not check_lang(message.chat.id):
        show_menu(message)
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Dispatch every inline-button press by its callback_data prefix.

    Mutates the pressing user's cart/menu state and finally re-renders the
    appropriate screen; any error falls back to the top menu with an
    apology message.
    """
    try:
        logger(
            'callback_worker from {} : {} [{}]'.format(
                call.message.chat.username, call.data, call.message.text
            )
        )
        global lang, curr_menu, cart
        show_type = 'menu'
        current_cart = get_current_cart(call.message.chat.id)
        current_cart['last_call'] = call.data
        check_lang(call.message.chat.id)
        # set language ('set_<code>_lang')
        if call.data.startswith('set_') and call.data.endswith('_lang'):
            lang[call.message.chat.id] = call.data[4:-5]
            update_langs()
            # todo ask name, phone
        # show top section
        elif call.data == 'open_menu':
            curr_menu[call.message.chat.id] = None
        # show submenu (path is resolved through the menu_hash cache)
        elif call.data.startswith('open_menu_'):
            if call.data[10:]:
                curr_menu[call.message.chat.id] = menu_hash[call.data[10:]]
        # show product info
        # elif call.data.startswith('open_item_'):
        # show_type = 'product'
        # curr_menu[call.message.chat.id] = call.data[10:]
        # add product to cart (one per time)
        elif call.data.startswith('order_') and not call.data.startswith('order_proceed'):
            full_path = menu_hash[call.data[6:]]
            ordered_item = get_concrete_data(full_path)
            name = full_path.split(':')[-1]
            # NOTE(review): `content` aliases the list stored in the shared
            # `menu` structure — the appends below mutate the menu itself,
            # so re-adding an item after removal appends again; verify.
            content = ordered_item
            if name not in current_cart['cart']:
                content.append(1) # amount
                content.append(full_path) # full path
                current_cart['cart'][name] = content
            else:
                current_cart['cart'][name][3] += 1
            # todo add detection, from where we add product: cart or menu
            # show_type = 'product'
        # remove product from cart
        elif call.data.startswith('remove_order_') and current_cart['cart']:
            # removed_item = get_concrete_data(call.data[13:])
            name = menu_hash[call.data[13:]].split(':')[-1]
            current_cart['cart'][name][3] -= 1
            if current_cart['cart'][name][3] <= 0:
                del current_cart['cart'][name]
            show_type = 'cart'
        # show cart for confirmation
        elif call.data == 'order_proceed':
            show_type = 'cart'
            # todo flush product_info
        # ask if any extra wishings
        elif call.data == 'order_proceed_2' and current_cart['cart']:
            '''
            keyboard = types.InlineKeyboardMarkup()
            item_key = types.InlineKeyboardButton(
                text=get_translation('No, proceed', call.message.chat.id), callback_data='order_proceed_3'
            )
            keyboard.add(item_key)
            m_ = bot.send_message(
                call.message.chat.id,
                text=get_translation('Do you have any extra wishes?', call.message.chat.id),
                reply_markup=keyboard
            )
            track_and_clear_messages(m_)
            return
        # suggest to choose type of delivery
        elif call.data == 'order_proceed_3' and current_cart['cart']:
            '''
            keyboard = types.InlineKeyboardMarkup()
            text_ = get_translation('Proceed at the restaurant', call.message.chat.id)
            callback_ = 'order_proceed_restaurant'
            logger(f'::adding "{text_}", callback "{callback_}"')
            item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
            keyboard.add(item_key)
            text_ = get_translation('Wish to takeaway', call.message.chat.id)
            callback_ = 'order_proceed_takeaway'
            logger(f'::adding "{text_}", callback "{callback_}"')
            item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
            keyboard.add(item_key)
            # item_key = types.InlineKeyboardButton(
            #     text=get_translation('I will be there in ..', call.message.chat.id),
            #     callback_data='order_proceed_delay',
            # )
            # keyboard.add(item_key)
            # item_key = types.InlineKeyboardButton(
            #     text=get_translation('Delivery', call.message.chat.id),
            #     callback_data='order_proceed_delivery',
            # )
            # keyboard.add(item_key)
            text_ = get_translation('<< back', call.message.chat.id)
            callback_ = 'open_menu_'
            logger(f'::adding "{text_}", callback "{callback_}"')
            item_key = types.InlineKeyboardButton(text=text_, callback_data=callback_)
            keyboard.add(item_key)
            text = get_translation(
                'You are welcome at <a href="https://www.google.com/maps/place/Mozogao+Bar+'
                '%26+Restaurant/@15.654396,73.7527975,21z/data=!4m5!3m4!1s0x3bbfec1ec5e2714b:'
                '0x6ec5c26f0656f0de!8m2!3d15.6543352!4d73.7528804">MozoGao</a>. '
                'Your order will be ready as soon as it is possible.\nWhat are you prefer?',
                call.message.chat.id,
            )
            m_ = bot.send_location(call.message.chat.id, 15.654315282911606, 73.75289136506875)
            track_and_clear_messages(m_)
            m_ = bot.send_message(
                call.message.chat.id, text=text, reply_markup=keyboard, parse_mode='HTML'
            )
            track_and_clear_messages(m_, False)
            return
        # Under processing
        elif (
            call.data
            in [
                'order_proceed_delivery',
                'order_proceed_delay',
                'order_proceed_takeaway',
                'order_proceed_restaurant',
            ]
            and current_cart['cart']
        ):
            if call.data == 'order_proceed_delivery':
                current_cart['order_type'] = DLVR
                # if delivery — ask for the location
            elif call.data == 'order_proceed_takeaway':
                current_cart['order_type'] = AWAY
            ''' Пока что закомментировать всё остальное. Лишнее
            # show payment options
            # способ оплаты - кэш/phonepe
            keyboard = types.InlineKeyboardMarkup()
            item_key = types.InlineKeyboardButton(
                text=get_translation('Cash', call.message.chat.id),
                callback_data='order_proceed_cash',
            )
            keyboard.add(item_key)
            item_key = types.InlineKeyboardButton(
                text=get_translation('PhonePe', call.message.chat.id),
                callback_data='order_proceed_phonepe',
            )
            keyboard.add(item_key)
            text = get_translation('Choose payment type', call.message.chat.id)
            m_ = bot.send_message(call.message.chat.id, text=text, reply_markup=keyboard)
            track_and_clear_messages(call.message, False)
            track_and_clear_messages(m_)
            return
        # todo ask for promocode or smth discount
        elif call.data == 'order_proceed_4' and current_cart['cart']:
            # скидка 10%? - промокод или ссылка на отзыв (скриншот)
            pass
        # Confirm order, send messages for everyone
        elif call.data in ['order_proceed_cash', 'order_proceed_phonepe'] and current_cart['cart']:
            '''
            # notify managers about the new order
            cart_text = '\n'.join(
                [
                    '{} [{}] x {} = {} rs.'.format(
                        c,
                        current_cart['cart'][c][0],
                        current_cart['cart'][c][3],
                        int(current_cart['cart'][c][2]) * int(current_cart['cart'][c][3]),
                    )
                    for c in current_cart['cart']
                ]
            )
            amount = sum(
                [
                    int(current_cart['cart'][c][2]) * int(current_cart['cart'][c][3])
                    for c in current_cart['cart']
                ]
            )
            delivery_map = {DLVR: 'Delivery', AWAY: 'Takeaway', REST: 'Restaurant'}
            delivery = delivery_map.get(current_cart['order_type'])
            pay_type = '' # 'Cash' if call.data == 'order_proceed_cash' else 'PhonePe'
            comments = (
                ''
            ) # '\nПожелания:\n'+'\n'.join(current_cart['comments']) if current_cart['comments'] else ''
            for m in managers:
                bot.send_message(
                    m,
                    text='New order from @{} ({}):\n{} {}\n{}\nTotal: {} rs.{}'.format(
                        call.message.chat.username,
                        call.message.chat.id,
                        delivery,
                        pay_type,
                        cart_text,
                        amount,
                        comments,
                    ),
                )
            m_ = bot.send_message(
                call.message.chat.id,
                text=get_translation(
                    'Your order is:\n{}\nTotal amount: {} rs.', call.message.chat.id
                ).format(cart_text, amount),
            )
            track_and_clear_messages(m_)
            m_ = bot.send_message(
                call.message.chat.id,
                text=get_translation(
                    'Thank you for your order! Our managers will reach you soon',
                    call.message.chat.id,
                ),
            )
            track_and_clear_messages(m_, False)
            # Keep the user's language, drop the cart.
            reset_settings(call.message.chat.id, soft=True)
            return
        # return up in the menu
        elif call.data == 'go_back':
            # todo flush product_info
            if call.message.chat.id not in curr_menu or ':' not in curr_menu[call.message.chat.id]:
                curr_menu[call.message.chat.id] = None
            else:
                curr_menu[call.message.chat.id] = ':'.join(
                    curr_menu[call.message.chat.id].split(':')[:-1]
                )
        show_menu(call.message, show_type)
    except Exception as e:
        logger('Callback exception! + ' + str(e) + ', ' + str(e.__dict__))
        m_ = bot.send_message(call.message.chat.id, text='Oops, something went wrong!')
        track_and_clear_messages(m_, False)
        curr_menu[call.message.chat.id] = None
        show_menu(call.message, 'menu')
bot.polling(none_stop=True, interval=0)
| kamucho-ru/rest_bot | bot.py | bot.py | py | 23,237 | python | en | code | 0 | github-code | 13 |
24844423188 | from typing import List, Tuple
from abcurve import AugmentedBondingCurve
from collections import namedtuple
from utils import attrs
import config
def vesting_curve(day: int, cliff_days: int, halflife_days: float) -> float:
    """
    Fraction of a batch unlocked on *day*: flat during the cliff, then an
    exponential ("halflife") unlock afterwards. The curve looks like _/--
    """
    elapsed_halflives = (day - cliff_days) / halflife_days
    return 1 - config.vesting_curve_halflife ** elapsed_halflives
def convert_80p_to_cliff_and_halflife(days: int, v_ratio: int = 2) -> Tuple[float, float]:
    """
    Convert "80% of tokens unlocked after *days* days" into (cliff_days, halflife_days).

    2.321928094887362 is log base-0.5 of 0.2 (i.e. log 0.2 / log 0.5).
    v_ratio is cliff / halflife; its default of 2 is determined by Commons Stack.
    """
    halflife = days / (config.log_base05_of_02 + v_ratio)
    return v_ratio * halflife, halflife
def hatch_raise_split_pools(total_hatch_raise, hatch_tribute) -> Tuple[float, float]:
    """
    Split the hatch raise into (funding_pool, collateral_pool).

    *hatch_tribute* is the fraction routed to the funding pool; the
    remainder backs the bonding curve as collateral.
    """
    return (hatch_tribute * total_hatch_raise,
            (1 - hatch_tribute) * total_hatch_raise)
# Per-batch vesting schedule: cliff length and unlock halflife, both in days.
VestingOptions = namedtuple("VestingOptions", "cliff_days halflife_days")
class TokenBatch:
    """
    A batch of tokens split into a vesting portion and a freely usable one.

    The vesting portion unlocks gradually according to ``vesting_curve``;
    a batch created without vesting options counts as fully unlocked.
    """

    def __init__(self, vesting: float, nonvesting: float, vesting_options=None):
        self.vesting = vesting
        self.nonvesting = nonvesting
        self.vesting_spent = 0.0
        self.age_days = 0
        # No vesting options means no cliff and no halflife, which
        # unlocked_fraction() treats as "everything is unlocked".
        self.cliff_days = vesting_options.cliff_days if vesting_options else 0
        self.halflife_days = vesting_options.halflife_days if vesting_options else 0

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, attrs(self))

    @property
    def total(self):
        """Tokens still held: unspent vesting tokens plus nonvesting tokens."""
        return (self.vesting - self.vesting_spent) + self.nonvesting

    def __bool__(self):
        return self.total > 0

    def __add__(self, other):
        # Note: returns a (vesting, nonvesting) tuple, not a TokenBatch.
        return self.vesting + other.vesting, self.nonvesting + other.nonvesting

    def __sub__(self, other):
        # Note: returns a (vesting, nonvesting) tuple, not a TokenBatch.
        return self.vesting - other.vesting, self.nonvesting - other.nonvesting

    def update_age(self, iterations: int = 1):
        """Age the batch by *iterations* days and return the new age."""
        self.age_days += iterations
        return self.age_days

    def unlocked_fraction(self) -> float:
        """Fraction of this batch unlocked to date (1.0 when nothing vests)."""
        if not (self.cliff_days and self.halflife_days):
            return 1.0
        fraction = vesting_curve(self.age_days,
                                 self.cliff_days, self.halflife_days)
        return fraction if fraction > 0 else 0

    def spend(self, x: float):
        """
        Spend *x* tokens, drawing nonvesting tokens before vesting ones.

        Raises if more than spendable() is requested.
        """
        if x > self.spendable():
            raise Exception("Not so many tokens are available for you to spend yet ({})".format(
                self.age_days))
        drawn_from_vesting = x - self.nonvesting
        if drawn_from_vesting > 0:
            self.vesting_spent += drawn_from_vesting
            self.nonvesting = 0.0
        else:
            self.nonvesting = abs(drawn_from_vesting)
        return self.vesting, self.vesting_spent, self.nonvesting

    def spendable(self) -> float:
        """
        Unlocked-and-unspent vesting tokens plus all nonvesting tokens.

        vesting_spent is subtracted because some tokens may already be gone.
        """
        return ((self.unlocked_fraction() * self.vesting) - self.vesting_spent) + self.nonvesting
def create_token_batches(hatcher_contributions: List[int], desired_token_price: float, cliff_days: float, halflife_days: float) -> Tuple[List[TokenBatch], float]:
    """
    Turn the hatchers' contributions (DAI/ETH/...) into vesting TokenBatches.

    In the hatch everyone buys in at the same time and price, so the initial
    token supply (set by *desired_token_price*) is shared out pro-rata to
    each contribution. Returns (token_batches, initial_token_supply).
    """
    total_hatch_raise = sum(hatcher_contributions)
    initial_token_supply = total_hatch_raise / desired_token_price
    # Every batch uses the same (immutable) vesting schedule.
    schedule = VestingOptions(cliff_days, halflife_days)
    token_batches = [
        TokenBatch((contribution / total_hatch_raise) * initial_token_supply, 0,
                   vesting_options=schedule)
        for contribution in hatcher_contributions
    ]
    return token_batches, initial_token_supply
class Commons:
    """A token commons: a bonding-curve collateral pool plus a funding pool."""

    def __init__(self, total_hatch_raise, token_supply, hatch_tribute=0.2, exit_tribute=0, kappa=2):
        """
        Set up the pools from the hatch raise.

        The hatch tribute fraction goes to the funding pool; the remainder
        becomes bonding-curve collateral. After the hatch phase, money from
        new investors goes only into the collateral pool.
        """
        self.hatch_tribute = hatch_tribute
        # e.g. hatch_tribute=0.35 -> 35% funding, 65% collateral
        self._funding_pool = hatch_tribute * total_hatch_raise
        self._collateral_pool = (1 - hatch_tribute) * total_hatch_raise
        self._token_supply = token_supply
        # Number of tokens created at hatch time, kept so the unlocking of
        # those tokens can be calculated later.
        self._hatch_tokens = token_supply
        self.bonding_curve = AugmentedBondingCurve(
            self._collateral_pool, token_supply, kappa=kappa)
        # Options
        self.exit_tribute = exit_tribute

    def deposit(self, dai):
        """Deposit DAI after the hatch phase: all of it becomes collateral."""
        tokens, realized_price = self.bonding_curve.deposit(
            dai, self._collateral_pool, self._token_supply)
        self._token_supply += tokens
        self._collateral_pool += dai
        return tokens, realized_price

    def burn(self, tokens):
        """Burn tokens for DAI, minus the exit tribute (if any)."""
        dai, realized_price = self.bonding_curve.burn(
            tokens, self._collateral_pool, self._token_supply)
        self._token_supply -= tokens
        self._collateral_pool -= dai
        if self.exit_tribute:
            # The tribute share of the payout is diverted to the funding pool.
            self._funding_pool += self.exit_tribute * dai
            return (1 - self.exit_tribute) * dai, realized_price
        return dai, realized_price

    def dai_to_tokens(self, dai):
        """How many tokens *dai* would buy at the current spot price."""
        return dai / self.bonding_curve.get_token_price(self._collateral_pool)

    def token_price(self):
        """Current spot token price from the bonding curve."""
        return self.bonding_curve.get_token_price(self._collateral_pool)

    def spend(self, amount):
        """
        Take *amount* out of the funding pool.

        Raises an Exception if that would make the funding pool negative.
        """
        if self._funding_pool - amount < 0:
            raise Exception("{} funds requested but funding pool only has {}".format(
                amount, self._funding_pool))
        self._funding_pool -= amount
        return
| commons-stack/commons-simulator | simulation/hatch.py | hatch.py | py | 7,835 | python | en | code | 32 | github-code | 13 |
4531859367 | import csv
import sys
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
TEST_SIZE = 0.4
def main():
    """Train a k-NN purchase predictor on the given CSV and report accuracy."""
    # Exactly one argument is expected: the data file.
    if len(sys.argv) != 2:
        sys.exit("Usage: python shopping.py data")

    # Split the labelled data into training and testing sets.
    evidence, labels = load_data(sys.argv[1])
    X_train, X_test, y_train, y_test = train_test_split(
        evidence, labels, test_size=TEST_SIZE
    )

    # Fit the model, then judge it on the held-out set.
    predictions = train_model(X_train, y_train).predict(X_test)
    sensitivity, specificity = evaluate(y_test, predictions)

    print(f"Correct: {(y_test == predictions).sum()}")
    print(f"Incorrect: {(y_test != predictions).sum()}")
    print(f"True Positive Rate: {100 * sensitivity:.2f}%")
    print(f"True Negative Rate: {100 * specificity:.2f}%")
def load_data(filename):
    """
    Load shopping data from CSV file `filename` and convert into a list of
    evidence lists and a list of labels. Return a tuple (evidence, labels).

    Each evidence list holds, in order:
        - Administrative (int), Administrative_Duration (float)
        - Informational (int), Informational_Duration (float)
        - ProductRelated (int), ProductRelated_Duration (float)
        - BounceRates, ExitRates, PageValues, SpecialDay (floats)
        - Month: index 0 (January) to 11 (December)
        - OperatingSystems, Browser, Region, TrafficType (ints)
        - VisitorType: 1 if returning, 0 otherwise
        - Weekend: 1 if true, 0 if false

    labels is the matching list: 1 if Revenue is true, 0 otherwise.
    """
    import pandas as pd

    # Bug fix: the original always read "shopping.csv", ignoring `filename`.
    data = pd.read_csv(filename)

    # Month names as they appear in the dataset (note "June", not "Jun").
    month_index = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5,
                   'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9, 'Nov': 10, 'Dec': 11}
    visitor_index = {'New_Visitor': 0, 'Returning_Visitor': 1, 'Other': 2}
    data["Month"] = data["Month"].map(month_index).astype(int)
    data["VisitorType"] = data["VisitorType"].map(visitor_index).astype(int)

    # Informational and Region are ints per the spec (the original cast
    # Informational to float and never cast Region at all).
    int_columns = ["Administrative", "Informational", "ProductRelated",
                   "OperatingSystems", "Browser", "Region", "TrafficType",
                   "Weekend", "Revenue"]
    float_columns = ["Administrative_Duration", "Informational_Duration",
                     "ProductRelated_Duration", "BounceRates", "ExitRates",
                     "PageValues", "SpecialDay"]
    data[int_columns] = data[int_columns].astype(int)
    data[float_columns] = data[float_columns].astype(float)

    print(f"Fetching evidence and labels...")
    feature_order = ["Administrative", "Administrative_Duration", "Informational",
                     "Informational_Duration", "ProductRelated", "ProductRelated_Duration",
                     "BounceRates", "ExitRates", "PageValues", "SpecialDay", "Month",
                     "OperatingSystems", "Browser", "Region", "TrafficType",
                     "VisitorType", "Weekend"]
    # itertuples keeps each column's own dtype (DataFrame.values would
    # upcast every row to float).
    evidence = [list(row) for row in data[feature_order].itertuples(index=False, name=None)]
    labels = data["Revenue"].tolist()
    return evidence, labels
def train_model(evidence, labels):
    """Fit and return a 1-nearest-neighbour classifier on the given data."""
    # `fit` returns the estimator itself, so train-and-return in one step.
    return KNeighborsClassifier(n_neighbors=1).fit(evidence, labels)
def evaluate(labels, predictions):
    """
    Return (sensitivity, specificity) for predicted vs. actual labels.

    Each label is 1 (positive) or 0 (negative). Sensitivity is the true
    positive rate: the fraction of actual 1s predicted as 1. Specificity is
    the true negative rate: the fraction of actual 0s predicted as 0.
    Exits the program if the true labels contain no 1s or no 0s.
    """
    positives = labels.count(1)
    negatives = labels.count(0)
    if positives == 0:
        sys.exit("No positve label in true labels")
    if negatives == 0:
        sys.exit("No negative label in true labels")
    true_positives = sum(1 for actual, predicted in zip(labels, predictions)
                         if actual == 1 and predicted == 1)
    true_negatives = sum(1 for actual, predicted in zip(labels, predictions)
                         if actual == 0 and predicted == 0)
    return true_positives / positives, true_negatives / negatives
#raise NotImplementedError
if __name__ == "__main__":
main()
| yadavjp75/shoppingCS50 | shopping.py | shopping.py | py | 5,913 | python | en | code | 0 | github-code | 13 |
21326088885 | from requests_html import HTMLSession
import csv
import datetime
import sqlite3
# Connect to (or create) the SQLite database.
conn = sqlite3.connect('amztracker.db')
c = conn.cursor()

# Only create the table once, then comment out or delete the line:
# c.execute('''CREATE TABLE prices(date DATE, asin TEXT, price FLOAT, title TEXT)''')

# Start a scraping session and read the ASINs to track (first CSV column).
s = HTMLSession()
asins = []
with open('asins.csv', 'r') as f:
    csv_reader = csv.reader(f)
    for row in csv_reader:
        asins.append(row[0])

# Scrape the current price and title for every ASIN and record them.
for asin in asins:
    r = s.get(f'https://www.amazon.co.uk/dp/{asin}')
    r.html.render(sleep=1)
    try:
        price = r.html.find('#price_inside_buybox')[0].text.replace('£', '').replace(',', '').strip()
    except IndexError:
        # Buy-box selector matched nothing, so fall back to the older price
        # id. (Bug fix: the original used a bare `except:`, which also
        # swallowed KeyboardInterrupt and any real scraping bug.)
        price = r.html.find('#priceblock_ourprice')[0].text.replace('£', '').replace(',', '').strip()
    title = r.html.find('#productTitle')[0].text.strip()
    date = datetime.datetime.today()
    c.execute('''INSERT INTO prices VALUES(?,?,?,?)''', (date, asin, price, title))
    print(f'Added data for {asin}, {price}')

conn.commit()
print('Committed new entries to database')
conn.close()  # release the database handle (the original never closed it)
| jhnwr/amazon-price-tracker | amzpricers.py | amzpricers.py | py | 1,154 | python | en | code | 11 | github-code | 13 |
def falling(n, k):
    """Compute the falling factorial of n to depth k.

    >>> falling(6, 3)  # 6 * 5 * 4
    120
    >>> falling(4, 3)  # 4 * 3 * 2
    24
    >>> falling(4, 1)  # 4
    4
    >>> falling(4, 0)
    1
    """
    # Multiply the k consecutive factors n, n-1, ..., n-k+1 (empty -> 1).
    product = 1
    for factor in range(n - k + 1, n + 1):
        product *= factor
    return product
def sum_digits(y):
    """Sum all the digits of y.

    >>> sum_digits(10)  # 1 + 0 = 1
    1
    >>> sum_digits(4224)  # 4 + 2 + 2 + 4 = 12
    12
    >>> sum_digits(1234567890)
    45
    >>> a = sum_digits(123)  # make sure that you are using return rather than print
    >>> a
    6
    """
    total = 0
    while y >= 10:
        total += y % 10
        # Bug fix: the original did `y %= 10`, which keeps only the last
        # digit instead of dropping it, so every doctest above failed.
        y //= 10
    return total + y
def double_eights(n):
    """Return true if n has two eights in a row.

    >>> double_eights(8)
    False
    >>> double_eights(88)
    True
    >>> double_eights(2882)
    True
    >>> double_eights(880088)
    True
    >>> double_eights(12345)
    False
    >>> double_eights(80808080)
    False
    """
    # Two adjacent eights in the decimal digits is exactly the substring
    # "88" appearing in the number's string form (sign dropped via abs).
    return '88' in str(abs(n))
| kiroitorat/CS61A | lab/lab01/lab01.py | lab01.py | py | 1,383 | python | en | code | 0 | github-code | 13 |
23052557590 | from lift import Elevator
elevator_1 = Elevator("OTIS")
elevator_2 = Elevator("PHILLIPS")
# Carry one person in the elevator named OTIS
elevator_1.lift()
# Carry two people in the elevator named PHILLIPS
elevator_2.lift()
elevator_2.lift()
# Get usage information for the elevator named OTIS
elevator_1.info()
# Get usage information for the elevator named PHILLIPS
elevator_2.info() | nvovk/python | OOP/0 - Lift (example)/index.py | index.py | py | 498 | python | ru | code | 0 | github-code | 13 |
17881982262 | import sys
import clipboard
import json
# Path of the JSON file that persists saved clipboard entries.
SAVED_DATA = "clipboard.json"
def save_items(filepath, data):
    """Write *data* to *filepath* as JSON, replacing any existing file."""
    with open(filepath, "w") as handle:
        json.dump(data, handle)
#save_items("clipboard.json", {"Data" : "value"})
def load_items(filepath):
    """
    Read the JSON dict stored at *filepath*.

    Returns an empty dict when the file is missing, unreadable, or holds
    invalid JSON.
    """
    try:
        with open(filepath, "r") as handle:
            return json.load(handle)
    except (OSError, ValueError):
        # Bug fix: the original used a bare `except:`, which also hid
        # unrelated errors (even KeyboardInterrupt). json.JSONDecodeError
        # is a ValueError subclass, so both failure modes are covered.
        return {}
#data = clipboard.paste()
#print(data)
#clipboard.copy("sani")
#print(sys.argv[0])
#print(sys.argv[1])
#print(sys.argv[2])
#print(sys.argv[3])
if len(sys.argv) != 2:
    print("Please pass exactly one command")
else:
    command = sys.argv[1]
    data = load_items(SAVED_DATA)
    if command == "save":
        # Store the current clipboard contents under a user-chosen key.
        key = input("enter a key: ")
        data[key] = clipboard.paste()
        save_items(SAVED_DATA, data)
    elif command == "load":
        # Copy a previously saved entry back onto the clipboard.
        key = input("enter a key: ")
        if key in data:
            clipboard.copy(data[key])
        else:
            print("Key does not exist")
    elif command == "list":
        # Show every saved key/value pair.
        print(data)
    else:
        print("Invalid command")
| Mithiran-coder/My_Python_programs | multiclipboard.py | multiclipboard.py | py | 1,096 | python | en | code | 0 | github-code | 13 |
72070992339 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 12:12:13 2020
@author: dakar
"""
#%%
import warnings
import random
from copy import copy, deepcopy
import matplotlib.pyplot as plt
import networkx as nx
from tsp_heuristics.io.read_data import make_tsp
from tsp_heuristics.sol_generators import random_tour_list, ordered_tour_list, greedy_tour_list
class TSP(object):
    """
    A travelling-salesman problem instance.

    ``self.nodes`` is the set of nodes and ``self.dist_dod`` a dict-of-dicts
    where ``dist_dod[u][v]`` is the distance from u to v. The graph is kept
    complete: every node carries a distance to every other node (distances
    may be asymmetric).
    """

    def __init__(self, incoming_data=None, **kwargs):
        self.dist_dod = dict()
        self.nodes = set()
        # make_tsp inspects the format of incoming_data and fills the
        # dist_dod and nodes attributes of this object accordingly.
        if incoming_data is not None:
            make_tsp(incoming_data, obj_to_use=self, **kwargs)

    def __str__(self):
        return 'this is the string representation'

    def __iter__(self):
        """Yield each node in the TSP problem."""
        for node in self.nodes:
            yield node

    def __len__(self):
        """Return the number of nodes in the TSP problem."""
        return len(self.nodes)

    def __copy__(self):
        # Rebuild from the distance dict (None when the problem is empty),
        # then overwrite with this instance's attributes (shallow copy).
        incoming_data = self.dist_dod if len(self.dist_dod) > 0 else None
        result = type(self)(incoming_data=incoming_data)
        result.__dict__.update(self.__dict__)
        return result

    def __deepcopy__(self, memo):
        incoming_data = self.dist_dod if len(self.dist_dod) > 0 else None
        result = type(self)(incoming_data=incoming_data)
        result.__dict__.update(self.__dict__)
        memo[id(self)] = result
        # Replace every (shallow-copied) attribute with a deep copy.
        for key, value in self.__dict__.items():
            setattr(result, key, deepcopy(value, memo))
        return result

    def _get_updated_node_dist_dict_for(self, new_nodes, orig_nodes, node_dist_dict, default=0):
        """
        Complete node_dist_dict so every new node has to/from distances.

        For each new node, a missing distance falls back to the symmetric
        (reverse-direction) value when provided, and otherwise to *default*.
        Original nodes get distances to every new node the same way;
        provided distances between original nodes are kept as-is, and no
        non-provided symmetric entries are invented for them. A UserWarning
        summarises every fallback that was applied.

        Parameters
        ----------
        new_nodes : set
            Nodes being added to the TSP in this step.
        orig_nodes : set
            Nodes that were already in the TSP.
        node_dist_dict : dict of dicts
            Outer keys are nodes; inner dicts map other nodes to distances.
        default : numeric, optional
            Fallback distance when neither direction was provided. The
            default is 0.

        Returns
        -------
        new_node_dist_dict : dict of dicts
            node_dist_dict completed with all required distances between
            new nodes and existing nodes.
        """
        missing_distances = {}
        new_node_dist_dict = {node: {} for node in orig_nodes.union(new_nodes)}
        for new_node in new_nodes:
            symmetric_replacements = []
            default_replacements = []
            for other_node in new_nodes.union(orig_nodes).difference(set([new_node])):
                if other_node in node_dist_dict[new_node].keys():
                    new_node_dist_dict[new_node][other_node] = node_dist_dict[new_node][other_node]
                elif other_node in node_dist_dict.keys():
                    if new_node in node_dist_dict[other_node].keys():
                        # Reverse direction was provided: assume symmetry.
                        new_node_dist_dict[new_node][other_node] = node_dist_dict[other_node][new_node]
                        symmetric_replacements.append(other_node)
                    else:
                        new_node_dist_dict[new_node][other_node] = default
                        default_replacements.append(other_node)
                else:
                    new_node_dist_dict[new_node][other_node] = default
                    default_replacements.append(other_node)
            if len(symmetric_replacements) > 0 or len(default_replacements) > 0:
                msg = 'New node ({}) missing distances'.format(new_node)
                if len(symmetric_replacements) > 0:
                    msg = '\n\t'.join([msg,
                                       'distances to nodes ({}) replaced with symmetric distance'
                                       .format(', '.join([str(i) for i in symmetric_replacements]))])
                if len(default_replacements) > 0:
                    msg = '\n\t'.join([msg,
                                       'distances to nodes ({}) replaced with default ({}) distance'
                                       .format(', '.join([str(i) for i in default_replacements]),
                                               default)])
                missing_distances[new_node] = msg

        # Distances for original nodes: keep anything provided, then fill
        # distances to the new nodes from the symmetric entry or default.
        for orig_node in orig_nodes:
            symmetric_replacements = []
            default_replacements = []
            if orig_node in node_dist_dict.keys():
                new_node_dist_dict[orig_node] = node_dist_dict[orig_node]
            else:
                new_node_dist_dict[orig_node] = {}
            for new_node in new_nodes.difference(set(new_node_dist_dict[orig_node].keys())):
                if orig_node in node_dist_dict[new_node].keys():
                    new_node_dist_dict[orig_node][new_node] = node_dist_dict[new_node][orig_node]
                    symmetric_replacements.append(new_node)
                else:
                    new_node_dist_dict[orig_node][new_node] = default
                    default_replacements.append(new_node)
            if len(symmetric_replacements) > 0 or len(default_replacements) > 0:
                msg = 'Original node ({}) missing distances'.format(orig_node)
                if len(symmetric_replacements) > 0:
                    msg = '\n\t'.join([msg,
                                       'distances to new nodes ({}) replaced with symmetric distance'
                                       .format(', '.join([str(i) for i in symmetric_replacements]))])
                if len(default_replacements) > 0:
                    msg = '\n\t'.join([msg,
                                       'distances to new nodes ({}) replaced with default ({}) distance'
                                       .format(', '.join([str(i) for i in default_replacements]),
                                               default)])
                # Bug fix: the original keyed this entry on the stale
                # `new_node` loop variable, so all original-node warnings
                # overwrote one new-node message instead of being recorded.
                missing_distances[orig_node] = msg

        # Print one warning covering every fallback, if any occurred.
        if len(missing_distances) > 0:
            warnings.warn('\n\n'.join([value for value in missing_distances.values()]))
        return new_node_dist_dict

    def _get_updated_node_dist_dict_comprehension(self, new_nodes, orig_nodes, node_dist_dict, default=0):
        """
        Comprehension-based equivalent of _get_updated_node_dist_dict_for.

        Same fallback rules (provided distance, then symmetric value, then
        *default*), built in a single dict comprehension, but it does NOT
        warn about the fallbacks that were applied.

        Parameters
        ----------
        new_nodes : set
            Nodes being added to the TSP in this step.
        orig_nodes : set
            Nodes that were already in the TSP.
        node_dist_dict : dict of dicts
            Outer keys are nodes; inner dicts map other nodes to distances.
        default : numeric, optional
            Fallback distance when neither direction was provided.

        Returns
        -------
        new_node_dist_dict : dict of dicts
            node_dist_dict completed with all required distances between
            new nodes and existing nodes.
        """
        new_node_dist_dict = {
            # Entries for new nodes: provided distance, else the symmetric
            # (reverse) distance, else the default.
            **{new_node: {other_node: node_dist_dict[new_node].get(
                              other_node,
                              node_dist_dict.get(other_node,
                                                 {new_node: default}).get(new_node, default))
                          for other_node in new_nodes.union(orig_nodes).difference(set([new_node]))}
               for new_node in new_nodes},
            # Entries for original nodes: a distance to every new node
            # (provided / symmetric / default) plus any distances to other
            # original nodes that were provided in node_dist_dict.
            **{orig_node: {
                   **{new_node: node_dist_dict.get(
                          orig_node,
                          {new_node: node_dist_dict[new_node].get(orig_node, default)}).get(new_node, default)
                      for new_node in new_nodes},
                   **{other_orig_node: node_dist_dict.get(
                          orig_node,
                          {other_orig_node: default}).get(other_orig_node, default)
                      for other_orig_node in node_dist_dict.get(orig_node, {}).keys()}}
               for orig_node in orig_nodes}
        }
        return new_node_dist_dict

    def add_nodes(self, node_dist_dict: dict, default=0):
        """
        Add nodes and/or update distances in the TSP object.

        Parameters
        ----------
        node_dist_dict : dict of dicts
            Outer keys are nodes to add or update; inner dicts map the edge
            endpoint to its distance. Inner dicts may be empty to fill every
            required distance with *default*. When adding a new node, one
            direction of a symmetric edge is enough — the reverse uses the
            same value. Updates to existing edges must name both directions
            explicitly.
        default : numeric
            Distance used when a required edge was not provided. The
            default is 0.

        Returns
        -------
        None
        """
        new_nodes = set(node_dist_dict.keys()).difference(self.nodes)
        orig_nodes = self.nodes
        # For genuinely new nodes, complete the dict so both directions of
        # every edge touching them exist (symmetric value, then default).
        if len(new_nodes) > 0:
            node_dist_dict = self._get_updated_node_dist_dict_for(new_nodes,
                                                                  orig_nodes,
                                                                  node_dist_dict, default=default)
        # Register the new nodes, then write the distances into dist_dod.
        self.nodes.update(new_nodes)
        self._update_dist_dod(node_dist_dict, default=default)

    def _update_dist_dod(self, node_dist_dict: dict, default=0):
        """
        Merge node_dist_dict into self.dist_dod.

        Expects every outer and inner key to already be a node of the TSP.
        Distances not mentioned in node_dist_dict keep their current value,
        or *default* when no current value exists. (The original placed a
        'never_used_but_created' string sentinel in that last fallback;
        *default* is the sane value.)

        Parameters
        ----------
        node_dist_dict : dict
            Dict of dicts keyed by "from" node, then "to" node, with the
            from-to distance as values.

        Returns
        -------
        None
        """
        for node in node_dist_dict.keys():
            self.dist_dod[node] = {other_node: node_dist_dict[node].get(
                                       other_node,
                                       self.dist_dod.get(node, {}).get(other_node, default))
                                   for other_node in self.nodes.difference(set([node]))}
class TSPTour(object):
@classmethod
def from_tsp(cls,tsp,funct,**kwargs):
default_dict = {'random':random_tour_list,
'ordered':ordered_tour_list,
'greedy':greedy_tour_list}
default_str = 'random'
if type(funct) == str:
if funct not in default_dict.keys():
warnings.warn('{} is not a valid string function type. It must be one of ({}). Defaulting to {}'.format(funct,
', '.join(default_dict.keys()),
default_str))
funct = default_str
funct = default_dict[default_str]
return(cls(tsp,funct(tsp,**kwargs)))
def __init__(self,tsp,tour_list):
self._tour_list = tour_list
self.tsp = tsp
self.distance = self.get_distance()
@property
def tour_list(self):
return(self._tour_list)
@tour_list.setter
def tour_list(self,tour_list_data):
if isinstance(tour_list_data,dict):
try:
self._tour_list = tour_list_data['tour']
self.distance = tour_list_data.get('distance',self.get_distance())
except KeyError as e:
print('Setting tour_list accepts the tour list as an iterable or a dict with keys {"tour","distance"}. "distance" key is optional.\n{}'.format(e))
else:
self._tour_list = tour_list_data
self.distance = self.get_distance()
@tour_list.getter
def tour_list(self):
return(self._tour_list)
def __iter__(self):
'''
Iterates through the nodes in the tour_list
'''
for node in self.tour_list:
yield(node)
def __str__(self):
the_string = 'The tour is ({}). \nThe distance is: {}.'.format(', '.join([str(i) for i in self.tour_list]),
self.distance)
return(the_string)
def __copy__(self):
result = type(self)(tsp = self.tsp,tour_list = self.tour_list)
result.__dict__.update(self.__dict__)
return(result)
def __deepcopy__(self,memo):
result = type(self)(tsp = self.tsp,tour_list = self.tour_list)
result.__dict__.update(self.__dict__)
memo[id(self)] = result
# make deep copies of all attributes
for key,value in self.__dict__.items():
setattr(result,key,deepcopy(value,memo))
return(result)
def __eq__(self,tour2):
'''
Returns True if tour_lists are same size and all nodes are in same order
Don't have to have same start node
'''
if len(self.tour_list) != len(tour2.tour_list):
return(False)
elif len(set(self.tour_list).difference(set(tour2.tour_list))) > 0:
return(False)
else:
# the index in second tour list that has this tours first item
first_node_ind = tour2.tour_list.index(self.tour_list[0])
if first_node_ind == 0:
reordered_list = tour2.tour_list
else:
# reorder the second tour list so we start with the same ourder as this tour list
reordered_list = tour2.tour_list[first_node_ind:len(tour2.tour_list)] + tour2.tour_list[0:first_node_ind]
return(all([i==j for i,j in zip(self.tour_list,reordered_list)]))
def __ne__(self,tour2):
return(not self.__eq__(tour2))
def __lt__(self,tour2):
return(self.distance < tour2.distance)
def __gt__(self,tour2):
return(self.distance > tour2.distance)
def __le__(self,tour2):
return(self.distance <= tour2.distance)
def __ge__(self,tour2):
return(self.distance >= tour2.distance)
def get_distance(self):
return(sum([self.tsp.dist_dod[self.tour_list[i]][self.tour_list[i+1]]
for i in range(len(self.tour_list)-1)]
# distance from last node back to the start
+ [self.tsp.dist_dod[self.tour_list[-1]][self.tour_list[0]]]))
def _get_add_del_edges(self, replace_dict):
'''
Create sets of edges to add and delete based on tour_list and replace_dict
Parameters
----------
replace_dict : dict
Dict with keys being tour indices to replace and values being the
the indices to replace them with.
Returns
-------
add_del_edge_dict : dict
Dict with keys {'add','delete'}. Values are sets of edges to add
and delete (respectively). Every occurrence of an index in replace_dict.keys()
is replaced with its provided value.
'''
node_inds_to_swap = replace_dict.keys()
# use sets to prevent double counting edges if nodes right next to each other
edges_to_delete = set() # the current (unique) edges connected to the nodes we want to swap
edges_to_add = set() # the edges we want to add to put nodes in their new positions
for ind in node_inds_to_swap:
if ind == 0:
edges_to_delete.add(tuple([ind,ind + 1]))
edges_to_delete.add(tuple([len(self.tour_list) - 1,ind]))
edges_to_add.add(tuple([replace_dict[ind], replace_dict.get(ind + 1,
ind + 1)]))
edges_to_add.add(tuple([replace_dict.get(len(self.tour_list) - 1,
len(self.tour_list) - 1),
replace_dict[ind]]))
elif ind == len(self.tour_list) - 1:
edges_to_delete.add(tuple([ind,0]))
edges_to_delete.add(tuple([ind - 1, ind]))
edges_to_add.add(tuple([replace_dict[ind],replace_dict.get(0,0)]))
edges_to_add.add(tuple([replace_dict.get(ind - 1,
ind - 1),
replace_dict[ind]]))
else:
edges_to_delete.add(tuple([ind,ind + 1]))
edges_to_delete.add(tuple([ind - 1, ind]))
edges_to_add.add(tuple([replace_dict[ind], replace_dict.get(ind + 1,
ind + 1)]))
edges_to_add.add(tuple([replace_dict.get(ind - 1,
ind - 1),
replace_dict[ind]]))
return({'add':edges_to_add,
'delete':edges_to_delete})
    def n_swap(self,n):
        '''
        Swap n random nodes in the tour and return a new tour instance.
        Select n random nodes in the tour and randomly swap them so no node
        ends up in the same location.
        Parameters
        ----------
        n : int
            The number of nodes to swap (must be between 2 and len(self.tour_list)).
            Swapping 0 or 1 nodes, doesn't effectively change the tour so not allowed.
        Returns
        -------
        A deep copy of this tour with the swapped tour_list and updated distance.
        '''
        # change n so it throws an error for n = 0 or 1
        if n in [0,1] or n > len(self.tour_list):
            orig_n = n
            n = -1
        try:
            node_inds_to_swap = random.sample(range(len(self.tour_list)),n)
        except (ValueError, TypeError) as e:
            # NOTE(review): orig_n is only bound in the branch above; a plain
            # negative or non-int n raising here would hit a NameError instead.
            print('{} is not a valid value for n. It must be an integer between 2 and size of the graph'.format(orig_n))
            raise e
        inds_left_to_swap = node_inds_to_swap.copy()
        replace_dict = {} # keys are indices to fill, values are indices to fill with
        for curr_ind in node_inds_to_swap:
            poss_inds_to_swap = list(set(inds_left_to_swap).difference({curr_ind}))
            if len(poss_inds_to_swap) == 0:
                # the last index left to switch is itself
                # so replace with something that was already switched
                # to prevent things from ending up in same place
                rand_new_ind = random.choice(list(set(node_inds_to_swap).difference({curr_ind})))
                replace_dict[curr_ind] = replace_dict[rand_new_ind] # take the dict val for the chosen ind
                replace_dict[rand_new_ind] = curr_ind # fill the dict val for the chosen ind with this ind
                inds_left_to_swap.remove(curr_ind)
            # we have a different index to place here, but only 1
            elif len(poss_inds_to_swap) == 1:
                # NOTE(review): inds_left_to_swap[0] is not necessarily the one
                # element of poss_inds_to_swap; it could equal curr_ind -- verify.
                replace_dict[curr_ind] = inds_left_to_swap[0]
                inds_left_to_swap.remove(inds_left_to_swap[0])
            else:
                rand_new_ind = random.choice(poss_inds_to_swap)
                replace_dict[curr_ind] = rand_new_ind
                inds_left_to_swap.remove(rand_new_ind)
        add_del_edge_dict = self._get_add_del_edges(replace_dict)
        edges_to_add = add_del_edge_dict['add']
        edges_to_delete = add_del_edge_dict['delete']
        # add the distance of the edges to add and
        # subtract the distance of edges to delete
        new_distance = (self.distance
                        - sum([self.tsp.dist_dod[self.tour_list[del_u]][self.tour_list[del_v]] for del_u,del_v in edges_to_delete])
                        + sum([self.tsp.dist_dod[self.tour_list[add_u]][self.tour_list[add_v]] for add_u,add_v in edges_to_add])
                        )
        # swap the nodes on a copy so this instance is left untouched
        # tour nodes in indices of dict values get placed in index of tour keys
        new_tour_inst = deepcopy(self)
        new_tour_list = self.tour_list.copy()
        for ind, new_ind in replace_dict.items():
            new_tour_list[ind] = self.tour_list[new_ind]
        # update the tour list and distance using the tour_list setter
        new_tour_inst.tour_list = {'tour':new_tour_list,'distance':new_distance}
        return(new_tour_inst)
    def plot(self,start_color = 'red', other_color = 'blue',pos=None,layout_fct='circular_layout',**kwargs):
        '''
        Create a matplotlib plot of the tour
        Parameters
        ----------
        start_color : string, optional
            The color of the starting node (node 0 in the tour_list). The default is 'red'.
        other_color : string, optional
            The color of other nodes. The default is 'blue'.
        pos : dict, optional
            If provided, a dict keyed by nodes in tour_list with 2-tuple as items.
            First tuple item is x location, second is y. The default is None.
        layout_fct : string, optional
            If pos is None, then the string representation of the function to build node positions.
            See `networkx.layout.__all__` for available options. The default is 'circular_layout'.
        **kwargs : TYPE
            keyword arguments passed to layout_fct.
        Returns
        -------
        The matplotlib Figure containing the drawn tour.
        '''
        # Map every networkx layout name to its function so layout_fct can be a string.
        layout_dict = {layout:getattr(nx.layout,layout) for layout in nx.layout.__all__}
        # need to make my own bipartite layout to put nodes in tour_list order
        # or at least
        default_kwargs = {'bipartite_layout':{'nodes':[node for i,node in enumerate(self.tour_list)
                                                       if i %2 == 0],
                                              'align':'vertical'}}
        g = nx.DiGraph()
        # Edge weights are the inter-node distances, including the closing edge.
        g.add_weighted_edges_from([(u,v,self.tsp.dist_dod[u][v])
                                   for u,v in [(self.tour_list[i],self.tour_list[i+1])
                                               for i in range(len(self.tour_list)-1)]
                                   + [(self.tour_list[-1],self.tour_list[0])]])
        # NOTE(review): color_dict is never used (color_list below is what is drawn).
        color_dict = {**{self.tour_list[0]:start_color},
                      **{node:other_color for node in self.tour_list[1:]}}
        color_list = [start_color] + [other_color]*(len(self.tour_list) - 1)
        if pos is None:
            # NOTE(review): the .get default here is the *string* 'circular_layout',
            # which is not callable -- an unknown layout_fct would raise TypeError.
            pos = layout_dict.get(layout_fct,'circular_layout')(g,**{**default_kwargs.get(layout_fct,{}),
                                                                     **kwargs})
        f = plt.figure(1)
        ax = f.add_subplot(111)
        nx.draw_networkx_nodes(g,pos=pos,ax=ax,node_color=color_list)
        nx.draw_networkx_labels(g,pos=pos,ax=ax)
        nx.draw_networkx_edges(g,pos=pos,ax=ax)
        nx.draw_networkx_edge_labels(g,pos=pos,ax=ax,edge_labels = {(u,v):dist
                                                                    for u,v,dist in g.edges(data='weight')},
                                     label_pos = 0.4)
        # Invisible points give the legend its two colored entries.
        ax.plot([0],[0],color=start_color,label='Starting Node')
        ax.plot([0],[0],color=other_color,label='Other Nodes')
        plt.title('Tour Distance: {}'.format(self.distance))
        plt.axis('off')
        plt.legend()
        f.tight_layout()
        return(f)
| cookesd/tsp_heuristics | tsp_heuristics/classes/tsp.py | tsp.py | py | 29,476 | python | en | code | 0 | github-code | 13 |
13325017357 | import os
import sys
from setuptools import setup, find_packages
# Run from the directory containing this script so relative paths resolve.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
VERSION_PATH = os.path.join("mudlink", "VERSION.txt")  # single source of truth for the version
OS_WINDOWS = os.name == "nt"  # NOTE(review): appears unused in this file
def get_requirements():
    """Read requirements.txt and return the requirement strings.

    Comments (anything after '#') and blank lines are dropped. To update
    the requirements, edit the requirements.txt file.
    """
    with open("requirements.txt", "r") as req_file:
        stripped = (raw.split("#")[0].strip() for raw in req_file)
        return [entry for entry in stripped if entry]
def get_version():
    """Return the current version string from mudlink/VERSION.txt.

    When updating the package for release, remember to increment the
    version number in mudlink/VERSION.txt.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open(...).read() left closing to the garbage collector.
    with open(VERSION_PATH) as version_file:
        return version_file.read().strip()
def package_data():
    """
    By default, the distribution tools ignore all non-python files.
    Make sure we get everything.
    """
    file_set = []
    for root, dirs, files in os.walk("mudlink"):
        for f in files:
            # NOTE(review): this splits f using the *full path* as separator,
            # which only excludes a file literally named ".git"; if the intent
            # was to skip anything under a .git directory, the check should
            # inspect the path components instead -- confirm intent.
            if ".git" in f.split(os.path.normpath(os.path.join(root, f))):
                # Prevent the repo from being added.
                continue
            file_name = os.path.relpath(os.path.join(root, f), "mudlink")
            file_set.append(file_name)
    return file_set
# setup the package
setup(
name="mudlink",
version=get_version(),
author="Volund",
maintainer="Volund",
url="https://github.com/volundmush/shinma",
description="A library for creating, manipulating, and formatting ANSI text for MUDs and similar text-based games",
license="???",
long_description="""
""",
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=get_requirements(),
package_data={"": package_data()},
zip_safe=False,
classifiers=[
],
python_requires=">=3.7",
project_urls={
"Source": "https://github.com/volundmush/mudlink-python",
"Issue tracker": "https://github.com/volundmush/mudlink-python/issues",
"Patreon": "https://www.patreon.com/volund",
},
)
| volundmush/mudlink-python | setup.py | setup.py | py | 2,108 | python | en | code | 1 | github-code | 13 |
23632804232 | import re
from collections import deque
# operands, operators = [], []
print('Reverse Polish Notation\n')
# Tokens must be space-separated, e.g. "2 3 + 4 *".
expression = input("Enter a mathematical expression :\n").split()
print(expression)
# function to evaluate the first "a b op" triple of the expression
def evaluate(liste):
    """Collapse the leading "a b op" triple of *liste* into its result.

    The first two tokens are parsed as integers, the third as an operator
    (+, -, * or /); the stringified result is pushed back on the front and
    the shortened token list is returned.
    """
    remaining = deque(liste)
    left = int(remaining.popleft())
    right = int(remaining.popleft())
    operator = remaining.popleft()
    outcome = 0
    if operator == '*':
        outcome = left * right
    elif operator == '+':
        outcome = left + right
    elif operator == '-':
        outcome = left - right
    elif operator == '/':
        outcome = left / right
    remaining.appendleft(str(outcome))
    return list(remaining)
# Repeatedly reduce the front triple until a single value remains.
# NOTE(review): '/' yields a float string (e.g. '2.0'), which the int()
# calls inside evaluate() cannot reparse, so division partway through a
# longer expression raises ValueError.
while (len(expression) > 1):
    expression = evaluate(expression)
print(expression[0])
| MicroClub-USTHB/python-language | Math_level3/reverse_polish_notation/rpn.py | rpn.py | py | 837 | python | en | code | 2 | github-code | 13 |
327359771 | import math
def compute_coords(index):
    """Locate square *index* (1-based) on the Ulam spiral; return [x, y]."""
    # Side length of the smallest odd square ring containing this index.
    ring_side = 2 * math.ceil(0.5 * (index ** 0.5 - 1)) + 1
    corner = (ring_side - 1) // 2
    position = [corner, corner]
    remaining = ring_side ** 2 - index
    # Walk backwards from the ring's final corner, at most one side per step.
    for direction in (-1, 1):
        for axis in (0, 1):
            step = min(remaining, ring_side - 1)
            position[axis] += direction * step
            remaining -= step
    return position
# Puzzle input; print the Manhattan distance of that square from the origin.
value = 277678
x, y = compute_coords(value)
print(abs(x) + abs(y))
| grey-area/advent-of-code-2017 | day03/part1.py | part1.py | py | 473 | python | en | code | 0 | github-code | 13 |
14412202460 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import argparse
from contextlib import contextmanager
from pathlib import Path
from PyHSPlasma import *
import shutil
import subprocess
import sys
import time
import traceback
# Command line: <cwd> <age> {pvMoul <ki> [--serverini INI] | pvPots <player>}
main_parser = argparse.ArgumentParser(description="Korman Plasma Launcher")
main_parser.add_argument("cwd", type=Path, help="Working directory of the client")
main_parser.add_argument("age", type=str, help="Name of the age to launch into")
sub_parsers = main_parser.add_subparsers(title="Plasma Version", dest="version",)
moul_parser = sub_parsers.add_parser("pvMoul")
moul_parser.add_argument("ki", type=int, help="KI Number of the desired player")
moul_parser.add_argument("--serverini", type=str, default="server.ini")
sp_parser = sub_parsers.add_parser("pvPots", aliases=["pvPrime"])
sp_parser.add_argument("player", type=str, help="Name of the desired player")
# Vault chronicle used to auto-link into the requested age.
autolink_chron_name = "OfflineKIAutoLink"
# Client binary name differs between Windows and other platforms.
if sys.platform == "win32":
    client_executables = {
        "pvMoul": "plClient.exe",
        "pvPots": "UruExplorer.exe"
    }
else:
    client_executables = {
        "pvMoul": "plClient",
        "pvPots": "UruExplorer"
    }
def die(*args, **kwargs):
    """Write an error to stderr, emit the DIE sentinel on stdout, exit(1).

    A single positional argument is written verbatim; otherwise the first
    argument is treated as a format string for the remaining arguments.
    """
    assert args
    message, *fmt_args = args
    if fmt_args or kwargs:
        message = message.format(*fmt_args, **kwargs)
    sys.stderr.write(message)
    sys.stdout.write("DIE\n")
    sys.exit(1)
@contextmanager
def open_vault_stream(vault_path, fm):
    """Context manager yielding a (possibly decrypting) stream over *vault_path*.

    Reads auto-detect the encryption; writes always use XTEA. The backing
    stream is flushed and closed on exit, and the encryption wrapper (when
    present) is closed first.
    """
    # Prefer hsWindowsStream when PyHSPlasma provides it, otherwise fall back
    # to hsFileStream. BUGFIX: the original fell back to the *string*
    # "hsFileStream", which is not callable and has no __name__, so the
    # non-Windows path crashed.
    stream_type = globals().get("hsWindowsStream") or hsFileStream
    write("DBG: Opened '{}' stream with provider '{}'", vault_path, stream_type.__name__)
    encrypted = plEncryptedStream.IsFileEncrypted(vault_path)
    encryption_type = plEncryptedStream.kEncAuto if fm in {fmRead, fmReadWrite} else plEncryptedStream.kEncXtea
    backing_stream = stream_type().open(vault_path, fm)
    if encrypted:
        enc_stream = plEncryptedStream().open(backing_stream, fm, encryption_type)
        output_stream = enc_stream
    else:
        output_stream = backing_stream
    try:
        yield output_stream
    finally:
        if encrypted:
            enc_stream.close()
        backing_stream.flush()
        backing_stream.close()
def write(*args, **kwargs):
    """Emit one line on stdout and flush immediately.

    A lone positional argument is written verbatim; otherwise the first
    argument is a format string applied to the remaining arguments.
    """
    assert args
    message, *fmt_args = args
    if fmt_args or kwargs:
        message = message.format(*fmt_args, **kwargs)
    sys.stdout.write(message)
    sys.stdout.write("\n")
    # Flush so the parent process sees output promptly (why print() is avoided).
    sys.stdout.flush()
def backup_vault_dat(path):
    """Copy *path* to a sibling .korman_backup file before modifying it."""
    backup_path = path.with_suffix(".dat.korman_backup")
    shutil.copy2(str(path), str(backup_path))
    write("DBG: Copied vault backup: {}", backup_path)
def set_link_chronicle(store, new_value, cond_value=None):
    """Set the OfflineKIAutoLink chronicle in *store* to *new_value*.

    When *cond_value* is given, the chronicle is only changed if its current
    value equals cond_value. Returns the previous value (empty string when
    the chronicle had to be created). Dies if the chronicle folder is missing.
    """
    chron_folder = next((i for i in store.getChildren(store.firstNodeID)
                         if getattr(i, "folderType", None) == plVault.kChronicleFolder), None)
    if chron_folder is None:
        die("Could not locate vault chronicle folder.")
    autolink_chron = next((i for i in store.getChildren(chron_folder.nodeID)
                           if getattr(i, "entryName", None) == autolink_chron_name), None)
    if autolink_chron is None:
        write("DBG: Creating AutoLink chronicle...")
        autolink_chron = plVaultChronicleNode()
        autolink_chron.entryName = autolink_chron_name
        previous_value = ""
        # Attach the new node under the chronicle folder.
        store.addRef(chron_folder.nodeID, store.lastNodeID + 1)
    else:
        write("DBG: Found AutoLink chronicle...")
        previous_value = autolink_chron.entryValue
    # Have to submit the changed node to the store
    if cond_value is None or previous_value == cond_value:
        write("DBG: AutoLink = '{}' (previously: '{}')", new_value, previous_value)
        autolink_chron.entryValue = new_value
        store.addNode(autolink_chron)
    else:
        write("DBG: ***Not*** changing chronicle! AutoLink = '{}' (expected: '{}')", previous_value, cond_value)
    return previous_value
def find_player_vault(cwd, name):
    """Locate the single-player vault for player *name* under <cwd>/sav.

    Returns a (vault_path, plVaultStore) tuple; dies if the sav directory or
    a matching player vault cannot be found.
    """
    sav_dir = cwd.joinpath("sav")
    if not sav_dir.is_dir():
        die("Could not locate sav directory.")
    for i in sav_dir.iterdir():
        if not i.is_dir():
            continue
        current_dir = i.joinpath("current")
        if not current_dir.is_dir():
            continue
        vault_dat = current_dir.joinpath("vault.dat")
        if not vault_dat.is_file():
            continue
        store = plVaultStore()
        with open_vault_stream(vault_dat, fmRead) as stream:
            store.Import(stream)
        # First node is the Player node...
        playerNode = store[store.firstNodeID]
        if playerNode.playerName == name:
            write("DBG: Vault found: {}", vault_dat)
            return vault_dat, store
    die("Could not locate the requested player vault.")
def main():
    """Launch the configured Plasma client, patching the SP vault if needed.

    For pvPots the player's vault is backed up, the AutoLink chronicle set to
    the requested age, and an init file generated; after the client exits the
    chronicle is restored (unless the client changed it). For pvMoul the
    client handles linking itself via command-line arguments.
    """
    print("DBG: alive")
    args = main_parser.parse_args()
    executable = args.cwd.joinpath(client_executables[args.version])
    if not executable.is_file():
        die("Failed to locate client executable.")
    # Have to find and mod the single player vault...
    if args.version == "pvPots":
        vault_path, vault_store = find_player_vault(args.cwd, args.player)
        backup_vault_dat(vault_path)
        vault_prev_autolink = set_link_chronicle(vault_store, args.age)
        write("DBG: Saving vault...")
        with open_vault_stream(vault_path, fmCreate) as stream:
            vault_store.Export(stream)
        # Update init file for this schtuff...
        init_path = args.cwd.joinpath("init", "net_age.fni")
        with plEncryptedStream().open(str(init_path), fmWrite, plEncryptedStream.kEncXtea) as ini:
            ini.writeLine("# This file was automatically generated by Korman.")
            ini.writeLine("Nav.PageInHoldList GlobalAnimations")
            ini.writeLine("Net.SetPlayer {}".format(vault_store.firstNodeID))
            ini.writeLine("Net.SetPlayerByName \"{}\"".format(args.player))
            # BUT WHY??? You ask...
            # Because, sayeth Hoikas, if this command is not executed, you will remain ensconsed
            # in the black void of the Link... forever... Sadly, it accepts no arguments and determines
            # whether to link to AvatarCustomization, Cleft, Demo (whee!), or Personal all by itself.
            ini.writeLine("Net.JoinDefaultAge")
        # When URU runs, the player may change the vault. Remove any temptation to play with
        # the stale vault...
        del vault_store
        # EXE args
        plasma_args = [str(executable), "-iinit", "To_Dni"]
    else:
        write("DBG: Using a superior client :) :) :)")
        plasma_args = [str(executable), "-LocalData", "-SkipLoginDialog", "-ServerIni={}".format(args.serverini),
                       "-PlayerId={}".format(args.ki), "-Age={}".format(args.age)]
    try:
        # NOTE(review): shell=True combined with an args list behaves
        # differently on POSIX (extra args go to the shell) -- confirm this is
        # only ever run where it works as intended.
        proc = subprocess.Popen(plasma_args, cwd=str(args.cwd), shell=True)
        # signal everything is a-ok -- causes blender to detach
        write("PLASMA_RUNNING")
        # Wait for things to finish
        proc.wait()
    finally:
        # Restore sp vault, if needed.
        if args.version == "pvPots":
            vault_store = plVaultStore()
            with open_vault_stream(vault_path, fmRead) as stream:
                vault_store.Import(stream)
            new_prev_autolink = set_link_chronicle(vault_store, vault_prev_autolink, args.age)
            if new_prev_autolink != args.age:
                write("DBG: ***Not*** resaving the vault!")
            else:
                write("DBG: Resaving vault...")
                with open_vault_stream(vault_path, fmCreate) as stream:
                    vault_store.Export(stream)
        # All good!
        write("DONE")
        sys.exit(0)
if __name__ == "__main__":
    try:
        main()
    except SystemExit:
        # sys.exit() must propagate untouched. Note: SystemExit does not
        # derive from Exception, so the original "isinstance(e, SystemExit)"
        # check inside an "except Exception" handler was dead code.
        raise
    except Exception:
        # Report any real failure through the DIE protocol for the parent.
        die(traceback.format_exc())
| H-uru/korman | korman/plasma_launcher.py | plasma_launcher.py | py | 8,579 | python | en | code | 31 | github-code | 13 |
30360281637 | from django.db import models
from django.contrib.auth.models import AbstractUser
USER = 'user'
ADMIN = 'admin'
MODERATOR = 'moderator'

# Choices for User.role as (value, label) pairs. BUGFIX: Django expects an
# *ordered* sequence here; the original used a set literal, which made the
# choice ordering in forms and migration state nondeterministic.
ROLES_CHOICES = (
    (USER, 'Пользователь'),
    (ADMIN, 'Администратор'),
    (MODERATOR, 'Модератор'),
)
class User(AbstractUser):
    """Custom user model with a role (user/moderator/admin) and a biography."""
    username = models.CharField(
        max_length=150, unique=True, blank=False, null=False
    )
    email = models.EmailField(
        max_length=254, unique=True, blank=False, null=False
    )
    # Access level; see ROLES_CHOICES. Defaults to the plain user role.
    role = models.CharField(
        max_length=20,
        blank=True,
        choices=ROLES_CHOICES,
        default=USER,
    )
    bio = models.TextField(
        max_length=1000,
        blank=True,
    )
    first_name = models.CharField(max_length=150, blank=True)
    last_name = models.CharField(max_length=150, blank=True)
    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'
    # Convenience role predicates used by permission checks.
    @property
    def is_user(self):
        return self.role == USER
    @property
    def is_admin(self):
        return self.role == ADMIN
    @property
    def is_moderator(self):
        return self.role == MODERATOR
    def __str__(self):
        return self.username
| denchur/GroupProj | api_yamdb/users/models.py | models.py | py | 1,241 | python | en | code | 0 | github-code | 13 |
74514483858 | '''
SẮP XẾP THEO TỔNG CHỮ SỐ
Cho dãy số A[] có N phần tử đều là các số nguyên dương, không quá 6 chữ số.
Hãy sắp xếp dãy số theo tổng chữ số tăng dần. Nếu tổng chữ số bằng nhau thì số nào nhỏ hơn sẽ viết trước.
Input
Dòng đầu ghi số bộ test (không quá 10)
Mỗi bộ test gồm 2 dòng:
Dòng đầu là số N (N < 100)
Dòng thứ 2 ghi N số của mảng A[], các số đều nguyên dương và không quá 9 chữ số.
Output
Với mỗi bộ test, ghi trên một dòng dãy số kết quả.
'''
t = int(input())
def sum(n):
    """Digit sum of a non-negative integer *n* (returns 0 when n <= 0).

    NOTE(review): this shadows the builtin ``sum``; callers below rely on
    the name, so it is kept.
    """
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
def condition_sort(x):
    """Sort key for the digit-sum ordering: (digit sum, value).

    Ties on digit sum fall back to the numeric value, so smaller numbers
    come first among equals.
    """
    # The original also bound an unused local (val = x); dropped.
    return (sum(x), x)
while t>0:
    # N is read but the actual length comes from the split line itself.
    n = int(input())
    a = list(map(int,input().split()))
    # Sort on (digit sum, value): ties keep the smaller number first.
    a.sort(key=condition_sort)
    # NOTE(review): the comprehension variable shadows the builtin ``int``.
    str_a = [str(int) for int in a]
    print(' '.join(str_a))
    t -= 1
'''
1
8
143 31 22 99 7 9 1111 10000000
'''
4790806748 | from bin.scraper import Omni
if __name__ == '__main__':
    # Configure the Omni scraper for the Dallas County jail lookup form.
    # NOTE(review): the scraper is constructed but no run/search method is
    # invoked here -- confirm whether Omni starts work in its constructor.
    scraper = Omni(
        base_url='https://www.dallascounty.org/jaillookup/searchByName',
        specs={
            'pagination': True,
            'pages_element': '',
            'error_message': 'No records were found using the search criteria provided',
            # Form fields keyed by name, mapped to their HTML control type.
            'fields': {
                'firstName': 'input',
                'lastName': 'input',
                'race': 'select',
                'sex': 'select'
            },
            'buttonText': 'Search By Prisoner Info'
        }
    )
| isome01/intelbroker | main.py | main.py | py | 573 | python | en | code | 0 | github-code | 13 |
10348863123 | import multiprocessing
from decimal import Decimal
from slacker import Slacker
from pymarketcap import Pymarketcap
from tinymongo import TinyMongoClient
import cryCompare
class ArbitrageBot:
    """Watches exchange prices for a moving coin and posts arbitrage signals to Slack."""
    def __init__(self):
        # Removed commented-out exchange API keys/secrets that were previously
        # kept here -- credentials must never live in source, even commented.
        self.market_cap = Pymarketcap()
        # NOTE(review): hard-coded Slack token below is a leaked credential;
        # it should be loaded from the environment or a secrets store.
        slack_token = "xoxp-302678850693-302678850805-302556314308-5b70830e08bc3a0f6895d1f8545f537a"
        self.slack = Slacker(slack_token)
        self.exchanges = ["Poloniex", "Kraken",
                          "HitBTC", "Gemini", "Exmo", #"Yobit",
                          "Cryptopia", "Binance", "OKEX"]
        self.to_coins = ["BTC", "ETH", "LTC"]
        self.Price = cryCompare.Price()
        # Sentinels so the first observed price always wins the comparison.
        self.lowest_price = 10000000000000000000000
        self.highest_price = 0
        self.exchange1 = None
        self.exchange2 = None
        self.movement = None
    def scan_for_arbitrage(self, to_coin, targetCoin):
        """Poll each exchange for targetCoin/to_coin and post a Slack signal.

        Depending on self.movement, tracks either the lowest ("up") or the
        highest ("down") quote, then reports the spread to #signals.
        """
        print("Running!", to_coin)
        for exchange in self.exchanges:
            prices = self.Price.price(from_curr=targetCoin,
                                      to_curr=to_coin, e=exchange.lower())
            # A 'Response' key indicates an API error payload; skip those.
            if 'Response' not in prices:
                if prices[to_coin] < self.lowest_price and self.movement == "up":
                    self.lowest_price = prices[to_coin]
                    self.exchange1 = exchange
                if prices[to_coin] > self.highest_price and self.movement == "down":
                    self.highest_price = prices[to_coin]
                    self.exchange2 = exchange
        if (self.highest_price > 0 and self.lowest_price < 10000000000000000000000
                and self.highest_price > Decimal(.0000001) and self.lowest_price > Decimal(.0000001)):
            # NOTE(review): percent_diff is computed but never used.
            percent_diff = ((self.highest_price - self.lowest_price) / self.highest_price) * 100
            self.slack.chat.post_message('#signals',
                                         "%s/%s is listed for %f%s on %s and %f%s on %s"
                                         % (targetCoin, to_coin, self.lowest_price, to_coin,
                                            self.exchange1, self.highest_price, to_coin, self.exchange2))
    def checkCoin(self, targetCoin_json):
        """Seed the tracked extremes from a movement event and fan out scans.

        One scan_for_arbitrage process is started per quote coin.
        """
        if (targetCoin_json["type"] == "up"):
            self.movement = "up"
            self.highest_price = targetCoin_json["price_btc"]
            self.exchange2 = targetCoin_json["exchange"]
        else:
            self.movement = "down"
            self.lowest_price = targetCoin_json["price_btc"]
            self.exchange1 = targetCoin_json["exchange"]
        for coin in self.to_coins:
            p = multiprocessing.Process(target=self.scan_for_arbitrage, args=(coin,targetCoin_json["ticker"]))
            p.start()
| Nfinger/crypto-analytics-api | arbitrage.py | arbitrage.py | py | 3,531 | python | en | code | 0 | github-code | 13 |
12984635602 | import numpy as np
import pandas as pd
import csv
import yfinance as yf
import matplotlib as plt
import tensorflow as tf
# Parameters:
details = 6   # columns per yfinance row (Open/High/Low/Close/Adj Close/Volume)
stocks = 27
days = 762
start = '2016-01-01'
end = '2019-01-01'
dataSet = np.zeros((days,details,1))
print(dataSet.shape)
stocklist = []
# Download daily history for every ticker listed in stock_Name.csv and stack
# the per-ticker arrays along the third axis of dataSet.
with open('stock_Name.csv','r') as f:
    reader = csv.reader(f)
    for name in reader:
        stocklist = name
        print(stocklist)
        for x in name:
            try:
                data = yf.download(x,start,end)
                temp = data.to_numpy()
                try:
                    dataSet = np.dstack((dataSet,temp))
                except:
                    # shape mismatch (e.g. missing trading days) lands here
                    print('append went wrong')
            except:
                # NOTE(review): message lacks spaces around the ticker name.
                print('no data for' +x+ 'found!')
# NOTE(review): broken as written -- tf.in32 is presumably tf.int32, and
# batch_size, num_steps and lstm_size are never defined in this file.
test_dataset = tf.placeholder(tf.in32, [batch_size, num_steps])
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
| TheGamlion/Stock_RNN | main.py | main.py | py | 898 | python | en | code | 0 | github-code | 13 |
9712290865 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as gnn
import torch_geometric.nn.models as M
class GCNGATVGAE(nn.Module):
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, num_heads = 3):
        """Build parallel GCN and GAT encoder branches plus an inner-product decoder."""
        super(GCNGATVGAE, self).__init__()
        # GCN branch: one hidden layer, then separate mu / log-variance heads.
        self.gcn = gnn.GCNConv(input_feat_dim, hidden_dim1)
        self.gcn_mu = gnn.GCNConv(hidden_dim1, hidden_dim2)
        self.gcn_logvar = gnn.GCNConv(hidden_dim1, hidden_dim2)
        # GAT branch: heads concatenated in the hidden layer (width becomes
        # num_heads * hidden_dim1), averaged (concat=False) for mu / log-variance.
        self.gat = gnn.GATConv(input_feat_dim, hidden_dim1, heads=num_heads, concat = True)
        self.gat_mu = gnn.GATConv(num_heads * hidden_dim1, hidden_dim2, heads=num_heads, concat = False)
        self.gat_logvar = gnn.GATConv(num_heads * hidden_dim1, hidden_dim2, heads=num_heads, concat = False)
        self.decoder = M.InnerProductDecoder()
        print(f"Using {num_heads} heads")
    def encode(self, x, edge_index):
        """Run both encoder branches; return [mu_gcn, logvar_gcn, mu_gat, logvar_gat]."""
        h_gcn = self.gcn(x, edge_index)
        h_gcn = F.relu(h_gcn)
        h_gat = self.gat(x, edge_index)
        h_gat = F.relu(h_gat)
        mu_gcn, logvar_gcn = self.gcn_mu(h_gcn, edge_index), self.gcn_logvar(h_gcn, edge_index)
        mu_gat, logvar_gat = self.gat_mu(h_gat, edge_index), self.gat_logvar(h_gat, edge_index)
        return [mu_gcn, logvar_gcn, mu_gat, logvar_gat]
def reparametrize(self, mu, logstd):
if self.training:
return mu + torch.randn_like(logstd) * torch.exp(logstd)
else:
return mu
def forward(self, x, edge_index):
mu_gcn, logvar_gcn, mu_gat, logvar_gat = self.encode(x, edge_index)
mu_cat = torch.cat((mu_gcn, mu_gat), dim=1)
logvar_cat = torch.cat((logvar_gcn, logvar_gat), dim=1)
z = self.reparametrize(mu_cat, logvar_cat)
return self.decoder.forward_all(z, sigmoid=True), mu_cat, logvar_cat #torch.max(mu_gcn, mu_gat), torch.max(logvar_gcn, logvar_gat) | Anindyadeep/MultiHeadVGAEs | Models/gcn_gat_cat.py | gcn_gat_cat.py | py | 1,911 | python | en | code | 4 | github-code | 13 |
17051875254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ExtendFieldInfo(object):
    """Key/value extension field used in Alipay API payloads."""

    # Attribute names serialized by to_alipay_dict / consumed by from_alipay_dict.
    _FIELDS = ('field_name', 'field_value')

    def __init__(self):
        self._field_name = None
        self._field_value = None

    @property
    def field_name(self):
        return self._field_name

    @field_name.setter
    def field_name(self, value):
        self._field_name = value

    @property
    def field_value(self):
        return self._field_value

    @field_value.setter
    def field_value(self, value):
        self._field_value = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, delegating to nested to_alipay_dict()."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an ExtendFieldInfo from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = ExtendFieldInfo()
        for key in ExtendFieldInfo._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ExtendFieldInfo.py | ExtendFieldInfo.py | py | 1,401 | python | en | code | 241 | github-code | 13 |
def is_pangram(sentence):
    """Return True when *sentence* contains every letter a-z at least once.

    The check is case-insensitive; non-letter characters are ignored and an
    empty string is not a pangram.
    """
    # A pangram simply needs the whole alphabet as a subset of its characters.
    # BUGFIX: the original flag-based scan kept a stale True once the sentence
    # characters were exhausted (the inner loop stopped running), so short
    # inputs such as "a" were wrongly reported as pangrams.
    alphabet = set("abcdefghijklmnopqrstuvwxyz")
    return alphabet.issubset(sentence.lower())
28663913736 | import sqlite3
import time
class DbStore():
    """Client-side SQLite storage: a contact list plus message history.

    Each instance persists to ``<name>.db``; ``name`` is also used as the
    sender login when recording outgoing messages.
    """

    def __init__(self, name):
        self.name = name

    def _connect(self):
        """Open a connection to this client's database file."""
        return sqlite3.connect(str(self.name) + '.db')

    def create_db(self):
        """Create the history_message and contacts tables if they are missing."""
        con = self._connect()
        try:
            cur = con.cursor()
            cur.execute('CREATE TABLE IF NOT EXISTS history_message(time TEXT,'
                        'sender TEXT,'
                        'receiver TEXT,'
                        'message TEXT)')
            cur.execute('CREATE TABLE IF NOT EXISTS contacts(client_id TEXT PRIMARY KEY)')
            cur.close()
        finally:
            # BUGFIX: the original leaked connections/cursors on early returns.
            con.close()

    def add_client(self, login):
        """Insert *login* into contacts; report duplicates instead of raising."""
        con = self._connect()
        try:
            cur = con.cursor()
            try:
                cur.execute('INSERT INTO contacts VALUES (?)', [login])
                con.commit()
                print("Пользователь %s добавлен" % login)
            except sqlite3.IntegrityError:
                # Duplicate primary key: the contact is already stored.
                print('Пользователь %s уже в списке контактов' % login)
            cur.close()
        finally:
            con.close()

    def del_client(self, login):
        """Remove *login* from contacts if present, otherwise report not found."""
        con = self._connect()
        try:
            cur = con.cursor()
            # SECURITY FIX: the original concatenated login into the SQL text,
            # allowing injection; use parameterized queries instead.
            cur.execute('SELECT client_id FROM contacts WHERE client_id = ?', [login])
            if cur.fetchall():
                cur.execute('DELETE FROM contacts WHERE client_id = ?', [login])
                con.commit()
                print("Пользователь %s удален" % login)
            else:
                print('Пользователь %s в списке не найден' % login)
            cur.close()
        finally:
            con.close()

    def show_client(self):
        """Return the list of contact logins."""
        con = self._connect()
        try:
            cur = con.cursor()
            cur.execute('SELECT client_id FROM contacts')
            nicklist = [row[0] for row in cur.fetchall()]
            # BUGFIX: the original had cur.close()/con.close() *after* the
            # return statement, so they never ran.
            cur.close()
            return nicklist
        finally:
            con.close()

    def history(self, who, msg):
        """Append an outgoing message (timestamped, sent by self.name) to history."""
        t = time.strftime("%Y-%m-%d-%H.%M.%S", time.localtime())
        con = self._connect()
        try:
            cur = con.cursor()
            cur.execute('INSERT INTO history_message VALUES (?,?,?,?)',
                        [t, self.name, who, msg])
            con.commit()
            cur.close()
        finally:
            con.close()
| amakovey/messenger | dbclient.py | dbclient.py | py | 2,578 | python | ru | code | 0 | github-code | 13 |
10999165238 | from utilities import get_random_list
from utilities import timeit
@timeit
def solve_ranked_pythonic(ranked, player):
    """Dense leaderboard ranks for each cumulative *player* score.

    *player* scores are assumed non-decreasing; each result is the dense
    rank the score would take among the unique scores in *ranked*.
    """
    player_rank = []
    unique_sorted_rank = list(set(ranked))
    unique_sorted_rank.sort()
    i = 0
    # Start one position below the worst rank and climb as scores pass entries.
    current_position = len(unique_sorted_rank) + 1
    for score in player:
        if current_position > 1:
            while i < len(unique_sorted_rank) and score >= unique_sorted_rank[i]:
                current_position -= 1
                i += 1
        player_rank.append(current_position)
    return player_rank
@timeit
def solve_ranked_efficient(ranking, player):
    """Dense ranks for *player* scores against a descending *ranking*.

    Walks the descending leaderboard and the player's scores (largest
    first) in a single merge-like pass; returns ranks in player order.
    """
    player_rank = []
    player_index = len(player) - 1  # best (largest) player score first
    ranking_size = len(ranking)
    ranking_index = 0
    position = 1
    while ranking_index < ranking_size and player_index >= 0:
        current_player = player[player_index]
        ranking_points = ranking[ranking_index]
        if current_player >= ranking_points:
            player_rank.append(position)
            player_index -= 1
        else:
            ranking_index += 1
            # Only a strictly lower score advances the dense rank.
            if ranking_index < ranking_size and ranking[ranking_index] < ranking[ranking_index - 1]:
                position += 1
    position += 1
    # Any remaining player scores fall below the entire leaderboard.
    while player_index >= 0:
        player_rank.append(position)
        player_index -= 1
    return player_rank[::-1]
def test_exec_time():
    """Exercise both solvers on identical random inputs (timings via @timeit)."""
    ranked = get_random_list()
    player = get_random_list()
    solve_ranked_pythonic(ranked, player)
    solve_ranked_efficient(ranked, player)
def test_ranked_pythonic():
    """HackerRank sample case for the pythonic solver."""
    ranked = [100, 90, 90, 80]
    player = [70, 80, 105]
    result = solve_ranked_pythonic(ranked, player)
    assert result == [4, 3, 1]
def test_ranked_efficient():
    """HackerRank sample case for the single-pass solver."""
    ranked = [100, 90, 90, 80]
    player = [70, 80, 105]
    result = solve_ranked_efficient(ranked, player)
    assert result == [4, 3, 1]
| pedrolp85/python_pair_programming | climbing_the_leaderboard.py | climbing_the_leaderboard.py | py | 1,833 | python | en | code | 1 | github-code | 13 |
34795809269 | #!/usr/bin/env/python
# File name : server.py
# Production : PiCar-C
# Website : www.adeept.com
# Author : William
# Date : 2019/11/21
import servo
servo.servo_init()
import socket
import time
import threading
import GUImove as move
import Adafruit_PCA9685
import os
import FPV
import info
import LED
import GUIfindline as findline
import switch
import ultra
import PID
import random
# Feature switches (0 disables the corresponding subsystem).
SR_dect = 0
appConnection = 1
Blockly = 0
if SR_dect:
    try:
        import SR
        SR_dect = 1
    except:
        # Speech recognition is optional; fall back silently when missing.
        SR_dect = 0
        pass
SR_mode = 0
if appConnection:
    try:
        import appserver
        AppConntect_threading=threading.Thread(target=appserver.app_ctrl) #Thread for the app-control server
        AppConntect_threading.setDaemon(True) #Daemon thread: it is killed when the main program exits
        AppConntect_threading.start() #Thread starts
    except:
        pass
MPU_connection = 1
servo_speed = 5
functionMode = 0
dis_keep = 0.35   # target distance (m) for distance-keeping behaviour
goal_pos = 0
tor_pos = 1
mpu_speed = 1
init_get = 0
range_min = 0.55  # obstacle threshold (m) used by autoDect()
R_set = 0
G_set = 0
B_set = 0
def start_blockly():
    """Launch the companion Blockly server; blocks until that process exits."""
    os.system("cd //home/pi/Blockly_picar-c && sudo python3 server.py")
if Blockly:
    try:
        blockly_threading=threading.Thread(target=start_blockly) #Thread for the Blockly server
        blockly_threading.setDaemon(True) #Daemon thread: it is killed when the main program exits
        blockly_threading.start() #Thread starts
    except:
        pass
def _move_burst(speed, direction):
    # Drive briefly (0.5 s) in the given direction, then stop the motors.
    move.move(speed, direction)
    time.sleep(0.5)
    move.motorStop()


def autoDect(speed):
    """One ultrasonic obstacle-avoidance step.

    Scans three directions (ahead, left, right) with the pan servo, steers
    away from whichever side is blocked (reading < ``range_min`` metres) and
    drives a short burst; reverses when all three directions are blocked.

    :param speed: motor speed, passed straight through to ``move.move``.
    """
    move.motorStop()
    # --- scan: centre, left, right (0.3 s lets the pan servo settle) ---
    servo.ahead()
    time.sleep(0.3)
    getMiddle = ultra.checkdist()
    print('M%f'%getMiddle)
    servo.ahead()
    servo.lookleft(100)
    time.sleep(0.3)
    getLeft = ultra.checkdist()
    print('L%f'%getLeft)
    servo.ahead()
    servo.lookright(100)
    time.sleep(0.3)
    getRight = ultra.checkdist()
    print('R%f'%getRight)
    # --- decide a steering action from the three readings ---
    if getMiddle < range_min and min(getLeft, getRight) > range_min:
        # Blocked straight ahead only: dodge to a random side.
        if random.randint(0,1):
            servo.turnLeft()
        else:
            servo.turnRight()
        _move_burst(speed, 'forward')
    elif getLeft < range_min and min(getMiddle, getRight) > range_min:
        servo.turnRight(0.7)
        _move_burst(speed, 'forward')
    elif getRight < range_min and min(getMiddle, getLeft) > range_min:
        servo.turnLeft(0.7)
        _move_burst(speed, 'forward')
    elif max(getLeft, getMiddle) < range_min and getRight > range_min:
        servo.turnRight()
        _move_burst(speed, 'forward')
    elif max(getMiddle, getRight) < range_min and getLeft > range_min:
        servo.turnLeft()
        _move_burst(speed, 'forward')
    elif max(getLeft, getMiddle, getRight) < range_min:
        # Boxed in on every side: back straight up.
        _move_burst(speed, 'backward')
    else:
        # All clear (or an ambiguous boundary reading): go straight ahead.
        servo.turnMiddle()
        _move_burst(speed, 'forward')
class Servo_ctrl(threading.Thread):
    """Background worker driving the pan/tilt servos and the autonomous modes.

    The loop reacts to the module globals ``servo_command`` (manual servo
    moves) and ``functionMode`` (4 = line follow, 5 = obstacle avoidance,
    6 = MPU camera stabilisation). ``pause()``/``resume()`` gate each loop
    iteration; ``stop()`` ends the thread.
    """

    def __init__(self, *args, **kwargs):
        super(Servo_ctrl, self).__init__(*args, **kwargs)
        # __flag gates each iteration (cleared -> worker sleeps in wait());
        # __running stays set for the whole lifetime of the thread.
        self.__flag = threading.Event()
        self.__flag.set()
        self.__running = threading.Event()
        self.__running.set()

    def run(self):
        global goal_pos, servo_command, init_get, functionMode
        # is_set() replaces the deprecated camelCase alias isSet().
        while self.__running.is_set():
            self.__flag.wait()
            if functionMode != 6:
                # Manual camera moves (mode 6 handles up/down itself below).
                if servo_command == 'lookleft':
                    servo.lookleft(servo_speed)
                elif servo_command == 'lookright':
                    servo.lookright(servo_speed)
                elif servo_command == 'up':
                    servo.up(servo_speed)
                elif servo_command == 'down':
                    servo.down(servo_speed)
                else:
                    pass
            if functionMode == 4:
                # Line following: camera straight ahead, one follower step.
                servo.ahead()
                findline.run()
                if not functionMode:
                    move.motorStop()
            elif functionMode == 5:
                # Ultrasonic obstacle avoidance, one step per iteration.
                autoDect(50)
                if not functionMode:
                    move.motorStop()
            elif functionMode == 6:
                # Camera stabilisation from the MPU x-acceleration reading.
                if MPU_connection:
                    accelerometer_data = sensor.get_accel_data()
                    X_get = accelerometer_data['x']
                    if not init_get:
                        # Capture the current attitude as the goal once.
                        goal_pos = X_get
                        init_get = 1
                    if servo_command == 'up':
                        # Manual nudges re-baseline the stabilisation goal.
                        servo.up(servo_speed)
                        time.sleep(0.2)
                        accelerometer_data = sensor.get_accel_data()
                        X_get = accelerometer_data['x']
                        goal_pos = X_get
                    elif servo_command == 'down':
                        servo.down(servo_speed)
                        time.sleep(0.2)
                        accelerometer_data = sensor.get_accel_data()
                        X_get = accelerometer_data['x']
                        goal_pos = X_get
                    if abs(X_get-goal_pos)>tor_pos:
                        # Proportional correction toward the goal attitude.
                        if X_get > goal_pos:
                            servo.down(int(mpu_speed*abs(X_get - goal_pos)))
                        elif X_get < goal_pos:
                            servo.up(int(mpu_speed*abs(X_get - goal_pos)))
                    time.sleep(0.03)
                    continue
            else:
                # No autonomous mode active: clear mode and self-pause.
                functionMode = 0
                try:
                    self.pause()
                except:
                    pass
            time.sleep(0.03)

    def pause(self):
        # Park the worker at the next __flag.wait().
        self.__flag.clear()

    def resume(self):
        self.__flag.set()

    def stop(self):
        # Release a paused loop first so the while-condition is re-checked.
        self.__flag.set()
        self.__running.clear()
class SR_ctrl(threading.Thread):
    """Speech-recognition control thread.

    While ``SR_mode`` is set, blocks on ``SR.run()`` for a voice command and
    translates it into a one-second drive action; otherwise self-pauses.
    """

    def __init__(self, *args, **kwargs):
        super(SR_ctrl, self).__init__(*args, **kwargs)
        # __flag gates each iteration; __running is the lifetime switch.
        self.__flag = threading.Event()
        self.__flag.set()
        self.__running = threading.Event()
        self.__running.set()

    def run(self):
        global goal_pos, servo_command, init_get, functionMode
        # is_set() replaces the deprecated camelCase alias isSet().
        while self.__running.is_set():
            self.__flag.wait()
            if SR_mode:
                voice_command = SR.run()
                # BUGFIX: the original called turn.turnMiddle(), but no
                # `turn` module is imported anywhere in this file; the
                # steering helpers live in `servo` (cf. servo.turnMiddle()
                # used by the TCP command handler).
                if voice_command == 'forward':
                    servo.turnMiddle()
                    move.move(speed_set, 'forward')
                    time.sleep(1)
                    move.motorStop()
                elif voice_command == 'backward':
                    servo.turnMiddle()
                    move.move(speed_set, 'backward')
                    time.sleep(1)
                    move.motorStop()
                elif voice_command == 'left':
                    servo.turnLeft()
                    move.move(speed_set, 'forward')
                    time.sleep(1)
                    servo.turnMiddle()
                    move.motorStop()
                elif voice_command == 'right':
                    servo.turnRight()
                    move.move(speed_set, 'forward')
                    time.sleep(1)
                    servo.turnMiddle()
                    move.motorStop()
                elif voice_command == 'stop':
                    servo.turnMiddle()
                    move.motorStop()
            else:
                self.pause()

    def pause(self):
        self.__flag.clear()

    def resume(self):
        self.__flag.set()

    def stop(self):
        # Release a paused loop first so the while-condition is re-checked.
        self.__flag.set()
        self.__running.clear()
def info_send_client():
    """Push CPU temperature/usage, RAM usage and servo direction to the
    connected client once per second over a dedicated TCP socket.

    Relies on the module-global ``addr`` (set when the main command socket
    is accepted). Runs forever; on a send failure it backs off 10 s and
    keeps retrying on the same socket.
    """
    SERVER_IP = addr[0]
    SERVER_PORT = 2256 #Port used for the telemetry channel
    SERVER_ADDR = (SERVER_IP, SERVER_PORT)
    Info_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Set connection value for socket
    Info_Socket.connect(SERVER_ADDR)
    print(SERVER_ADDR)
    while 1:
        try:
            # Space-separated telemetry line: temp, cpu%, ram, direction.
            Info_Socket.send((info.get_cpu_tempfunc()+' '+info.get_cpu_use()+' '+info.get_ram_info()+' '+str(servo.get_direction())).encode())
            time.sleep(1)
        except:
            time.sleep(10)
            pass
def FPV_thread():
    """Start the first-person-view video capture loop, streaming frames to
    the client's IP (module-global ``addr``). Blocking; run in a thread."""
    global fpv
    fpv=FPV.FPV()
    fpv.capture_thread(addr[0])
def ap_thread():
    """Bring up a Wi-Fi access point (SSID 'Groovy') when no network is
    reachable; requires the `create_ap` tool. Blocking; run in a thread."""
    os.system("sudo create_ap wlan0 eth0 Groovy 12345678")
def run():
    """Main TCP command loop.

    Reads newline-free text commands from the module-global ``tcpCliSock``
    and dispatches them: drive/steer commands, port switches, autonomous
    modes (radar scan, colour find, watchdog, line follow, obstacle
    avoidance, MPU stabilisation), camera servo moves, LED colour, PWM
    trims, OpenCV line-follow tuning, LED effects and voice control.
    Mutates many module globals; never returns normally — exits only when
    a socket/runtime error propagates to the caller.
    """
    global servo_move, speed_set, servo_command, functionMode, init_get, R_set, G_set, B_set, SR_mode
    servo.servo_init()
    move.setup()
    findline.setup()
    direction_command = 'no'
    turn_command = 'no'
    servo_command = 'no'
    speed_set = 100
    rad = 0.5
    # Telemetry channel back to the client, as a daemon thread.
    info_threading=threading.Thread(target=info_send_client) #Define a thread for telemetry
    info_threading.setDaemon(True) #daemon: exits together with the main program
    info_threading.start() #Thread starts
    # Servo/autonomy worker starts parked until a command resumes it.
    servo_move = Servo_ctrl()
    servo_move.start()
    servo_move.pause()
    findline.setup()  # NOTE(review): duplicate of the call above — likely redundant
    while True:
        data = ''
        data = str(tcpCliSock.recv(BUFSIZ).decode())
        if not data:
            continue
        # --- motion -----------------------------------------------------
        elif 'forward' == data:
            direction_command = 'forward'
            move.move(speed_set, direction_command)
        elif 'backward' == data:
            direction_command = 'backward'
            move.move(speed_set, direction_command)
        elif 'DS' in data:
            # Drive-stop: release the throttle.
            direction_command = 'no'
            move.move(speed_set, direction_command)
        elif 'left' == data:
            # turn_command = 'left'
            servo.turnLeft()
        elif 'right' == data:
            # turn_command = 'right'
            servo.turnRight()
        elif 'TS' in data:
            # Turn-stop: centre the steering.
            # turn_command = 'no'
            servo.turnMiddle()
        # --- auxiliary switch ports ------------------------------------
        elif 'Switch_1_on' in data:
            switch.switch(1,1)
            tcpCliSock.send(('Switch_1_on').encode())
        elif 'Switch_1_off' in data:
            switch.switch(1,0)
            tcpCliSock.send(('Switch_1_off').encode())
        elif 'Switch_2_on' in data:
            switch.switch(2,1)
            tcpCliSock.send(('Switch_2_on').encode())
        elif 'Switch_2_off' in data:
            switch.switch(2,0)
            tcpCliSock.send(('Switch_2_off').encode())
        elif 'Switch_3_on' in data:
            switch.switch(3,1)
            tcpCliSock.send(('Switch_3_on').encode())
        elif 'Switch_3_off' in data:
            switch.switch(3,0)
            tcpCliSock.send(('Switch_3_off').encode())
        # --- autonomous modes on ---------------------------------------
        elif 'function_1_on' in data:
            # One-shot ultrasonic radar sweep; results streamed back.
            servo.ahead()
            time.sleep(0.2)
            tcpCliSock.send(('function_1_on').encode())
            radar_send = servo.radar_scan()
            tcpCliSock.sendall(radar_send.encode())
            print(radar_send)
            time.sleep(0.3)
            tcpCliSock.send(('function_1_off').encode())
        elif 'function_2_on' in data:
            functionMode = 2
            fpv.FindColor(1)
            tcpCliSock.send(('function_2_on').encode())
        elif 'function_3_on' in data:
            functionMode = 3
            fpv.WatchDog(1)
            tcpCliSock.send(('function_3_on').encode())
        elif 'function_4_on' in data:
            functionMode = 4
            servo_move.resume()
            tcpCliSock.send(('function_4_on').encode())
        elif 'function_5_on' in data:
            functionMode = 5
            servo_move.resume()
            tcpCliSock.send(('function_5_on').encode())
        elif 'function_6_on' in data:
            # MPU stabilisation only works with the accelerometer present.
            if MPU_connection:
                functionMode = 6
                servo_move.resume()
                tcpCliSock.send(('function_6_on').encode())
        # --- autonomous modes off --------------------------------------
        #elif 'function_1_off' in data:
        #    tcpCliSock.send(('function_1_off').encode())
        elif 'function_2_off' in data:
            functionMode = 0
            fpv.FindColor(0)
            switch.switch(1,0)
            switch.switch(2,0)
            switch.switch(3,0)
            tcpCliSock.send(('function_2_off').encode())
        elif 'function_3_off' in data:
            functionMode = 0
            fpv.WatchDog(0)
            tcpCliSock.send(('function_3_off').encode())
        elif 'function_4_off' in data:
            functionMode = 0
            servo_move.pause()
            move.motorStop()
            tcpCliSock.send(('function_4_off').encode())
        elif 'function_5_off' in data:
            functionMode = 0
            servo_move.pause()
            move.motorStop()
            tcpCliSock.send(('function_5_off').encode())
        elif 'function_6_off' in data:
            functionMode = 0
            servo_move.pause()
            move.motorStop()
            init_get = 0  # re-baseline the MPU goal on the next activation
            tcpCliSock.send(('function_6_off').encode())
        # --- manual camera servo moves ---------------------------------
        elif 'lookleft' == data:
            servo_command = 'lookleft'
            servo_move.resume()
        elif 'lookright' == data:
            servo_command = 'lookright'
            servo_move.resume()
        elif 'up' == data:
            servo_command = 'up'
            servo_move.resume()
        elif 'down' == data:
            servo_command = 'down'
            servo_move.resume()
        elif 'stop' == data:
            # Stop manual servo motion (only pause the worker when no
            # autonomous mode owns it).
            if not functionMode:
                servo_move.pause()
            servo_command = 'no'
            pass
        elif 'home' == data:
            servo.ahead()
        # --- OpenCV toggles and tuning ---------------------------------
        elif 'CVrun' == data:
            if not FPV.CVrun:
                FPV.CVrun = 1
                tcpCliSock.send(('CVrun_on').encode())
            else:
                FPV.CVrun = 0
                tcpCliSock.send(('CVrun_off').encode())
        # --- WS2812 LED colour components ------------------------------
        elif 'wsR' in data:
            try:
                set_R=data.split()
                R_set = int(set_R[1])
                led.colorWipe(R_set, G_set, B_set)
            except:
                pass
        elif 'wsG' in data:
            try:
                set_G=data.split()
                G_set = int(set_G[1])
                led.colorWipe(R_set, G_set, B_set)
            except:
                pass
        elif 'wsB' in data:
            try:
                set_B=data.split()
                B_set = int(set_B[1])
                led.colorWipe(R_set, G_set, B_set)
            except:
                pass
        # --- servo PWM trim values -------------------------------------
        elif 'pwm0' in data:
            try:
                set_pwm0=data.split()
                pwm0_set = int(set_pwm0[1])
                servo.setPWM(0, pwm0_set)
            except:
                pass
        elif 'pwm1' in data:
            try:
                set_pwm1=data.split()
                pwm1_set = int(set_pwm1[1])
                servo.setPWM(1, pwm1_set)
            except:
                pass
        elif 'pwm2' in data:
            try:
                set_pwm2=data.split()
                pwm2_set = int(set_pwm2[1])
                servo.setPWM(2, pwm2_set)
            except:
                pass
        elif 'Speed' in data:
            try:
                set_speed=data.split()
                speed_set = int(set_speed[1])
            except:
                pass
        elif 'Save' in data:
            try:
                servo.saveConfig()
            except:
                pass
        # --- OpenCV line-follow mode and parameters --------------------
        elif 'CVFL' in data:
            if not FPV.FindLineMode:
                FPV.FindLineMode = 1
                tcpCliSock.send(('CVFL_on').encode())
            else:
                move.motorStop()
                FPV.FindLineMode = 0
                tcpCliSock.send(('CVFL_off').encode())
        elif 'Render' in data:
            if FPV.frameRender:
                FPV.frameRender = 0
            else:
                FPV.frameRender = 1
        elif 'WBswitch' in data:
            # Toggle between following a white or a black line.
            if FPV.lineColorSet == 255:
                FPV.lineColorSet = 0
            else:
                FPV.lineColorSet = 255
        elif 'lip1' in data:
            try:
                set_lip1=data.split()
                lip1_set = int(set_lip1[1])
                FPV.linePos_1 = lip1_set
            except:
                pass
        elif 'lip2' in data:
            try:
                set_lip2=data.split()
                lip2_set = int(set_lip2[1])
                FPV.linePos_2 = lip2_set
            except:
                pass
        elif 'err' in data:
            try:
                set_err=data.split()
                err_set = int(set_err[1])
                FPV.findLineError = err_set
            except:
                pass
        elif 'FCSET' in data:
            # Colour-find target: "FCSET H S V".
            FCSET = data.split()
            fpv.colorFindSet(int(FCSET[1]), int(FCSET[2]), int(FCSET[3]))
        elif 'setEC' in data:#camera exposure compensation
            ECset = data.split()
            try:
                fpv.setExpCom(int(ECset[1]))
            except:
                pass
        elif 'defEC' in data:#reset exposure compensation to default
            fpv.defaultExpCom()
        # --- LED effects (mutually exclusive) --------------------------
        elif 'police' in data:
            if LED.ledfunc != 'police':
                tcpCliSock.send(('rainbow_off').encode())
                LED.ledfunc = 'police'
                ledthread.resume()
                tcpCliSock.send(('police_on').encode())
            elif LED.ledfunc == 'police':
                LED.ledfunc = ''
                ledthread.pause()
                tcpCliSock.send(('police_off').encode())
        elif 'rainbow' in data:
            if LED.ledfunc != 'rainbow':
                tcpCliSock.send(('police_off').encode())
                LED.ledfunc = 'rainbow'
                ledthread.resume()
                tcpCliSock.send(('rainbow_on').encode())
            elif LED.ledfunc == 'rainbow':
                LED.ledfunc = ''
                ledthread.pause()
                tcpCliSock.send(('rainbow_off').encode())
        # --- voice control toggle --------------------------------------
        elif 'sr' in data:
            if not SR_mode:
                if SR_dect:
                    SR_mode = 1
                    tcpCliSock.send(('sr_on').encode())
                    sr.resume()
            elif SR_mode:
                SR_mode = 0
                sr.pause()
                move.motorStop()
                tcpCliSock.send(('sr_off').encode())
        else:
            pass
        print(data)
def wifi_check():
    """Probe for network connectivity; fall back to access-point mode.

    Tries a UDP connect to 1.1.1.1 just to learn the local IP address
    (no packet needs to be answered). On failure, spawns the AP thread
    and plays a blue ramp-up LED animation ending in green.
    """
    try:
        s =socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        s.connect(("1.1.1.1",80))
        ipaddr_check=s.getsockname()[0]
        s.close()
        print(ipaddr_check)
    except:
        # No network: start the hotspot in a daemon thread.
        ap_threading=threading.Thread(target=ap_thread) #Define a thread for the access point
        ap_threading.setDaemon(True) #daemon: exits together with the main program
        ap_threading.start() #Thread starts
        # LED ramp while the AP comes up, then green = ready.
        led.colorWipe(0,16,50)
        time.sleep(1)
        led.colorWipe(0,16,100)
        time.sleep(1)
        led.colorWipe(0,16,150)
        time.sleep(1)
        led.colorWipe(0,16,200)
        time.sleep(1)
        led.colorWipe(0,16,255)
        time.sleep(1)
        led.colorWipe(35,255,35)
if __name__ == '__main__':
    # Hardware bring-up: servos, switch ports all off.
    servo.servo_init()
    switch.switchSetup()
    switch.set_all_switch_off()

    HOST = ''
    PORT = 10223           # Command-channel port
    BUFSIZ = 1024          # Receive buffer size
    ADDR = (HOST, PORT)

    # WS2812 LED strip + effect thread (needs the rpi_ws281x package).
    led = LED.LED()
    led.colorWipe(255,16,0)
    ledthread = LED.LED_ctrl()
    ledthread.start()

    # Optional speech-recognition worker.
    if SR_dect:
        sr = SR_ctrl()
        sr.start()

    # Wait for a client on the command channel; retry (and possibly start
    # AP mode via wifi_check) until an accept succeeds.
    while 1:
        wifi_check()
        try:
            tcpSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcpSerSock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
            tcpSerSock.bind(ADDR)
            tcpSerSock.listen(5)  # Start server, waiting for client
            print('waiting for connection...')
            tcpCliSock, addr = tcpSerSock.accept()
            print('...connected from :', addr)
            break
        except:
            led.colorWipe(0,0,0)
    try:
        led.colorWipe(0,80,255)
    except:
        pass

    # Video/OpenCV stream in a daemon thread, then the command loop.
    fpv=FPV.FPV()
    fps_threading=threading.Thread(target=FPV_thread) #Define a thread for FPV and OpenCV
    fps_threading.setDaemon(True) #daemon: exits together with the main program
    fps_threading.start() #Thread starts
    # BUGFIX: the original called run() once *outside* the try block and
    # again inside it; the first (unprotected) call made the cleanup
    # handler below unreachable. Only the guarded call is kept.
    try:
        run()
    except:
        servo_move.stop()
        led.colorWipe(0,0,0)
        servo.clean_all()
        move.destroy()
| adeept/adeept_picar-b | server/server.py | server.py | py | 16,211 | python | en | code | 21 | github-code | 13 |
17045551064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FliggyPoiInfo import FliggyPoiInfo
class AlipayOverseasTravelFliggyPoiCreateModel(object):
    """Request model for the alipay.overseas.travel.fliggy.poi.create API.

    Plain property-backed holder; (de)serialization walks the field list
    so every field follows the same truthy-value / nested-model rules.
    """

    # All request fields, in declaration order, used by to/from_alipay_dict.
    _FIELDS = ('data_version', 'ext_info', 'global_shop_id',
               'poi_data', 'request_id', 'task_subtype')

    def __init__(self):
        self._data_version = None
        self._ext_info = None
        self._global_shop_id = None
        self._poi_data = None
        self._request_id = None
        self._task_subtype = None

    @property
    def data_version(self):
        return self._data_version

    @data_version.setter
    def data_version(self, value):
        self._data_version = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def global_shop_id(self):
        return self._global_shop_id

    @global_shop_id.setter
    def global_shop_id(self, value):
        self._global_shop_id = value

    @property
    def poi_data(self):
        return self._poi_data

    @poi_data.setter
    def poi_data(self, value):
        # Coerce raw dicts into the nested FliggyPoiInfo model.
        if isinstance(value, FliggyPoiInfo):
            self._poi_data = value
        else:
            self._poi_data = FliggyPoiInfo.from_alipay_dict(value)

    @property
    def request_id(self):
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        self._request_id = value

    @property
    def task_subtype(self):
        return self._task_subtype

    @task_subtype.setter
    def task_subtype(self, value):
        self._task_subtype = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict; nested models
        that expose to_alipay_dict() are serialized recursively."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input.
        Assignment goes through the property setters, so nested fields
        are coerced exactly as direct attribute assignment would."""
        if not d:
            return None
        o = AlipayOverseasTravelFliggyPoiCreateModel()
        for field in AlipayOverseasTravelFliggyPoiCreateModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOverseasTravelFliggyPoiCreateModel.py | AlipayOverseasTravelFliggyPoiCreateModel.py | py | 3,684 | python | en | code | 241 | github-code | 13 |
73907473938 | """final
Revision ID: 31fff8168895
Revises:
Create Date: 2023-10-07 23:14:02.633868
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '31fff8168895'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the bookshop schema: book, user, then the tables that
    reference them (address, cart_item, library_books). Creation order
    matters because of the foreign-key constraints."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('book',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=100), nullable=False),
    sa.Column('isbn_13', sa.String(), nullable=True),
    sa.Column('author', sa.String(length=100), nullable=True),
    sa.Column('price', sa.Float(), nullable=True),
    sa.Column('image', sa.String(length=100), nullable=True),
    sa.Column('publisher', sa.String(length=100), nullable=True),
    sa.Column('published', sa.Date(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('category', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('isbn_13')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('_password_hash', sa.String(), nullable=True),
    sa.Column('email', sa.String(length=100), nullable=False),
    sa.Column('full_name', sa.String(length=100), nullable=False),
    sa.Column('is_admin', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # address belongs to a user (FK user_id).
    op.create_table('address',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('street', sa.String(length=255), nullable=False),
    sa.Column('city', sa.String(length=100), nullable=False),
    sa.Column('state', sa.String(length=50), nullable=False),
    sa.Column('postal_code', sa.String(length=20), nullable=False),
    sa.Column('country', sa.String(length=100), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_address_user_id_user')),
    sa.PrimaryKeyConstraint('id')
    )
    # cart_item links a user to a book with a quantity.
    op.create_table('cart_item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.Column('added_date', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('book_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['book_id'], ['book.id'], name=op.f('fk_cart_item_book_id_book')),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_cart_item_user_id_user')),
    sa.PrimaryKeyConstraint('id')
    )
    # library_books: user<->book association table (no primary key).
    op.create_table('library_books',
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('book_id', sa.Integer(), nullable=True),
    sa.Column('date_added', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['book_id'], ['book.id'], name=op.f('fk_library_books_book_id_book')),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_library_books_user_id_user'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables, reversing upgrade(); dependents (FK holders) are
    dropped before the tables they reference."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('library_books')
    op.drop_table('cart_item')
    op.drop_table('address')
    op.drop_table('user')
    op.drop_table('book')
    # ### end Alembic commands ###
| Renardo1985/BookShop | server/migrations/versions/31fff8168895_final.py | 31fff8168895_final.py | py | 3,272 | python | en | code | 0 | github-code | 13 |
71497083219 | # 언어 : Python
# 날짜 : 2022.1.20
# 문제 : BOJ > 가장 먼 노드 (https://programmers.co.kr/learn/courses/30/lessons/49189)
# 레벨 : level 3
# =====================================================================================
from collections import deque, defaultdict, Counter
def solution(n, edge):
    """Count how many nodes lie at the maximum BFS distance from node 1.

    Standard O(V+E) breadth-first search over an undirected graph.
    Improvement over the original: nodes are marked visited when first
    *enqueued* (the original enqueued duplicates and filtered them on
    dequeue with an extra visited list and a redundant min()).

    :param n: number of nodes, labelled 1..n
    :param edge: list of undirected edges [a, b]
    :return: count of nodes whose shortest distance from node 1 is maximal
    """
    adjacency = defaultdict(list)
    for a, b in edge:
        adjacency[a].append(b)
        adjacency[b].append(a)

    distance = [-1] * (n + 1)  # -1 = not yet reached; index 0 unused
    distance[1] = 0
    queue = deque([1])
    while queue:
        node = queue.popleft()
        for neighbor in adjacency[node]:
            if distance[neighbor] == -1:
                distance[neighbor] = distance[node] + 1
                queue.append(neighbor)

    farthest = max(distance[1:])
    return distance.count(farthest)
n = 6
edge = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]
result = solution(n, edge)
print(result) | eunseo-kim/Algorithm | programmers/코딩테스트 고득점 Kit/그래프/01_가장 먼 노드.py | 01_가장 먼 노드.py | py | 1,072 | python | en | code | 1 | github-code | 13 |
from utils import *

# Load the four competition attachment sheets (shipments, products,
# sellers, warehouses) and join them on their shared keys.
data1 = pd.read_excel('附件表/附件1-商家历史出货量表.xlsx', engine = 'openpyxl')
data2 = pd.read_excel('附件表/附件2-商品信息表.xlsx', engine = 'openpyxl')
data3 = pd.read_excel('附件表/附件3-商家信息表.xlsx', engine = 'openpyxl')
data4 = pd.read_excel('附件表/附件4-仓库信息表.xlsx', engine = 'openpyxl')
data = pd.merge(data1,data2)
data = pd.merge(data,data3)
data = pd.merge(data,data4)

# Order rows per (seller, product, warehouse) time series and fill
# missing shipment quantities by linear interpolation.
data = data.sort_values(by=['seller_no', 'product_no', 'warehouse_no', 'date'])
data['qty'].interpolate(method='linear', inplace=True)

# Mappings from string identifiers / category labels to integer codes.
seller_dict = {f'seller_{i}': i for i in range(38)}
product_dict = {f'product_{i}': i for i in range(2001)}
warehouse_dict = {f'wh_{i}': i for i in range(60)}
seller_category_dict = {'宠物健康':0,'宠物生活':1,'厨具':2,'电脑、办公':3,'服饰内衣':4,'个人护理':5,'家居日用':6,'家具':7,'家用电器':8,'家装建材':9,'居家生活':10,'美妆护肤':11,'食品饮料':12,'手机通讯':13,'数码':14,'玩具乐器':15,'医疗保健':16}
inventory_category_dict = {'A':0,'B':1,'C':2,'D':3}
seller_level_dict = {'Large':0,'Medium':1,'New':2,'Small':3,'Special':4}
warehouse_category_dict = {'区域仓':0,'中心仓':1}
warehouse_region = {'东北':0,'华北':1,'华东':2,'华南':3,'华中':4,'西北':5,'西南':6}

# Encode all categorical columns in place (note: column names
# 'warehouse _category' / 'warehouse _region' really contain a space).
mapped_data=data
mapped_data['seller_no'] = mapped_data['seller_no'].map(seller_dict)
mapped_data['product_no'] = mapped_data['product_no'].map(product_dict)
mapped_data['warehouse_no'] = mapped_data['warehouse_no'].map(warehouse_dict)
mapped_data['seller_category'] = mapped_data['seller_category'].map(seller_category_dict)
mapped_data['inventory_category'] = mapped_data['inventory_category'].map(inventory_category_dict)
mapped_data['seller_level'] = mapped_data['seller_level'].map(seller_level_dict)
mapped_data['warehouse _category'] = mapped_data['warehouse _category'].map(warehouse_category_dict)
mapped_data['warehouse _region'] = mapped_data['warehouse _region'].map(warehouse_region)
columns_to_drop = ['date', 'category1', 'category2','category3']
mapped_data.drop(columns=columns_to_drop, inplace=True)

# Group per (seller, product, warehouse) series, then keep/convert them
# via utils.filter.  NOTE(review): `filter` shadows the builtin and is
# presumably defined in utils (imported via *) — confirm its contract;
# downstream code assumes it returns an indexable sequence of 1996 groups.
grouped = mapped_data.groupby(['seller_no', 'product_no', 'warehouse_no'])
grouped = filter(grouped)

# Per-group column means -> one 9-feature row per series.
averages = np.zeros((1996, 9))
for i in range(1996):
    group = grouped[i]
    for j in range(9):
        column_average = np.mean(group.iloc[:, j])
        averages[i, j] = column_average
# Drop the three id columns, keeping only the numeric features.
averages_new = averages[:, 3:]

# Elbow curve: SSE (inertia) for k = 1..10.
sse = []
k_values = range(1, 11)
for k in k_values:
    kmeans = KMeans(n_clusters=k)
    kmeans.fit(averages_new)
    sse.append(kmeans.inertia_)

# Final clustering with k = 2 and a scatter of the first two features.
kmeans = KMeans(n_clusters=2)
labels = kmeans.fit_predict(averages_new)
centers = kmeans.cluster_centers_
print(labels)
print(centers)
plt.scatter(averages_new[:, 0], averages_new[:, 1], c=labels)
plt.scatter(centers[:, 0], centers[:, 1], marker='x', c='red')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.title('Clustering Results')
plt.show() | Andd54/Mathor_Cup_Project | Question1(2).py | Question1(2).py | py | 2,985 | python | en | code | 0 | github-code | 13 |
12740657893 | from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import MultiStepLR
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
import gpytorch
from deep_gp.models.deep_kernel_model import DKLModel, DenseNetFeatureExtractor
# CIFAR preprocessing: train uses crop+flip augmentation, test only
# tensor conversion + per-channel normalization.
normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])
aug_trans = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]
common_trans = [transforms.ToTensor(), normalize]
train_compose = transforms.Compose(aug_trans + common_trans)
test_compose = transforms.Compose(common_trans)

# Create Dataloaders for the selected dataset.
dataset = 'cifar10'
if dataset == 'cifar10':
    d_func = dset.CIFAR10
    train_set = dset.CIFAR10('data', train=True, transform=train_compose, download=True)
    test_set = dset.CIFAR10('data', train=False, transform=test_compose)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)
    num_classes = 10
elif dataset == 'cifar100':
    d_func = dset.CIFAR100
    train_set = dset.CIFAR100('data', train=True, transform=train_compose, download=True)
    test_set = dset.CIFAR100('data', train=False, transform=test_compose)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)
    num_classes = 100
else:
    raise RuntimeError('dataset must be one of "cifar100" or "cifar10"')

# Deep-kernel model: DenseNet features feeding a GP layer, with a
# softmax likelihood over the GP outputs. Everything lives on the GPU.
feature_extractor = DenseNetFeatureExtractor(block_config=(6, 6, 6), n_channels=3, num_classes=num_classes).cuda()
num_features = feature_extractor.classifier.in_features
model = DKLModel(feature_extractor, num_dim=num_features).cuda()
likelihood = gpytorch.likelihoods.SoftmaxLikelihood(num_features=model.num_dim, n_classes=num_classes).cuda()

# Define Training and Testing. GP hyperparameters get a 100x smaller
# learning rate than the network / variational parameters.
n_epochs = 300
lr = 0.1
optimizer = SGD([
    {'params': model.feature_extractor.parameters()},
    {'params': model.gp_layer.hyperparameters(), 'lr': lr * 0.01},
    {'params': model.gp_layer.variational_parameters()},
    {'params': likelihood.parameters()},
], lr=lr, momentum=0.9, nesterov=True, weight_decay=0)
scheduler = MultiStepLR(optimizer, milestones=[0.5 * n_epochs, 0.75 * n_epochs], gamma=0.1)
def train(epoch):
    """Run one training epoch, minimizing the negative variational ELBO.

    Uses the module-level model/likelihood/optimizer/train_loader; logs
    the batch loss every 25 batches.

    :param epoch: 1-based epoch number (for log messages only).
    """
    model.train()
    likelihood.train()
    mll = gpytorch.mlls.VariationalELBO(likelihood, model.gp_layer, num_data=len(train_loader.dataset))
    train_loss = 0.  # NOTE(review): never accumulated/used below
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = -mll(output, target)  # maximize ELBO = minimize its negation
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 25 == 0:
            print('Train Epoch: %d [%03d/%03d], Loss: %.6f' % (epoch, batch_idx + 1, len(train_loader), loss.item()))
def test():
    """Evaluate classification accuracy on the held-out test set.

    Uses the module-level model/likelihood/test_loader. Predictions are
    the argmax of the likelihood's class probabilities; accuracy is
    printed, not returned.
    """
    # Removed the redundant function-local `import torch` — torch is
    # already imported at module level and a local import only re-binds
    # the same module object each call.
    model.eval()
    likelihood.eval()

    correct = 0
    for data, target in test_loader:
        data, target = data.cuda(), target.cuda()
        with torch.no_grad():
            output = likelihood(model(data))  # marginal class distribution
            pred = output.probs.argmax(1)
            correct += pred.eq(target.view_as(pred)).cpu().sum()
    print('Test set: Accuracy: {}/{} ({}%)'.format(
        correct, len(test_loader.dataset), 100. * correct / float(len(test_loader.dataset))
    ))
# Train the Model: step the LR schedule each epoch, then train and
# evaluate under gpytorch settings that disable Toeplitz math and
# preconditioning (memory/speed trade-off for this model).
for epoch in range(1, n_epochs + 1):
    scheduler.step()
    with gpytorch.settings.use_toeplitz(False), gpytorch.settings.max_preconditioner_size(0):
        train(epoch)
        test()

# Persist both the model and likelihood weights in one checkpoint file.
state_dict = model.state_dict()
likelihood_state_dict = likelihood.state_dict()
torch.save({'model': state_dict, 'likelihood': likelihood_state_dict}, 'dkl_cifar_checkpoint.dat')
| AlbertoCastelo/bayesian-dl-medical-diagnosis | tests/test_gpytorch.py | test_gpytorch.py | py | 3,855 | python | en | code | 0 | github-code | 13 |
43231703074 |
import sys
import random
import numpy as np
import pygame as pg
import vars
sys.path.append('./')
try:
from Graph_package.MovableVertex import MovableVertex2D, interact_manager
from Graph_package.Graph2D import Graph2D, InteractiveGraph2D
from GUI_package.Pygame_package import Graph_drawer
from RSA.RoadSearchAlgorithm import RSA
except Exception as e:
assert (e)
def main():
    """Interactive ant-colony road-search (RSA) demo.

    Builds a random 2-D graph of N draggable vertices, attaches the RSA
    solver with random vertex priorities, and runs a pygame loop that
    redraws the graph and advances one RSA iteration per frame.
    Keys: UP = extra iteration, R = reset pheromone, P = new random
    priorities (+ pheromone reset). Quit via the window close button.
    """
    MovableVertex2D.R = vars.VERTEX_R
    clock = pg.time.Clock()
    screen: pg.Surface = pg.display.set_mode((vars.WIDTH, vars.HEIGHT))
    N = vars.N
    # Random vertex layout with a 30 px margin from the window edges.
    g = InteractiveGraph2D()
    g.set_from_list(
        [[random.uniform(30, vars.WIDTH-30), random.uniform(30, vars.HEIGHT-30)] for i in range(N)])
    rsa = RSA(vars.Q, vars.ALPHA, vars.BETA,
              vars.GAMMA, vars.PH_R, vars.PR_C)
    rsa.set_graph(g)
    rsa.set_vertex_priority(np.random.randint(1, 100, N))
    # Reset drag state on the shared interaction manager.
    interact_manager.curr_v = None
    interact_manager.pressed = False
    while True:
        for e in pg.event.get():
            if e.type == pg.QUIT:
                pg.quit()
                sys.exit()
            # Forward mouse motion to the drag handler for every event.
            dxy = pg.mouse.get_rel()
            interact_manager(e, rsa.G.V, dxy)
            if e.type == pg.KEYDOWN and pg.key.get_pressed()[pg.K_UP]:
                rsa.iterate()
            if e.type == pg.KEYDOWN and pg.key.get_pressed()[pg.K_r]:
                rsa.reset_pheromone()
            if e.type == pg.KEYDOWN and pg.key.get_pressed()[pg.K_p]:
                rsa.set_vertex_priority(np.random.randint(1, 100, N))
                rsa.reset_pheromone()
        # Redraw and advance the solver once per frame.
        screen.fill(vars.colors.BLACK)
        rsa.G.update_edges()
        Graph_drawer.draw_graph(
            screen, N, rsa.G.vertex_as_list, rsa.get_result(), rsa.vertex_priority)
        rsa.iterate()
        clock.tick(vars.FPS)
        pg.display.update()
# Run the interactive demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
| VY354/my_repository | Python/projects/swarm_intelligence/road_search_algorithm/main.py | main.py | py | 1,876 | python | en | code | 0 | github-code | 13 |
from sqlalchemy import create_engine, text

# Build the list of Discourse topic JSON URLs from topics already stored
# in the local SQLite database.

# Database engine
engine = create_engine("sqlite:///rocketpool.db")

# Query the database directly with raw SQL
with engine.connect() as connection:
    result = connection.execute(text("SELECT id, slug FROM protocol_topics"))
    # Construct URLs and store them in a list
    # Note: result rows are tuples, so use indices to access elements
    urls = [f"https://dao.rocketpool.net/t/{row[1]}/{row[0]}.json" for row in result]

# Print out the list of URLs
print("List of URLs:")
for url in urls:
    print(url)
from sqlalchemy import (
create_engine,
Column,
Integer,
String,
DateTime,
JSON,
Float,
Boolean,
)
from sqlalchemy.orm import declarative_base, sessionmaker
import requests
import datetime
# SQLAlchemy Model
Base = declarative_base()
class ProtocolTopicsPost(Base):
    """ORM model for one post from a Rocket Pool Discourse topic,
    mirroring the fields of the topic JSON's post_stream.posts entries."""
    __tablename__ = "protocol_topics_post"

    id = Column(Integer, primary_key=True)        # Discourse post id
    name = Column(String)                         # author's display name
    username = Column(String)
    created_at = Column(DateTime)
    cooked = Column(String)                       # rendered HTML body
    post_number = Column(Integer)                 # position within the topic
    reply_to_post_number = Column(Integer, nullable=True)
    updated_at = Column(DateTime)
    incoming_link_count = Column(Integer)
    reads = Column(Integer)
    readers_count = Column(Integer)
    score = Column(Float)                         # Discourse relevance score
    topic_id = Column(Integer)
    topic_slug = Column(String)
    user_id = Column(Integer)
    user_title = Column(String, nullable=True)
    trust_level = Column(Integer)
    moderator = Column(Boolean, nullable=True)
    admin = Column(Boolean, nullable=True)
    staff = Column(Boolean, nullable=True)
    stream = Column(JSON)  # Storing the raw post-id stream as JSON
# Define a session factory bound to the same SQLite database.
engine = create_engine("sqlite:///rocketpool.db")

# Create the table (no-op if it already exists).
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

# Fetch JSON data for each URL collected above; non-200 responses are
# reported and skipped.
json_responses = []
for url in urls:
    response = requests.get(url)
    if response.status_code == 200:
        json_responses.append(response.json())
    else:
        print(
            f"Failed to retrieve data for URL {url}: HTTP Status Code",
            response.status_code,
        )

# Process each JSON response and insert data into the database
session = Session()

# Insert JSON data into the database. Timestamps arrive as ISO-8601
# strings with a trailing 'Z'; that suffix is stripped before parsing
# (the stored datetimes are therefore naive UTC).
for json_data in json_responses:
    posts_data = json_data.get("post_stream", {}).get("posts", [])
    for post in posts_data:
        try:
            post_entry = ProtocolTopicsPost(
                id=post.get("id"),
                name=post.get("name"),
                username=post.get("username"),
                created_at=datetime.datetime.fromisoformat(
                    post["created_at"].rstrip("Z")
                )
                if post.get("created_at")
                else None,
                cooked=post.get("cooked"),
                post_number=post.get("post_number"),
                reply_to_post_number=post.get("reply_to_post_number"),
                updated_at=datetime.datetime.fromisoformat(
                    post["updated_at"].rstrip("Z")
                )
                if post.get("updated_at")
                else None,
                incoming_link_count=post.get("incoming_link_count"),
                reads=post.get("reads"),
                readers_count=post.get("readers_count"),
                score=post.get("score"),
                topic_id=post.get("topic_id"),
                topic_slug=post.get("topic_slug"),
                user_id=post.get("user_id"),
                user_title=post.get("user_title"),
                trust_level=post.get("trust_level"),
                moderator=post.get("moderator"),
                admin=post.get("admin"),
                staff=post.get("staff"),
                stream=post.get("stream"),
            )
            session.add(post_entry)
        except Exception as e:
            # Per-post failures are logged and skipped; the batch continues.
            print(f"Error inserting post data: {e}")

# One commit for the whole batch, then release the session.
session.commit()
session.close()
| PaulApivat/data_engineer | practice/discourse/rocketpool/pipeline/post_model.py | post_model.py | py | 3,951 | python | en | code | 0 | github-code | 13 |
36681894515 | from typing import List
import numpy as np
from reward_shaping.core.reward import RewardFunction
from reward_shaping.core.utils import clip_and_norm
from reward_shaping.envs.lunar_lander.specs import get_all_specs
gamma = 1.0
def safety_collision_potential(state, info):
    """Binary safety potential: 1 while no collision has occurred, else 0."""
    assert "collision" in state
    no_collision = state["collision"] <= 0
    return int(no_collision)
def safety_exit_potential(state, info):
    """Binary safety potential: 1 while the lander stays within |x| <= x_limit."""
    assert "x" in state and "x_limit" in info
    inside = abs(state["x"]) <= info["x_limit"]
    return int(inside)
def target_dist_to_goal_potential(state, info):
    """Target potential: 1 at the goal, decreasing with Euclidean distance
    (distance clipped/normalized over [0, 1.5] via clip_and_norm)."""
    dx = state["x"] - info["x_target"]
    dy = state["y"] - info["y_target"]
    dist_goal = np.linalg.norm([dx, dy])
    return 1.0 - clip_and_norm(dist_goal, 0, 1.5)
def comfort_angle_potential(state, info):
    """Comfort potential: 1 minus the normalized excess of |angle| beyond
    info["angle_limit"] (excess computed by clip_and_norm up to 1.0)."""
    excess = clip_and_norm(abs(state["angle"]), info["angle_limit"], 1.0)
    return 1 - excess
def comfort_angvel_potential(state, info):
    """Comfort potential: 1 minus the normalized excess of |angle_speed|
    beyond info["angle_speed_limit"] (via clip_and_norm up to 1.0)."""
    excess = clip_and_norm(abs(state["angle_speed"]), info["angle_speed_limit"], 1.0)
    return 1 - excess
def simple_base_reward(state, info):
    """Sparse base reward: 1.0 when the lander is inside the landing box
    (|x| <= halfwidth_landing_area and |y| <= landing_height), else 0.0."""
    inside_x = abs(state["x"]) <= info["halfwidth_landing_area"]
    inside_y = abs(state["y"]) <= info["landing_height"]
    return 1.0 if inside_x and inside_y else 0.0
class LLHierarchicalShapingOnSparseTargetReward(RewardFunction):
    """Sparse target reward plus hierarchical potential-based shaping:
    comfort terms are gated by target progress, which is in turn gated by
    the binary safety indicators."""

    def _safety_potential(self, state, info):
        """Sum of the binary collision and exit-safety potentials."""
        return safety_collision_potential(state, info) + safety_exit_potential(state, info)

    def _target_potential(self, state, info):
        """Distance-to-goal potential, gated by the product of the safety indicators."""
        gate = safety_collision_potential(state, info) * safety_exit_potential(state, info)
        return gate * target_dist_to_goal_potential(state, info)

    def _comfort_potential(self, state, info):
        """Angle and angular-velocity comfort, gated by safety and target progress."""
        gate = safety_collision_potential(state, info) * safety_exit_potential(state, info)
        progress = target_dist_to_goal_potential(state, info)
        comfort = comfort_angle_potential(state, info) + comfort_angvel_potential(state, info)
        return gate * progress * comfort

    def __call__(self, state, action=None, next_state=None, info=None) -> float:
        base_reward = simple_base_reward(next_state, info)
        if info["done"]:
            return base_reward
        # potential-based shaping: gamma * phi(s') - phi(s) for each level
        reward = base_reward
        for potential in (self._safety_potential, self._target_potential, self._comfort_potential):
            reward += gamma * potential(next_state, info) - potential(state, info)
        return reward
class LLHierarchicalShapingOnSparseTargetRewardNoComfort(RewardFunction):
    """Hierarchical shaping on the sparse target reward, without the
    comfort requirements (safety and target terms only)."""

    def _safety_potential(self, state, info):
        """Sum of the binary collision and exit-safety potentials."""
        return safety_collision_potential(state, info) + safety_exit_potential(state, info)

    def _target_potential(self, state, info):
        """Distance-to-goal potential, gated by the safety indicators."""
        gate = safety_collision_potential(state, info) * safety_exit_potential(state, info)
        return gate * target_dist_to_goal_potential(state, info)

    def __call__(self, state, action=None, next_state=None, info=None) -> float:
        base_reward = simple_base_reward(next_state, info)
        if info["done"]:
            return base_reward
        # potential-based shaping: gamma * phi(s') - phi(s) per level
        reward = base_reward
        for potential in (self._safety_potential, self._target_potential):
            reward += gamma * potential(next_state, info) - potential(state, info)
        return reward
class LLScalarizedMultiObjectivization(RewardFunction):
    """Linear scalarization of the multi-objectivized requirements.

    Each requirement (collision safety, exit safety, target distance,
    angle comfort, angular-velocity comfort) contributes its own
    potential-based shaping term; the terms are combined with a fixed
    weight vector that must sum to 1.
    """

    def __init__(self, weights: List[float], **kwargs):
        assert len(weights) == len(get_all_specs()), f"nr weights ({len(weights)}) != nr reqs {len(get_all_specs())}"
        # bug fix: the original check `sum(weights) - 1.0 <= 0.0001` passed for
        # ANY weight sum below 1.0; compare the absolute error instead.
        assert abs(sum(weights) - 1.0) <= 0.0001, f"sum of weights ({sum(weights)}) != 1.0"
        self._weights = weights

    def __call__(self, state, action=None, next_state=None, info=None) -> float:
        base_reward = simple_base_reward(next_state, info)
        if info["done"]:
            return base_reward
        # evaluate individual shaping terms: gamma * phi(s') - phi(s)
        shaping_collision = gamma * safety_collision_potential(next_state, info) - safety_collision_potential(state, info)
        shaping_exit = gamma * safety_exit_potential(next_state, info) - safety_exit_potential(state, info)
        shaping_target = gamma * target_dist_to_goal_potential(next_state, info) - target_dist_to_goal_potential(state, info)
        shaping_comf_ang = gamma * comfort_angle_potential(next_state, info) - comfort_angle_potential(state, info)
        shaping_comf_angvel = gamma * comfort_angvel_potential(next_state, info) - comfort_angvel_potential(state, info)
        # linear scalarization of the multi-objectivized requirements
        reward = base_reward
        for w, f in zip(self._weights, [shaping_collision, shaping_exit, shaping_target, shaping_comf_ang, shaping_comf_angvel]):
            reward += w * f
        return reward
class LLUniformScalarizedMultiObjectivization(LLScalarizedMultiObjectivization):
    """Scalarized multi-objectivization with equal weight on all 5 requirements."""

    def __init__(self, **kwargs):
        weights = np.ones(5)
        weights /= np.sum(weights)
        super().__init__(weights=weights, **kwargs)
class LLDecreasingScalarizedMultiObjectivization(LLScalarizedMultiObjectivization):
    """Scalarized multi-objectivization with weights decreasing by
    requirement class: safety 1.0, target 0.5, comfort 0.25 (normalized)."""

    def __init__(self, **kwargs):
        raw = np.array([1.0, 1.0, 0.5, 0.25, 0.25])
        weights = raw / raw.sum()
        super().__init__(weights=weights, **kwargs)
| EdAlexAguilar/reward_shaping | reward_shaping/envs/lunar_lander/rewards/potential.py | potential.py | py | 6,192 | python | en | code | 0 | github-code | 13 |
74128043539 | import cv2
import os
import numpy as np
import face_recognition as fr
import time
from facerec import face_data_encodings
from vars import *
dataset = os.listdir(folder_name)
# Drop the first directory entry. NOTE(review): os.listdir order is
# platform-dependent — confirm which entry this is meant to skip.
dataset = dataset[1:]
checked = [False]*len(dataset)  # per-person "already recognized" flags
test_images = os.listdir(test_set)
def isthere(ret):
    """Report whether any known face matched, marking the first match.

    `ret` is the boolean list returned by fr.compare_faces, aligned with the
    module-level `dataset`/`checked` lists.  On the first True entry the
    corresponding `checked` flag is set and the person's name (file name
    without extension) is printed.

    Bug fix: the original returned True unconditionally, so the caller's
    `if isthere(ret): break` fired even when nothing matched.  Now returns
    True only when a match was found.
    """
    for i, matched in enumerate(ret):
        if matched:
            checked[i] = True
            print(dataset[i].split('.')[0])
            return True
    return False
# for checking efficiency
# start = time.time()
# Run face recognition over every image in the test set, comparing each
# detected face against the known encodings.
for i in test_images:
    curr_image = cv2.imread(f'./{test_set}/{i}')
    faces = fr.face_locations(curr_image)
    print(len(faces))
    # faces = np.ndarray(faces)
    for face in faces:
        # (a, b, c, d) appear to be (top, right, bottom, left): the slice
        # below crops rows a:c and columns d:b (cf. the "d,a b,c" note at
        # the end of the file) — confirm against face_recognition docs.
        a,b,c,d = face
        frame = curr_image[a:c,d:b]
        # NOTE(review): [0] raises IndexError when no encoding is found
        # in the cropped frame.
        curr_encoding = fr.face_encodings(frame)[0]
        # print(fr.face_encodings(frame))
        ret = fr.compare_faces(face_data_encodings, curr_encoding)
        print(ret)
        # Stop scanning this image's remaining faces once a match is reported.
        if isthere(ret):
            break
# print(time.time()-start)
# d,a b,c heloloi | Charan2k/rec-face | prediction.py | prediction.py | py | 1,020 | python | en | code | 0 | github-code | 13 |
74388856017 | # Tetris en Python
# Desarrollado por Santiago Menendez, pero no llegue a los 40 minutos permitidos por lo que no quede
import os
import random
import time
import keyboard
# Blocks
class Block:
    """Base tetromino: a grid position (x, y) plus a 2-D 0/1 shape matrix."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        self.shape = []    # rows of 0/1 cell flags, filled in by subclasses
        self.x_shape = 0   # width of the shape matrix
        self.y_shape = 0   # height of the shape matrix

    def move(self, move):
        """Shift one cell left/right; "down" (or anything else) drops one row."""
        if move == "left":
            self.x -= 1
        elif move == "right":
            self.x += 1
        else:
            # "down" and any unrecognised direction both fall one row,
            # matching the original behaviour.
            self.y += 1

    def rotate(self):
        """Rotate the shape matrix 90 degrees clockwise and swap its dimensions."""
        self.shape = list(zip(*self.shape[::-1]))
        self.x_shape, self.y_shape = self.y_shape, self.x_shape

    def __str__(self):
        return str(self.__class__.__name__)
class OShape(Block):
    """The 2x2 square piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[1, 1], [1, 1]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class IShape(Block):
    """The 1x4 straight piece (stored vertically)."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[1], [1], [1], [1]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class JShape(Block):
    """The J piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[0, 1], [0, 1], [1, 1]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class LShape(Block):
    """The L piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[1, 0], [1, 0], [1, 1]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class TShape(Block):
    """The T piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[1, 1, 1], [0, 1, 0]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class SShape(Block):
    """The S piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[0, 1, 1], [1, 1, 0]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class ZShape(Block):
    """The Z piece."""

    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self.shape = [[1, 1, 0], [0, 1, 1]]
        # derive dimensions from the literal instead of hard-coding them
        self.y_shape = len(self.shape)
        self.x_shape = len(self.shape[0])
class Game:
    """Console Tetris: owns the board, the falling block, the score and the
    main tick loop.  Cells hold " " (empty), "X" (falling block) or "0"
    (settled block).  Input is polled via the `keyboard` module; drawing
    clears the console with `cls` (Windows-only)."""
    def __init__(self):
        # Board dimensions in cells.
        self.x_table = 10
        self.y_table = 15
        self.y_start_block = 0
        self.table = [
            [" " for _ in range(self.x_table)]
            for _ in range(self.y_table + self.y_start_block)
        ]
        self.lines = 0
        # velocity = number of ticks between automatic drops (lower = faster).
        self.velocity = 5
        self.velocity_ticks = 5
        self.lose = False
        self.block = None
        self.next_block = None
        self.ticks = 0
        self.moved = True
    def start(self):
        """Run the game loop at roughly 10 ticks per second until the player loses."""
        self.block = self.generate_block(self.x_table // 2, 0)
        self.next_block = self.generate_block(self.x_table // 2, 0)
        while not self.lose:
            self.update()
            time.sleep(0.1)
    def delete_lines(self):
        """Remove every row made entirely of settled cells ("0"), shifting the
        rows above it down, and return how many rows were removed."""
        # Delete lines
        lines_deleted = 0
        for i in range(0, self.y_table):
            delete_line = True
            for j in range(0, self.x_table):
                if self.table[i][j] != "0":
                    delete_line = False
                    break
            if delete_line:
                self.lines += 1
                lines_deleted += 1
                # Shift everything above row i down by one row.
                for k in range(i, 0, -1):
                    for j in range(0, self.x_table):
                        self.table[k][j] = self.table[k - 1][j]
                for j in range(0, self.x_table):
                    self.table[0][j] = " "
        return lines_deleted
    def clear_table(self):
        """Erase the falling block's "X" cells; settled "0" cells are kept."""
        # Clear table
        for i in range(self.y_table):
            for j in range(self.x_table):
                if self.table[i][j] != "0":
                    self.table[i][j] = " "
    def draw(self):
        """Redraw the whole board and the status line on the console."""
        # Clear console
        os.system("cls")
        # Print top border
        for j in range(self.x_table + 2):
            print("-", end="")
        print()
        # Print table
        for i in range(self.y_start_block, self.y_table):
            for j in range(self.x_table):
                if j == 0:
                    print("|", end="")
                print(self.table[i][j], end="")
                if j == self.x_table - 1:
                    print("|", end="")
            print()
        # Print bottom border and info
        for j in range(self.x_table + 2):
            print("-", end="")
        print()
        print("Next block: " + str(self.next_block))
        print(
            "Lines: "
            + str(self.lines)
            + " | Ticks: "
            + str(self.ticks)
            + " | Velocity: "
            + str(self.velocity)
        )
        if self.block is not None:
            print("Block coords: " + str(self.block.x) + ", " + str(self.block.y))
        if self.lose:
            print("You lose!")
    def generate_block(self, x, y):
        """Return a random tetromino at (x, y).  NOTE(review): as a side
        effect this also promotes next_block into block when next_block is
        set; in update() that assignment is redundant because the caller has
        just done it — confirm the side effect is intentional."""
        if self.next_block is not None:
            self.block = self.next_block
        return random.choice(
            [
                JShape(x, y),
                LShape(x, y),
                OShape(x, y),
                IShape(x, y),
                TShape(x, y),
                SShape(x, y),
                ZShape(x, y),
            ]
        )
    def update(self):
        """One game tick: clear full lines, spawn or move the block (keyboard
        + gravity), settle it on contact, and redraw."""
        # Update ticks
        self.ticks += 1
        self.velocity_ticks -= 1
        lines_deleted = self.delete_lines()
        # Speed up every 10 cleared lines (velocity floor of 1).
        if lines_deleted > 0 and self.lines % 10 == 0 and self.velocity > 1:
            self.velocity -= 1
        self.clear_table()
        if self.lose:
            return
        # Move or generate new block
        if self.block is None:
            # Generate new block in table
            self.block = self.next_block
            self.next_block = self.generate_block(self.x_table // 2, 0)
            # Check if a block obstructs the spawn
            for i in range(0, self.block.y_shape):
                for j in range(0, self.block.x_shape):
                    if self.block.shape[i][j] == 1:
                        if self.table[self.block.y + i][self.block.x + j] == "0":
                            self.lose = True
                            break
            # Update block in table
            for i in range(self.block.y, self.block.y + self.block.y_shape):
                for j in range(self.block.x, self.block.x + self.block.x_shape):
                    if self.block.shape[i - self.block.y][j - self.block.x] == 1:
                        self.table[i][j] = "X"
        else:
            # Check keyboard press or move down in x time
            # NOTE(review): self.moved starts True and is only ever set to
            # False here, so input is skipped on the first tick only — confirm.
            if self.moved:
                self.moved = False
            elif not self.moved:
                if keyboard.is_pressed("left"):
                    # Check collision left
                    # NOTE(review): unlike the "right" check this tests column
                    # x-1 for every occupied cell instead of x+j-1 — confirm.
                    move_left = True
                    for i in range(0, self.block.y_shape):
                        for j in range(0, self.block.x_shape):
                            # Collision with left border
                            if self.block.x == 0:
                                move_left = False
                                break
                            # Collision with another block
                            elif self.block.shape[i][j] == 1:
                                if (
                                    self.table[self.block.y + i][self.block.x - 1]
                                    == "0"
                                ):
                                    move_left = False
                                    break
                            else:
                                move_left = True
                        if not move_left:
                            break
                    if move_left:
                        self.block.move("left")
                elif keyboard.is_pressed("right"):
                    # Check collision right
                    move_right = True
                    for i in range(0, self.block.y_shape):
                        for j in range(0, self.block.x_shape):
                            # Collision with right border
                            if self.block.x + self.block.x_shape == self.x_table:
                                move_right = False
                                break
                            # Collision with another block
                            elif self.block.shape[i][j] == 1:
                                if (
                                    self.table[self.block.y + i][self.block.x + j + 1]
                                    == "0"
                                ):
                                    move_right = False
                                    break
                            else:
                                move_right = True
                        if not move_right:
                            break
                    if move_right:
                        self.block.move("right")
                elif keyboard.is_pressed("down"):
                    # Check colision down
                    move_down = True
                    for i in range(0, self.block.y_shape):
                        for j in range(0, self.block.x_shape):
                            # Collision with bottom border
                            if self.block.y + self.block.y_shape == self.y_table:
                                move_down = False
                                break
                            # Collision with another block
                            if self.block.shape[i][j] == 1:
                                if (
                                    self.table[self.block.y + i + 1][self.block.x + j]
                                    == "0"
                                ):
                                    move_down = False
                                    break
                            else:
                                move_down = True
                        if not move_down:
                            break
                    if move_down:
                        self.block.move("down")
                # Rotate key
                elif keyboard.is_pressed("up"):
                    # Check if block can rotate
                    # NOTE(review): only the right boundary is guarded here;
                    # rotating near the bottom can index past the table
                    # (self.block.y + j may exceed y_table) — confirm.
                    can_rotate = True
                    if self.block.x + self.block.y_shape > self.x_table:
                        can_rotate = False
                    else:
                        for i in range(self.block.y_shape):
                            for j in range(self.block.x_shape):
                                if self.block.shape[i][j] == 1:
                                    if (
                                        self.table[self.block.y + j][self.block.x + i]
                                        == "0"
                                    ):
                                        can_rotate = False
                                        break
                            if not can_rotate:
                                break
                    if can_rotate:
                        self.block.rotate()
                # Quit tetris
                elif keyboard.is_pressed("space"):
                    self.lose = True
            # Check if block is used
            # ("used" = the block touched the floor or a settled cell and
            # must be frozen into "0" cells.)
            used_block = False
            for i in range(0, self.block.y_shape):
                for j in range(0, self.block.x_shape):
                    # Collision with bottom border
                    if self.block.y + self.block.y_shape == self.y_table:
                        used_block = True
                        break
                    # Collision with another block
                    if self.block.shape[i][j] == 1:
                        if self.table[self.block.y + i + 1][self.block.x + j] == "0":
                            used_block = True
                            break
                    else:
                        used_block = False
                if used_block:
                    break
            if used_block:
                # Update block to used block in table
                for i in range(self.block.y, self.block.y + self.block.y_shape):
                    for j in range(self.block.x, self.block.x + self.block.x_shape):
                        if self.block.shape[i - self.block.y][j - self.block.x] == 1:
                            self.table[i][j] = "0"
                self.block = None
        # Update block in table
        if self.block is not None:
            for i in range(self.block.y, self.block.y + self.block.y_shape):
                for j in range(self.block.x, self.block.x + self.block.x_shape):
                    if self.block.shape[i - self.block.y][j - self.block.x] == 1:
                        self.table[i][j] = "X"
        # Push down block
        # (gravity: every `velocity` ticks the block falls one row)
        if self.velocity_ticks == 0:
            self.velocity_ticks = self.velocity
            if self.block is not None:
                # Check colision down
                move_down = True
                for i in range(0, self.block.y_shape):
                    for j in range(0, self.block.x_shape):
                        # Collision with bottom border
                        if self.block.y + self.block.y_shape == self.y_table:
                            move_down = False
                            break
                        # Collision with another block
                        if self.block.shape[i][j] == 1:
                            if (
                                self.table[self.block.y + i + 1][self.block.x + j]
                                == "0"
                            ):
                                move_down = False
                                break
                        else:
                            move_down = True
                    if not move_down:
                        break
                if move_down:
                    self.block.move("down")
        # Draw table
        self.draw()
if __name__ == "__main__":
    # Game.start() runs the blocking game loop and returns None, so the
    # original `game = Game().start()` only ever bound None — drop the
    # misleading assignment.
    Game().start()
| SantiMenendez19/tetris_game | tetris.py | tetris.py | py | 13,926 | python | en | code | 1 | github-code | 13 |
73996330258 | import socket
import time
import cv2
import numpy as np
from pred_net import YoloTest
import json
def start():
    """Serve YOLO detections over TCP on port 6606.

    Protocol per request: the client sends a 16-byte ASCII body length,
    then `length` bytes of an encoded (JPEG) image; the server replies
    with a JSON-encoded list of detection boxes.
    """
    address = ('0.0.0.0', 6606)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(address)
    s.listen(1)
    yolo = YoloTest()

    def recvpack(sock, count):
        """Receive exactly `count` bytes; on socket error or peer close,
        return whatever was received so far (possibly short)."""
        buf = b''
        while count:
            try:
                newbuf = sock.recv(count)
            except OSError:  # was a bare except that swallowed everything
                newbuf = None
            if not newbuf:
                print(len(buf))
                return buf
            buf += newbuf
            count -= len(newbuf)
        return buf

    def sendpack(sock, data):
        """Send `data` (str) as UTF-8 bytes."""
        sock.send(bytes(data, "UTF-8"))

    while True:
        conn, addr = s.accept()
        try:
            while 1:
                print('connect from:' + str(addr))
                length = recvpack(conn, 16)
                print("body_length")
                # If the client disconnected, `length` is short/empty and
                # int() raises — the outer handler then ends this session.
                print(int(length))
                stringData = recvpack(conn, int(length))
                print(stringData)
                data = np.frombuffer(stringData, np.uint8)
                decimg = cv2.imdecode(data, cv2.IMREAD_COLOR)
                print("decimg")
                cv2.imwrite("./test/test.jpg", decimg)
                boxes = yolo.get_img(decimg)
                print(boxes)
                dicJson = json.dumps(np.array(boxes).tolist())
                print("decimg...............")
                print(dicJson)
                sendpack(conn, dicJson)
                k = cv2.waitKey(10) & 0xff
                if k == 27:  # ESC stops serving this client
                    break
        except Exception as r:
            print(' %s' % (r,))
        finally:
            # Bug fix: the original called s.close() here, closing the
            # LISTENING socket after the first client session so the next
            # accept() failed.  Close only the per-client connection.
            conn.close()
if __name__ == '__main__':
    # Run the detection server when executed directly.
    start()
| hry8310/ai | dl/tf-yolo3/sv.py | sv.py | py | 1,957 | python | en | code | 2 | github-code | 13 |
9746674534 | import pandas as pd
import numpy as np
# Quote files live at /gitrepo/robotRepo/hq<day>/<ticker>.y.csv
CsvFileNameFormat="/gitrepo/robotRepo/hq{}/{}.y.csv"
# Damping factor for the relative volume change so it stays comparable in
# magnitude to the price-based features.
VOLUME_REDUCER=1000.0
def readRawData(ticker, day):
    """Load one day of quotes for `ticker` and derive per-bar relative features.

    Reads CsvFileNameFormat.format(day, ticker) and appends columns expressed
    relative to the previous bar's close (OP/HP/LP/CP), the high-low spread
    (HL = HP - LP) and the damped volume change (VolChange).

    Returns:
        input_raw_data: ndarray of the derived feature columns (iloc[:, 8:]),
            skipping the first row, which has no previous bar to diff against.
        target_raw_data: the CP (close-to-close return) series, same rows.
    """
    csvFile = CsvFileNameFormat.format(day, ticker)
    df = pd.read_csv(csvFile, index_col=[0], parse_dates=False)
    df['PrevClose'] = df.Close.shift(1)
    df['PrevVolume'] = df.Volume.shift(1)
    # Relative volume change, divided by VOLUME_REDUCER to keep it on the
    # same scale as the price-based features below.
    df['VolChange'] = (df.Volume - df.PrevVolume) / df.PrevVolume / VOLUME_REDUCER
    df['OP'] = (df.Open - df.PrevClose) / df.PrevClose
    df['HP'] = (df.High - df.PrevClose) / df.PrevClose
    df['LP'] = (df.Low - df.PrevClose) / df.PrevClose
    df['CP'] = (df.Close - df.PrevClose) / df.PrevClose
    df['HL'] = df.HP - df.LP
    # Debug print of the derived feature columns (first rows).
    print(df.iloc[1:15, 8:])
    input_raw_data = df.iloc[1:, 8:].values
    target_raw_data = df[1:].CP
    return input_raw_data, target_raw_data
class HqReader:
    """Reshapes one day of quote features into sliding windows of shape
    (batch, time_steps, n_input) for sequence models, with matching
    CP-return target windows."""

    def __init__(self, ticker, day):
        self.ticker = ticker
        self.day = day
        input_raw_data, target_raw_data = readRawData(ticker, day)
        print('input_raw_data shape', input_raw_data.shape)
        print('target_raw_data shape', target_raw_data.shape)
        self.rows, self.n_input = input_raw_data.shape
        # Keep a leading batch axis so the slices in input_reshape stay 2-D.
        self.input_raw_data = input_raw_data.reshape((1, self.rows, self.n_input))
        self.target_raw_data = target_raw_data

    def split_data(self, train_size, time_steps, n_classes):
        """Return (x, y): `train_size` input windows and the target windows
        that start right after each input window ends."""
        x = self.input_reshape(train_size, time_steps, self.n_input)
        y = self.target_reshape(n_classes)
        return x, y[time_steps:time_steps + train_size]

    def input_reshape(self, batch_size, time_steps, n_input):
        """Sliding input windows: sample i covers raw rows [i, i+time_steps)."""
        x = np.empty((batch_size, time_steps, n_input))
        for i in range(batch_size):
            x[i] = self.input_raw_data[0, i:time_steps + i]
        return x

    def target_reshape(self, n_classes):
        """Sliding target windows of length n_classes over the CP series."""
        batch_size = self.rows - n_classes
        y = np.empty((batch_size, n_classes))
        for i in range(batch_size):
            y[i] = self.target_raw_data[i:i + n_classes]
        return y
if __name__ == "__main__":
    day = '20181003'
    ticker = 'LABU'
    hqReader = HqReader(ticker, day)
    # Bug fix: the original referenced undefined names `time_steps` and
    # `n_classes` (NameError at runtime).  Define them explicitly; the
    # values match the examples in the class's development comments —
    # adjust as needed.
    time_steps = 3
    n_classes = 2
    train_x, train_y = hqReader.split_data(train_size=100, time_steps=time_steps, n_classes=n_classes)
    print(train_x.shape)
    print(train_y.shape)
| jbtwitt/pipy | hq/HqReader.py | HqReader.py | py | 3,381 | python | en | code | 0 | github-code | 13 |
72651545619 | __authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import unittest
import datetime
# dependencies imports
from dateutil import rrule
# import here the module / classes to be tested
from srules import Session, SRules
class TestSRules(unittest.TestCase):
    """Shared fixture: five sessions (ses1..ses5), each with a daily rule
    starting 2011-08-20 and repeating every 2 days."""

    def setUp(self):
        specs = [
            ("Test1", 60 * 8, 13, 30),
            ("Test2", 60 * 3, 12, 0),
            ("Test3", 60 * 3, 15, 0),
            ("Test4", 60 * 3, 20, 0),
            ("Test5", 60 * 3, 23, 0),
        ]
        # Create all sessions first, then attach the recurrence rules,
        # mirroring the two-phase setup order.
        sessions = [Session(name, duration=duration,
                            start_hour=hour, start_minute=minute)
                    for name, duration, hour, minute in specs]
        for ses in sessions:
            ses.add_rule("", freq=rrule.DAILY,
                         dtstart=datetime.date(2011, 8, 20), interval=2)
        self.ses1, self.ses2, self.ses3, self.ses4, self.ses5 = sessions
class TestIntersection1(TestSRules):
    """Intersection tests for an SRules holding two overlapping sessions
    (ses1 13:30+8h and ses2 12:00+3h). Currently a stub."""
    def setUp(self):
        TestSRules.setUp(self)
        self.srule = SRules("Test")
        self.srule.add_session(self.ses1)
        self.srule.add_session(self.ses2)
    def test_1(self):
        # TODO: assert the expected intersection behaviour; placeholder only.
        pass
if __name__ == "__main__":
    import sys
    # Discover and run every TestCase defined in this module.
    suite = unittest.findTestCases(sys.modules[__name__])
    #suite = unittest.TestLoader().loadTestsFromTestCase(Test)
    unittest.TextTestRunner(verbosity=2).run(suite)
| LinkCareServices/python-schedule-rules | tests/srules_test.py | srules_test.py | py | 1,950 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.