seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39946882202 | #!/usr/bin/env python
import sys
import time
import logger
import session_environment
def execute(config, args):
    """
    Runs the exec module with the given args and
    global configuration.

    For details of contents of config and args -
    see the batch-mode.py main file.
    - command
    - name
    - batch_size
    - force
    - read

    Returns True on success, False when the session could not be created.
    """
    env = session_environment.SessionEnvironment(config)
    log = logger.Logger(config)
    command = args["command"]
    command_list = command.split(" ")
    name = args["name"]
    batch_size = args["batch_size"]  # read but unused here; presumably consumed via args by session.create
    force = args["force"]
    read = args["read"]
    # Default session name: "<command>.<YYYY-MM-DD>".
    if name == "":
        name = command_list[0] + "." + time.strftime("%Y-%m-%d")
        args["name"] = name
    # Try to create the new session.
    session = None
    if env.session_exists(name):
        log.log(logger.WARNING, "Session with that name already exists.")
        if force:
            log.log(logger.WARNING, "Deleting old session and forcing a new session.")
            env.delete_session(name)
            session = env.create_empty_session(name)
        else:
            log.log(logger.ERROR, "Not using '--force' flag, aborting.")
            return False
    else:
        log.log(logger.INFO, "Creating session '%s'." % name)
        session = env.create_empty_session(name)
    # Identity comparison per PEP 8; the original used `session == None`.
    if session is None:
        log.log(logger.ERROR, "Failed to create new session.")
        return False
    session.create(config, args)
    # Generate the batches, reading jobs from stdin or from the given file.
    if read == "":
        session.generate_batches(sys.stdin)
    else:
        # Context manager closes the file even if generate_batches raises;
        # the original leaked the handle on error.
        with open(read, "r") as jobstream:
            session.generate_batches(jobstream)
    session.save()
    return True
| jtmpu/batch-mode | bm_modules/mnew.py | mnew.py | py | 1,791 | python | en | code | 0 | github-code | 13 |
33972717500 | # Assignment: Mini Project 1
# Due Date: October, 27 2015
# Name: Lane Scobie, Dylan Waters, Jason Yuen
# Unix ID: scobie, dwaters, jjyuen1
# StudentID: 1448158, 1343144, 1267071
# Lecture Section: B1
# Instructor: Davood Rafiei
# Group: 20
#---------------------------------------------------------------
#
#
#---------------------------------------------------------------
# library import
import sys
import datetime
import cx_Oracle # the package used for accessing Oracle in Python
import getpass # the package for getting password from user without displaying it
import random
# Sign in to use airline options.
# Must either already have login access, or create a new user.
def signIn():
    """Top-level login / sign-up / exit menu.

    Option 1 gives three login attempts; users listed in airline_agents get
    agent privileges inside caller(). Option 2 registers a new users row.
    Uses the module-level `curs` / `connection` set up in __main__.
    """
    print()
    print("To continue please choose one of the following options:")
    boots= True
    # prompts user with options
    while boots:
        user = input("Press 1 to login: \nPress 2 to sign-up: \nPress 3 for exit: \n")
        if (user=='3'):
            print("Goodbye")
            exit()
        elif (user== '1'):
            count=0
            while (count<3):
                email= input("Enter email: ")
                # Pad to the fixed-width CHAR columns used by the schema.
                email ='{0: <20}'.format(email)
                passw = getpass.getpass()
                passw = '{0: <4}'.format(passw)
                select = "SELECT email FROM users WHERE email=:email and pass=:passw"
                curs.execute(select,{'email':email, 'passw':passw})
                rows = curs.fetchall()
                if len(rows)>0:
                    print ("\nLogin successful for", email, "\n")
                    select= "SELECT name FROM airline_agents WHERE email= :email"
                    curs.execute(select, {'email':email})
                    row1 = curs.fetchall()
                    if len(row1)>0:
                        agentName=row1[0][0]
                        # checks agents
                        print("Welcome Airline Agent", agentName )
                        caller(email, True)
                        count = 3
                    else:
                        caller(email, False) # So it can go back to log in screen after log out
                        count = 3
                else:
                    count+=1
                    print("Login Failed. Remaining attempts: ", 3 - count)
        elif (user== '2'):
            print("Creating new user\n")
            validEmail = False
            while not validEmail:
                email= input("Please enter in a valid email: ")
                email ='{0: <20}'.format(email)
                select = "SELECT email FROM users WHERE email= :email"
                curs.execute(select,{'email':email})
                rows=curs.fetchall()
                if len(rows)>0:
                    print("Email taken")
                    validEmail= False
                else:
                    validEmail = True
            if validEmail:
                notvalid= True
                while notvalid:
                    passw= input("Please submit a password: ")
                    if len(passw)>4:
                        print("Passwords must be only 4 characters")
                    else:
                        notvalid=False
                notvalid=True
                while notvalid:
                    name= input("Name: ")
                    # BUG FIX: the original tested len(passw) here, so the
                    # name length was never actually validated.
                    if len(name)>20:
                        print("Name must be less than 20 characters")
                    else:
                        notvalid=False
                #have to check if email is still valid
                good = True
                select = "SELECT email FROM users WHERE email= :email"
                curs.execute(select,{'email':email})
                row = curs.fetchall()
                if len(row)>0:
                    good = False
                if good:
                    #update the tables
                    # NOTE(review): `name` is collected but never stored; the
                    # insert writes NULL for the third column -- confirm schema.
                    insert = "insert into users values (:email, :passw, NULL)"
                    curs.execute(insert,{'email':email,'passw':passw})
                    connection.commit()
                    print("New User created. Welcome", email)
        # anyother user input is invalid
        else:
            print("Invalid input")
            boots= True
# This function searchs for a flight from a desired destination and availble flights
def search():
    """Prompt for source, destination and departure date, then print matching
    direct and one-layover flights.

    Returns (masterlist, date) where each masterlist row is
    [src, dst, fno1, fno2, dep, arr, fare, seats, fare2, seats2, stops, layover, price],
    or (None, None) when nothing matches the chosen date.
    """
    # Rebuild the available_flights view from the second statement of the SQL file.
    with open("mini-view.sql") as f:
        full = f.read()
    comm=full.split(';')
    try:
        curs.execute('drop view available_flights')
    except:
        # View may not exist yet -- ignore and recreate below.
        pass
    finally:
        curs.execute(comm[1])
    connection.commit()
    # --- Source airport ---
    good=False
    curs.execute('select * from airports')
    rows= curs.fetchall()
    while not good:
        src=input("Please enter the source:")
        src= src.upper()
        for row in rows:
            # NOTE(review): exact match against ANY airport column; the
            # destination loop below compares row[0] only -- confirm intent.
            if src.upper() in row:
                good=True
                print("Departing from: "+src)
                break
        if not good:
            # Suggest airports whose other columns contain the input.
            for row in rows:
                if src.upper() in row[1].upper():
                    print("Did you mean?",row[0],row[1])
                elif src.upper() in row[2].upper():
                    print("Did you mean?",row[0],row[1])
                elif src.upper() in row[3].upper():
                    print("Did you mean?",row[0],row[1])
    # --- Destination airport ---
    curs.execute("SELECT * from airports")
    rows=curs.fetchall()
    good=False
    while not good:
        dst=input("Please enter the destination:")
        dst=dst.upper()
        for row in rows:
            if dst.upper()==row[0]:
                good=True
                print("Arriving at: "+dst)
                break
        if not good:
            for row in rows:
                if dst.upper() in row[1].upper():
                    print("Did you mean?",row[0],row[1])
                elif dst.upper() in row[2].upper():
                    print("Did you mean?",row[0],row[1])
                elif dst.upper() in row[3].upper():
                    print("Did you mean?",row[0],row[1])
    # --- Departure date ---
    good=False
    while not good:
        curs.prepare("select dep_date from sch_flights where dep_date=:datez")
        date=input("Please enter the departure date(DD-Mon-YYYY):")
        try:
            curs.execute(None, {"datez":date})
            rows = curs.fetchall()
        except:
            print("Invaild date")
        else:
            # BUG FIX: fetchall() returns a (possibly empty) list, never None,
            # so the original `rows==None` re-prompt could never trigger.
            if len(rows)==0:
                print("No flights match that date, please try again")
                print("Format should be (DD-Mon-YYYY), ei:22-Sep-2015")
            else:
                good=True
    # Collect candidate legs for the chosen date.
    curs.prepare("select src, dst, flightno,to_char(dep_time,'HH24:MI'), to_char(arr_time, 'HH24:MI'), fare, seats, price from available_flights where dep_date=:datez")
    curs.execute(None, {"datez":date})
    rows=curs.fetchall()
    direct=[]
    indirSRC=[]
    indirDST=[]
    for row in rows:
        if row[0]==src.upper() and row[1]==dst.upper():
            newdir=[row[0],row[1],row[2],'Null ',row[3],row[4],row[5],row[6],' ',' ',0,'NONE',row[7]]
            direct.append(newdir)
        elif row[0]==src.upper():
            newdir=[row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]]
            indirSRC.append(newdir)
        elif row[1]==dst.upper():
            newdir=[row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]]
            indirDST.append(newdir)
    # Pair src-legs with dst-legs that connect at the same airport and
    # depart after the first leg arrives.
    indirect=[]
    for row in indirSRC:
        for row1 in indirDST:
            if row[1]==row1[0]:
                price=row[7]+row1[7]
                arz=row[4].split(":")
                dpz=row1[3].split(":")
                hourM=(int(dpz[0])-int(arz[0]))*60
                Mins=(int(dpz[1])-int(arz[1]))
                Laytime=hourM+Mins
                newdir=[row[0],row1[1],row[2],row1[2],row[3],row1[4],row[5],row[6],row1[5],row1[6],1,Laytime,price ]
                # String comparison is safe here because HH24 is zero-padded.
                if dpz[0]>arz[0]:
                    indirect.append(newdir)
                elif dpz[0]==arz[0] and dpz[1]>arz[1]:
                    indirect.append(newdir)
    masterlist=[]
    for row in direct:
        masterlist.append(row)
    for row in indirect:
        masterlist.append(row)
    if len(masterlist) == 0:
        print("\n------------No flights for given date------------\n")
        return None, None
    # --- Sort preference ---
    notvalid=True
    while notvalid:
        answ=input("Would you like to sort based on amount of layovers(1) or Price(2): ")
        if answ=='2':
            notvalid=False
        elif answ=='1':
            notvalid=False
        else:
            print("Invalid, Please try again")
    if answ=='1':
        print("Sorted by Layover")
        # Secondary sort by price first, then a stable sort by stop count.
        masterlist.sort(key=lambda x: x[12])
        masterlist.sort(key=lambda x: x[10])
    else:
        print("Sorted by Price")
        masterlist.sort(key=lambda x: x[12])
    print("SRC DST FNO1 FNO2 ARR DEP Fare Seats Fare2 Seats2 Stops Lay Price")
    for row in masterlist:
        if row[8] != ' ':
            print(row[0],row[1],row[2],row[3],row[4],row[5],row[6],' ',row[7],' ',row[8],' ',row[9],' ',row[10],' ',row[11],'',row[12])
        else:
            print(row[0],row[1],row[2],row[3],row[4],row[5],row[6],' ',str(row[7]).ljust(3),' NA NA 0 NA ', row[12])
    return masterlist,date
def make(email):
    """Interactively book a direct or one-layover flight for *email*.

    Runs search() first; registers the traveller in `passengers` on their
    first booking, then books each leg via tjbooker(). Uses the module-level
    `curs` / `connection`.
    """
    masterlist,date= search()
    invalid= True
    # search() returns (None, None) when no flights matched the date.
    if date== None:
        return
    datz=date
    # Ask whether one or two legs will be booked.
    while invalid:
        ans=input("Are you booking a flight with a layover?(Y/N): ")
        if ans.upper()=="N":
            print("Direct flight")
            indirect=False
            invalid= False
        elif ans.upper()=='Y':
            print("inDirect flight")
            indirect=True
            invalid= False
        else:
            print("Invalid input")
    #Check if they are a passenger
    pname= input("Please enter your name: ")
    pname= '{0: <20}'.format(pname)  # pad to the fixed-width CHAR column
    check= "select count(name) from passengers where name=:pname and email=:email"
    curs.execute(check,{'email':email, 'pname':pname})
    count= curs.fetchall()
    if count[0][0]==0: #If passenger does not exist
        country= input("What is your country of origin?")
        #Update passenger table
        insert= "insert into passengers values (:email,:pname,:country)"
        curs.execute(insert,{'email':email, 'pname':pname,'country':country})
        connection.commit()
    # Leg 1: loop until a flight-number/fare pair from the search results
    # with seats remaining is entered, then book it.
    valid= False
    while not valid:
        fno1=input("Please enter the flightno: ")
        fare1=input("Please enter the desired fare type: ")
        fare1= fare1.upper()
        fno1 ='{0: <6}'.format(fno1)
        fare1 ='{0: <2}'.format(fare1)
        for row in masterlist:
            if fno1==row[2] and fare1== row[6] and row[7]!=0:
                valid= True
    tjbooker(fno1,fare1,email,datz,pname)
    if indirect:
        # Leg 2 (layover bookings only): validated against the second-leg
        # columns (row[3], row[8], row[9]) of the search results.
        valid = False
        while not valid:
            fno2=input("Please enter the 2nd flightno: ")
            fare2=input("Please enter the desired fare type: ")
            fare2= fare2.upper()
            fno2 ='{0: <6}'.format(fno2)
            fare2 ='{0: <2}'.format(fare2)
            for row in masterlist:
                if fno2==row[3] and fare2== row[8] and row[9]!=0:
                    valid= True
        tjbooker(fno2,fare2,email,datz,pname)
def tjbooker(fno,fare,email,datz,pname):
    """Create a ticket row and a booking row for one flight leg.

    Generates a fresh ticket number and seat, looks up the fare price, and
    commits both inserts. Aborts with a message when the fare limit is 0.
    """
    # Bind variable instead of %-interpolation (SQL-injection safe).
    check= "select limit from flight_fares where flightno= :fno"
    curs.execute(check, {'fno': fno})
    limit= curs.fetchall()
    # BUG FIX: fetchall() returns a list of rows; the original compared the
    # list itself to 0, so the "flight is full" branch could never trigger.
    if limit and limit[0][0] == 0:
        print("Error: flight is full")
        return
    #Generate random ticket#
    ticket= ticket_gen()
    print('Your ticket number is: ', ticket)
    #Get/Generate seat
    seat= seat_gen()
    get= 'select price from flight_fares where flightno= :fno and fare= :fare'
    curs.execute(get,{'fno':fno, 'fare':fare})
    price= curs.fetchall()
    price= price[0][0]
    insert= "insert into tickets values (:ticket,:pname,:email,:price)"
    curs.execute(insert, {'ticket':ticket, 'pname':pname,'email':email,'price':price})
    connection.commit()
    insert= "insert into bookings values (:ticket,:fno,:fare, to_date(:datz,'DD-Mon-YYYY'),:seat)"
    curs.execute(insert, {'ticket':ticket, 'fno':fno,'fare':fare,'datz':datz,'seat':seat})
    connection.commit()
#Generate random ticket#
def ticket_gen():
    """Return a random ticket number in [0, 999] not already used in bookings."""
    while True:
        ticket= random.randint(0,999)
        # Bind variable rather than %-interpolated SQL.
        curs.execute("select count(tno) from bookings where tno= :tno", {'tno': ticket})
        count= curs.fetchall()
        if count[0][0]==0:
            return ticket
#Generate random seat
def seat_gen():
    """Return a random free seat label like '12C' (rows 1-20, columns A-F).

    NOTE(review): uniqueness is checked across ALL bookings regardless of
    flight or date -- confirm whether per-flight uniqueness was intended.
    """
    seats='ABCDEF'
    while True:
        seatn= random.randint(1,20)
        seat2= seats[random.randint(0,5)]
        seat= str(seatn)+str(seat2)
        # Bind variable rather than %-interpolated SQL.
        curs.execute("select count(tno) from bookings where seat= :seat", {'seat': seat})
        count= curs.fetchall()
        if count[0][0]==0:
            # BUG FIX: the original printed 'Seat is booked' on this (free)
            # branch -- the opposite of what actually happened.
            return seat
        print("Seat", seat, "is already booked, trying another")
def list(email):
    """Print all bookings for *email*; optionally show full detail for one.

    Returns 1 if the user has bookings, 0 otherwise. (Shadows the builtin
    `list`; the name is kept because caller() and cancel() call it.)
    """
    select=("Select b.tno, t.name, b.dep_date, t.paid_price from bookings b, tickets t where b.tno=t.tno and t.email= :email order by row_number() over(order by b.tno)")
    curs.execute(select,{'email':email})
    rows=curs.fetchall()
    if len(rows)==0:
        ret=0
        print("You do not have any bookings")
    else:
        ret=1
        print("Ticket #:", "\t Name:", "\t\t Dept Date:", "\t\t Price:")
        for row in rows:
            print(str(row[0]).ljust(7), "\t", (row[1].strip()).ljust(8),"\t", row[2],"\t", row[3])
        more= input("Would you like more information on a booking? (Y/N)")
        more= more.upper()
        if more== 'Y':
            valid= False
            while not valid:
                try:
                    which= int(input("Which booking would you like more info on?(Ticket)"))
                    # `which` is int()-validated, so %d interpolation is safe here.
                    much= "select * from bookings where tno= '%d'" %(which)
                    curs.execute(much)
                    row= curs.fetchall()
                    print("tno flightno fare date Seat ")
                    print(row[0][0],'',row[0][1],' ',row[0][2],' ',row[0][3],row[0][4])
                except:
                    # Non-numeric input or unknown ticket (empty fetch ->
                    # IndexError) both land here; the loop then exits.
                    valid= True
                    print("-------------Invalid tno--------------------")
                else:
                    valid= True
    return ret
def cancel(email):
    """List the user's bookings and delete one chosen by ticket number.

    Deletes both the bookings row and the tickets row in one commit.
    """
    if list(email)==0:
        # Nothing to cancel.
        pass
    else:
        check= True
        while check:
            cancel= input("Which booking would you like to cancel? Input ticket number: ")
            cancel ='{0: <20}'.format(cancel)
            try:
                select = "SELECT tno FROM bookings WHERE tno= :cancel"
                curs.execute(select,{'cancel':cancel})
            except:
                print("Invalid input")
            else:
                rows=curs.fetchall()
                if len(rows)>0:
                    print("Deleting booking for flight", cancel)
                    check= False
                    #Delete booking
                    # Bind variables: `cancel` is untrusted user input and was
                    # previously %-interpolated straight into the DELETEs.
                    curs.execute("delete from bookings where tno = :cancel", {'cancel': cancel})
                    curs.execute("delete from tickets where tno = :cancel", {'cancel': cancel})
                    connection.commit()
                    print("Booking deleted\n")
                else:
                    print(cancel)
                    print("Invalid ticket number")
# Updates the departure time of a user inputed flight number with the current time
def updateD():
    """Prompt for a flight number and stamp its actual departure time (agent only).

    Loops until a flight number that exists in sch_flights is entered.
    """
    while True:
        flightno=input("---What flight number would you like to update the departure time for?\n")
        # Bind variable: flightno is untrusted user input (was %-interpolated).
        curs.execute("update sch_flights set act_dep_time = SYSDATE where flightno = :fno",
                     {'fno': flightno})
        connection.commit()
        curs.execute("select * from sch_flights where flightno = :fno", {'fno': flightno})
        newUpdate = curs.fetchall()
        if newUpdate:
            print("---Flight", flightno,"to",newUpdate[0][2])
            print("---Updated flight departure time. Safe flight!\n")
            return
        # No matching flight: the UPDATE affected nothing. (The original
        # relied on an IndexError from newUpdate[0] to reach this message.)
        print("---Invalid flight number.")
# Updates the arrival time of a user inputed flight number with the current time
def updateA():
    """Prompt for a flight number and stamp its actual arrival time (agent only).

    Loops until a flight number that exists in sch_flights is entered.
    """
    while True:
        flightno=input("---What flight number would you like to update the arrival time for?\n")
        # Bind variable: flightno is untrusted user input (was %-interpolated).
        curs.execute("update sch_flights set act_arr_time = SYSDATE where flightno = :fno",
                     {'fno': flightno})
        connection.commit()
        curs.execute("select * from sch_flights where flightno = :fno", {'fno': flightno})
        newUpdate = curs.fetchall()
        if newUpdate:
            print("---Flight", flightno,"to",newUpdate[0][1])
            print("---Updated flight arrival time. Happy landing!\n")
            return
        # No matching flight: the UPDATE affected nothing. (The original
        # relied on an IndexError from newUpdate[0] to reach this message.)
        print("---Invalid flight number.")
def caller(email, agent):
    """Main menu loop shown after a successful login.

    `agent` enables the airline-agent submenu (option 5). Option 6 stamps
    users.last_login and returns to the sign-in screen.
    """
    scoots= True
    while scoots:
        print("-----------------------------------------")
        print("What would you like to do?")
        do = input("Type 1 to search for flights\nType 2 to make a booking\nType 3 to list your current bookings\nType 4 to cancel a booking\nType 5 for Airline Agent options\nType 6 to logout\n------>")
        if (do== '1'):
            search()
        elif (do== '2'):
            make(email)
        elif (do=='3'):
            list(email)
        elif (do== '4'):
            cancel(email)
        elif (do == '5'):
            if agent:
                # Any input other than 1/2 simply falls through (option 3 = back).
                agentInput = input("---Type 1 to update departure time by flight\n---Type 2 to update arrival time by flight\n---Type 3 to go back\n")
                if agentInput == '1':
                    updateD()
                elif agentInput == '2':
                    updateA()
            else:
                print("You do not have suffient access.")
        elif (do== '6'):
            # Record logout time before returning to the sign-in menu.
            update = "update users set last_login = SYSDATE where email = :email"
            curs.execute(update,{'email':email})
            connection.commit()
            print("Logout successful")
            connection.commit()
            scoots= False
            return
        else:
            print("Invalid input")
if __name__ == "__main__":
    # Start program
    print("\n----------Welcome to AirRafiei----------")
    print("Please provide your SQL login to continue:")
    # get username (defaults to the current OS user when left blank)
    user = input("Username [%s]: " % getpass.getuser())
    if not user:
        user=getpass.getuser()
    # get password
    pw = getpass.getpass()
    # The URL we are connnecting to
    conString=''+user+'/' + pw +'@gwynne.cs.ualberta.ca:1521/CRS'
    try:
        # Establish a connection in Python.
        connection = cx_Oracle.connect(conString)
        # create a cursor -- `connection` and `curs` are module-level and are
        # used by every function defined above
        curs = connection.cursor()
    # Login to SQL failed
    except cx_Oracle.DatabaseError as exc:
        error, = exc.args
        print("Oracle code:", error.code)
        print("Oracle message:", error.message)
        print( "Login Failed. Goodbye.")
        sys.exit()
    signIn()
73479317139 | #! python
# Read the two wires: one comma-separated line of moves (e.g. "R75,D30,...") each.
with open('day3/input') as f:
    # BUG FIX: the original stripped the trailing newline by truncating the
    # last move to 4 characters ([:4]), silently corrupting moves of 5+
    # characters such as "R1005". Strip the newline instead.
    wires = [line.rstrip('\n').split(',') for line in f.readlines()]
def trace_wire(wire):
    """Trace a wire's moves (e.g. ['R75', 'U3']) from the origin.

    Returns every grid point visited, one step at a time, starting with
    (0, 0) itself.
    """
    # Unit steps per direction letter; collapses the four duplicated
    # branches of the original implementation.
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    path = [(0, 0)]
    for movement in wire:
        direction = movement[0]
        if direction not in deltas:
            # Preserve original behaviour: unknown directions are ignored.
            continue
        dx, dy = deltas[direction]
        for _ in range(int(movement[1:])):
            x, y = path[-1]
            path.append((x + dx, y + dy))
    return path
def man_dist(tu):
    """Return the Manhattan (taxicab) distance of point *tu* from the origin."""
    return sum(abs(coord) for coord in tu)
# Trace both wires and find the grid points they have in common.
path1 = trace_wire(wires[0])
path2 = trace_wire(wires[1])
intersections = set(path1) & set(path2)
# Pick the intersection nearest the origin, excluding the shared (0,0)
# start point. `closest` doubles as a sentinel: while it is still (0,0),
# the second branch initialises it with the first non-origin hit.
closest = (0,0)
for intersect in intersections:
    distance = man_dist(intersect)
    if distance > 0 and distance < man_dist(closest):
        closest = intersect
        continue
    if closest == (0,0):
        closest = intersect
        continue
print(man_dist(closest))
| Frosty-nee/aoc2019 | day3/day3p1.py | day3p1.py | py | 1,194 | python | en | code | 0 | github-code | 13 |
38256494271 | ## 일반 Sequence Classification training을 수행하는 코드
from dataset import prepare_WC
from transformers import AutoModelForSequenceClassification, TrainingArguments, AutoConfig, Trainer, EarlyStoppingCallback, DataCollatorWithPadding
from datasets import concatenate_datasets
import wandb
import os
from utils import seed_everything
import argparse
from sklearn.metrics import accuracy_score, f1_score
def train(kfold=5):
    """Run stratified k-fold fine-tuning of klue/roberta-large for sequence
    classification (232 labels), saving the best checkpoint per fold.

    Args:
        kfold: number of folds produced by prepare_WC.
    """
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    kfold_tokenized_dataset_list, tokenizer = prepare_WC(kfold=kfold)
    # Iterate over folds: the selected fold is the validation set and the
    # remaining folds are concatenated into the training set.
    for fold in range(kfold):
        valid_dataset = kfold_tokenized_dataset_list[fold]
        train_dataset = concatenate_datasets([kfold_tokenized_dataset_list[i] for i in range(kfold) if i!=fold])
        # The config and model are loaded from the Huggingface library.
        config = AutoConfig.from_pretrained('klue/roberta-large')
        config.num_labels = 232
        model = AutoModelForSequenceClassification.from_pretrained('klue/roberta-large', config=config)
        # Arguments customised for training.
        training_args = TrainingArguments(
            output_dir= f'../output/roberta_large_WC_fold{fold}',
            evaluation_strategy = 'epoch',
            save_strategy = 'epoch',
            per_device_train_batch_size = 128,
            per_device_eval_batch_size = 128,
            gradient_accumulation_steps = 1,
            learning_rate = 5e-5,
            weight_decay = 0.1,
            num_train_epochs = 4,
            warmup_ratio = 0.1,
            logging_strategy = 'steps',
            logging_steps = 50,
            save_total_limit = 1,
            seed = 42,
            dataloader_num_workers = 2,
            load_best_model_at_end = True,
            metric_for_best_model = 'accuracy',
            group_by_length =True,
            report_to = 'wandb',
        )
        ## Computes accuracy and macro-F1 (as percentages) after evaluation.
        def compute_metrics(pred):
            labels = pred.label_ids
            preds = pred.predictions.argmax(-1)
            acc = accuracy_score(labels, preds)
            f1 = f1_score(labels, preds, average='macro')
            return {'eval_accuracy' : acc*100, 'eval_f1' : f1 * 100}
        ## Dynamic per-batch padding, since no padding was applied during tokenization.
        data_collator = DataCollatorWithPadding(tokenizer = tokenizer)
        ## Use Huggingface's Trainer class to handle training, validation,
        ## log saving and model saving in one place.
        trainer=Trainer(
            model,
            training_args,
            train_dataset = train_dataset,
            eval_dataset = valid_dataset,
            tokenizer = tokenizer,
            data_collator = data_collator,
            compute_metrics = compute_metrics,
            callbacks=[EarlyStoppingCallback(early_stopping_patience=2)]
        )
        ## Use the wandb API to track training curves and metrics as graphs.
        run = wandb.init(project='kostat', entity='donggunseo', name=f'roberta_large_WC_fold{fold}')
        trainer.train()
        run.finish()
        ## Save the best-performing epoch checkpoint separately as the
        ## representative model for this fold.
        trainer.save_model(f'../best_model/roberta_large_WC_fold{fold}')
        ## Save the training logs.
        trainer.save_state()
if __name__ == "__main__":
    # CLI entry point: choose the number of folds, fix all seeds, then train.
    parser = argparse.ArgumentParser()
    parser.add_argument('--kfold', type=int, default=5, help='decide the number of fold for stratify kfold')
    args = parser.parse_args()
    seed_everything(42)
    train(args.kfold)
34573999939 | import csv
import os
# Path to the election results CSV (expects a header row with a
# "Candidate" column).
input_csv='C:\\Users\\tbnet\\Desktop\\UKED201811DATA5\\02-Homework\\03-Python\\Instructions\\PyPoll\\Resources\\election_data.csv'

total_votes=0
candidates=[]
vote_count={}

# BUG FIX: the original used csv.reader, whose rows are lists, so
# row['Candidate'] raised TypeError. DictReader maps each row to a dict
# keyed by the header names (and skips the header row automatically).
with open(input_csv) as csv_file:
    csvreader=csv.DictReader(csv_file)
    for row in csvreader:
        total_votes +=1
        if row['Candidate'] not in candidates:
            candidates.append(row['Candidate'])
            vote_count[row['Candidate']]=1
        else:
            vote_count[row['Candidate']] += 1

print("Election Results")
print("-------------------------------")
print("Total Vote Counts: "+ str(total_votes))
print("-------------------------------")
# BUG FIX: the original built the tuple (pct, 1) instead of calling
# round(pct, 1), printing e.g. "(50.0, 1)%".
for key, value in vote_count.items():
    print(key + ": " + str(round(value/total_votes*100, 1)) + "%" + " (" + str(value)+ ")")

# Winner = candidate with the most votes.
prior_candiate= 0
for key, value in vote_count.items():
    if value > prior_candiate:
        most_vote = key
        prior_candiate = value
print("--------------------------------")
print("Winner: " + most_vote)
print("--------------------------------")
| tnetherton19/KU--tim-python-challenge | PyPoll/main.py | main.py | py | 1,067 | python | en | code | 0 | github-code | 13 |
659615127 | #Rahul Ramakrishnan
#module: config
population_size = 50    #Number of trees in the population
tournament_size = 3    #Size of tournament during tournament selection
tree_size  = 10        #Number of nodes in a tree
generations = 50       #Number of generations
c_probability = .7     #Crossover probability
m_probability = .2     #Mutation probability
minimum_size = 3       #Minimum size of the tree that will be
                       #preserved during crossover
iteration = 10         #NOTE(review): presumably the number of independent GP runs -- confirm with callers
| giladbi/algorithmic-trading | Rahul_Genetic_Program/apple/config.py | config.py | py | 471 | python | en | code | 90 | github-code | 13 |
2350589473 | import pygame
class LoadFont:
    """Wraps pygame.font.Font: pre-renders a text surface and registers
    itself with the given render manager's fontList."""

    def __init__(self, render, location, size, text, color, placement, aaFlag=True, boldFlag=False, italicFlag=False):
        # file location
        self.location = location
        # font size
        self.size = size
        # placement on the screen
        self.placement = placement
        # font text
        self.text = text
        # font color
        self.color = color
        # font display
        self.font = pygame.font.Font(self.location, self.size)
        # BUG FIX: apply style flags BEFORE rendering; the original set them
        # after self.image was created, so they never affected the rendered
        # text surface.
        # bold flag
        self.font.bold = boldFlag
        # italic flag
        self.font.italic = italicFlag
        # render flag
        self.render = True
        # font image (pre-rendered text surface)
        self.image = self.font.render(self.text, aaFlag, self.color)
        # font rect
        self.rect = pygame.draw.rect(self.image, self.color, self.image.get_rect(), -1)
        render.fontList.append(self)
8623760937 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 10:21:55 2020
@author: Sogal
"""
#Can't download file
from bs4 import BeautifulSoup
import requests
# NOTE(review): `data` below is a local demo document; it is immediately
# overwritten by the live page fetched afterwards, so it is effectively
# dead code kept for reference.
data = """
<html>
<head>
<title>Phoebe's Fantasy journey</title>
<link href="style.css" rel=stylesheet>
</head>
<body>
<div>
<header>
</header>
<h1>為無奈的工作人生添加一點趣味吧!</h1>
在這裡菲比會分享日常小事,像是上班途中發現的巷弄美食、文青咖啡店<br>
又或是學了什麼新的知識,都會在這邊分享給大家
<h2>菲比尋常的奇幻旅程</h2>
<a href="www.yahoo.com" class="L"> Find Something</a><br>
<a href="www.google.com" class="b"> Find Something</a><br>
</div>
</body>
</html>
"""
# Fetch the books.com.tw bestseller page and parse it.
data = requests.get("http://www.books.com.tw/web/sys_saletopb/books/02?attribute=30&loc=act_menu_th_46_002")
soup = BeautifulSoup(data.text, "html.parser")
print(soup.prettify())
'''
print("=====================================================")
print(soup.title)
print("=====================================================")
print(soup.a)
print("=====================================================")
print(soup.a.attrs)
print("=====================================================")
print(soup.a.text)
print("=====================================================")
print(soup.find("a"))
print("=====================================================")
print(soup.find_all("a"))
print("=====================================================")
print(soup.find_all("a", href="www.google.com"))
print("=====================================================")
print(soup.find_all("a", class_="b"))
print("=====================================================")
print(soup.find_all("a", href="www.yahoo.com"))
'''
#start of code
# `l` caps the number of ranked items processed at 100.
l = 1
print(soup.find("div", class_="type02_bd-a"))
div_items = soup.find_all("div", class_="type02_bd-a")
#for i in div_items:
# Print title, author and price for each bestseller entry.
for index,i in enumerate(div_items):
    print("=====================================================")
    #print(i)
    h4 = i.find('h4')
    if not h4:
        continue
    print(str(index+1) + '. ' + h4.text)
    ul = i.find('ul', class_= 'msg')
    li_author = ul.find('li')
    print(li_author.text)
    li_price = i.find('li', class_='price_a')
    print(li_price.text)
    image = i.find('img')
    l = l + 1
    if l > 100:
        break
# Download each book-cover image to an indexed .jpg file.
divs = soup.find_all('img', class_= "cover")
for index,ele in enumerate(divs):
    print("=================[Images]=================")
    print(ele)
    # BUG FIX: `ele` is itself an <img> tag; the original called
    # ele.find('img'), which always returned None, so nothing downloaded.
    img_url = ele.get("src")
    if not img_url:
        continue
    print(str(index + 1) + ": " + img_url)
    img_data = requests.get(img_url)
    fileName = str(index) + ".jpg"
    # Context manager guarantees the handle is closed; the original wrote
    # `file.close` without parentheses, so the file was never closed.
    with open(fileName, "wb") as file:
        file.write(img_data.content)
    # Cap downloads at 100 images; the original tested the stale counter
    # `l` left over from the previous loop.
    if index >= 100:
        break
39018998982 | from typing import List
class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum of a rotated sorted array in O(log n).

        Binary search: whenever the current window is already sorted, its
        first element is the local minimum and the search stops early.
        """
        lo, hi = 0, len(nums) - 1
        best = nums[0]
        while lo <= hi:
            if nums[lo] <= nums[hi]:
                # Sorted window: nums[lo] is its smallest value.
                best = min(best, nums[lo])
                break
            mid = (lo + hi) // 2
            best = min(best, nums[mid])
            if nums[mid] >= nums[lo]:
                # Left half sorted -> the rotation point is to the right.
                lo = mid + 1
            else:
                # Rotation point is in the left half.
                hi = mid - 1
        return best
| sarveshbhatnagar/CompetetiveProgramming | min_in_rotated_sorted.py | min_in_rotated_sorted.py | py | 554 | python | en | code | 0 | github-code | 13 |
17086492864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CampDetailInfo import CampDetailInfo
from alipay.aop.api.domain.ShopDiscountInfo import ShopDiscountInfo
from alipay.aop.api.domain.ShopDiscountInfo import ShopDiscountInfo
class AlipayOfflineMarketShopDiscountQueryResponse(AlipayResponse):
    """Response model for the alipay.offline.market.shop.discount.query API.

    Holds the campaign list and count plus two discount lists. Each list
    setter coerces plain dicts (parsed from the gateway JSON) into their
    model classes via from_alipay_dict.
    """

    def __init__(self):
        super(AlipayOfflineMarketShopDiscountQueryResponse, self).__init__()
        self._camp_list = None
        self._camp_num = None
        self._discount_list = None
        self._item_list = None

    @property
    def camp_list(self):
        # List of CampDetailInfo (or None when absent from the response).
        return self._camp_list

    @camp_list.setter
    def camp_list(self, value):
        if isinstance(value, list):
            self._camp_list = list()
            for i in value:
                if isinstance(i, CampDetailInfo):
                    self._camp_list.append(i)
                else:
                    # Raw dict from the gateway: convert to the model class.
                    self._camp_list.append(CampDetailInfo.from_alipay_dict(i))
    @property
    def camp_num(self):
        # Number of campaigns reported by the gateway.
        return self._camp_num

    @camp_num.setter
    def camp_num(self, value):
        self._camp_num = value
    @property
    def discount_list(self):
        # List of ShopDiscountInfo (or None when absent from the response).
        return self._discount_list

    @discount_list.setter
    def discount_list(self, value):
        if isinstance(value, list):
            self._discount_list = list()
            for i in value:
                if isinstance(i, ShopDiscountInfo):
                    self._discount_list.append(i)
                else:
                    self._discount_list.append(ShopDiscountInfo.from_alipay_dict(i))
    @property
    def item_list(self):
        # List of ShopDiscountInfo items (or None when absent).
        return self._item_list

    @item_list.setter
    def item_list(self, value):
        if isinstance(value, list):
            self._item_list = list()
            for i in value:
                if isinstance(i, ShopDiscountInfo):
                    self._item_list.append(i)
                else:
                    self._item_list.append(ShopDiscountInfo.from_alipay_dict(i))

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response payload."""
        response = super(AlipayOfflineMarketShopDiscountQueryResponse, self).parse_response_content(response_content)
        if 'camp_list' in response:
            self.camp_list = response['camp_list']
        if 'camp_num' in response:
            self.camp_num = response['camp_num']
        if 'discount_list' in response:
            self.discount_list = response['discount_list']
        if 'item_list' in response:
            self.item_list = response['item_list']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayOfflineMarketShopDiscountQueryResponse.py | AlipayOfflineMarketShopDiscountQueryResponse.py | py | 2,614 | python | en | code | 241 | github-code | 13 |
49897032 | # -*- coding: utf-8 -*-
"""Installer for the ruddocom.policy package."""
from setuptools import find_packages
from setuptools import setup
def _read_file(name):
    """Return the text of *name*, closing the handle promptly.

    The original used bare open(...).read() calls, which leak the file
    handles until garbage collection.
    """
    with open(name) as f:
        return f.read()


# Long description shown on PyPI: README + contributors + changelog.
long_description = '\n\n'.join([
    _read_file('README.rst'),
    _read_file('CONTRIBUTORS.rst'),
    _read_file('CHANGES.rst'),
])

setup(
    name='ruddocom.policy',
    version='1.0a1',
    description="Rudd-O.com policy package",
    long_description=long_description,
    # Get more from https://pypi.org/classifiers/
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Plone",
        "Framework :: Plone :: Addon",
        "Framework :: Plone :: 6.0.0a1",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.1",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
    ],
    keywords='Python Plone CMS',
    author='Manuel Amador (Rudd-O)',
    author_email='rudd-o+plone@rudd-o.com',
    url='https://github.com/collective/ruddocom.policy',
    project_urls={
        'PyPI': 'https://pypi.python.org/pypi/ruddocom.policy',
        'Source': 'https://github.com/collective/ruddocom.policy',
        'Tracker': 'https://github.com/collective/ruddocom.policy/issues',
        # 'Documentation': 'https://ruddocom.policy.readthedocs.io/en/latest/',
    },
    license='GPL version 2',
    packages=find_packages('src', exclude=['ez_setup']),
    namespace_packages=['ruddocom'],
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    python_requires=">=3.1",
    install_requires=[
        'setuptools',
        # -*- Extra requirements: -*-
        'z3c.jbot',
        'Products.GenericSetup>=1.8.2',
        'plone.api>=1.8.4',
        'plone.restapi',
        'plone.app.dexterity',
        'plone.app.relationfield',
        'plone.app.lockingbehavior',
        'plone.schema',
        'plone.app.multilingual',
        'collective.relationhelpers',
        'collective.exportimport',
        'collective.folderishtypes[dexterity]',
        'collective.searchandreplace',
        'ruddocom.pdfiframer',
        'sc.social.like',
    ],
    extras_require={
        'test': [
            'plone.app.testing',
            # Plone KGS does not use this version, because it would break
            # Remove if your package shall be part of coredev.
            # plone_coredev tests as of 2016-04-01.
            'plone.testing>=5.0.0',
            'plone.app.robotframework[debug]',
        ],
    },
    entry_points="""
    [z3c.autoinclude.plugin]
    target = plone
    [console_scripts]
    update_locale = ruddocom.policy.locales.update:update_locale
    [plone.recipe.zope2instance.ctl]
    createsite = ruddocom.policy.ctl:createsite
    upgrade = ruddocom.policy.ctl:upgrade
    import = ruddocom.policy.ctl:import_
    export = ruddocom.policy.ctl:export
    folderize = ruddocom.policy.ctl:folderize
    add_content_redirects = ruddocom.policy.ctl:add_content_redirects
    """,
)
| Rudd-O/Rudd-O.com | src/ruddocom.policy/setup.py | setup.py | py | 3,010 | python | en | code | 0 | github-code | 13 |
18794545938 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides useful utilities for the other modules as well as for general use.
"""
import lxml
from lxml import etree
import xml.etree.ElementTree as et
import sys, re, os
from bs4 import BeautifulSoup
import pandas as pd
import hashlib
def infer_metadata(filename):
    """Infer protocol metadata from a Riksdagen protocol file path.

    Returns a dict with keys: 'protocol' (file stem with '-' normalised to
    '_'), 'year' (first 4-digit token in 1801-2099, if any), 'chamber'
    ("Första kammaren" / "Andra kammaren" / "Enkammarriksdagen"), and
    'number' (trailing integer token, when parseable).
    """
    metadata = dict()
    filename = filename.replace("-", "_")
    metadata["protocol"] = filename.split("/")[-1].split(".")[0]
    split = filename.split("/")[-1].split("_")
    # Year: first underscore-separated token whose first 4 chars form a
    # plausible year.
    for s in split:
        s = s[:4]
        if s.isdigit():
            year = int(s)
            if year > 1800 and year < 2100:
                metadata["year"] = year
    # Chamber
    metadata["chamber"] = "Enkammarriksdagen"
    if "_ak_" in filename:
        metadata["chamber"] = "Andra kammaren"
    elif "_fk_" in filename:
        metadata["chamber"] = "Första kammaren"
    try:
        # BUG FIX: strip the file extension before parsing; the original
        # passed e.g. "12.xml" to int(), which always failed for paths
        # with a suffix. Also narrow the bare except to ValueError.
        metadata["number"] = int(split[-1].split(".")[0])
    except ValueError:
        print("Number parsing unsuccesful", filename)
    return metadata
def element_hash(elem, protocol_id="", chars=16):
"""
Calculate a deterministic hash for an XML element
"""
# The hash seed consists of
# 1. Element text without line breaks
elem_text = elem.text
if elem_text is None:
elem_text = ""
elem_text = elem_text.strip().replace("\n", " ")
elem_text = ' '.join(elem_text.split())
# 2. The element tag
elem_tag = elem.tag
# 3. The element attributes in alphabetical order,
# excluding the XML ID and XML n
xml_id = "{http://www.w3.org/XML/1998/namespace}id"
xml_n = "{http://www.w3.org/XML/1998/namespace}n"
n = "n"
excluded = [xml_id, xml_n, n, "prev", "next"]
elem_attrib = {key: value for key, value in elem.attrib.items() if key not in excluded}
elem_attrib = str(sorted(elem_attrib.items()))
seed = protocol_id + "\n" + elem_text + "\n" + elem_tag + "\n" + elem_attrib
encoded_seed = seed.encode("utf-8")
# Finally, the hash is calculated via MD5
digest = hashlib.md5(encoded_seed).hexdigest()
return digest[:chars]
def _clean_html(raw_html):
# Clean the HTML code in the Riksdagen XML text format
raw_html = raw_html.replace("\n", " NEWLINE ")
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
cleantext = cleantext.replace(" NEWLINE ", "\n")
return cleantext
def read_riksdagen_xml(path):
    """
    Read Riksdagen XML text format and return a tuple
    consisting of an etree of the document, as well as the HTML
    inside the text element.

    Not implemented yet.  The previous body was a bare ``xml, cleaned_html``
    expression that raised a confusing NameError at call time; raising
    NotImplementedError makes the stub explicit.
    """
    raise NotImplementedError("read_riksdagen_xml is not implemented yet")
def read_html(path):
    """
    Read a HTML file and turn it into valid XML.
    """
    with open(path) as handle:
        soup = BeautifulSoup(handle)
    # prettify() re-serialises the soup as well-formed markup lxml can parse.
    return etree.fromstring(soup.prettify())
def validate_xml_schema(xml_path, schema_path):
    """Return True when the XML file at *xml_path* satisfies the XSD at *schema_path*."""
    document = lxml.etree.parse(xml_path)
    xsd = lxml.etree.XMLSchema(file=schema_path)
    return xsd.validate(document)
def parlaclarin_to_md(tree):
    """
    Convert Parla-Clarin XML to markdown. Returns a string.
    """
    # TODO: stub -- currently always returns an empty document.
    return ""
def parlaclarin_to_txt(tree):
    """
    Convert Parla-Clarin XML to plain text. Returns a string.

    Each <seg> element becomes one chunk of text; chunks are joined with
    newlines.  Leftover debug prints and commented-out scaffolding from
    development have been removed.
    """
    segments = tree.findall('.//seg')
    for segment in segments:
        # Flatten nested <seg> elements so serialisation yields plain content.
        etree.strip_tags(segment, 'seg')
    segment_txts = [etree.tostring(segment, pretty_print=True, encoding="UTF-8").decode("utf-8") for segment in segments]
    # Drop the enclosing <seg> markers left by tostring().
    segment_txts = [txt.replace("<seg>", "").replace("</seg>", "") for txt in segment_txts]
    return "\n".join(segment_txts)
def speeches_with_name(tree, name):
    """
    Return the serialised <u> (utterance) elements whose 'who' attribute
    contains *name* as a case-insensitive substring.  Returns a list of
    strings, one per matching utterance.
    """
    us = tree.findall('.//u')
    texts = []
    for u in us:
        # NOTE(review): assumes every <u> carries a 'who' attribute -- an
        # utterance without one would raise KeyError.  Confirm against data.
        if name.lower() in u.attrib['who'].lower():
            text = etree.tostring(u, pretty_print=True, encoding="UTF-8").decode("utf-8")
            texts.append(text)
    return texts
if __name__ == '__main__':
    # NOTE(review): validate_parla_clarin_example() is not defined in this
    # module, so running the file as a script raises NameError -- confirm
    # the intended entry point.
    validate_parla_clarin_example()
    #update_test()
| ninpnin/parlaclarin | pyparlaclarin/utils.py | utils.py | py | 4,236 | python | en | code | 0 | github-code | 13 |
29567081426 | import streamlit as st
import bcrypt
import datetime
import pandas
import pandas.io.sql as psql
# \COPY datatable(Merchants, MerchName2, TPV, TPC, Fees, Rev$, TPV$, Day, Date, Week, Month, Quarter, Year, Currency, Country, Product, SubProduct, Vertical, Category, Classification) FROM 'C:\Users\Nzubechukwu Onyekaba\Desktop\project\data.csv' DELIMITER ',' CSV HEADER encoding 'UTF8';
# get data function
def data_table(c):
    """Create the ``datatable`` table (per-merchant daily transaction rows)
    if it does not exist yet.  *c* is an open database cursor; the caller is
    responsible for committing."""
    # The calendar columns (Day/Week/Month/Quarter/Year) are denormalised
    # copies of Date, presumably to speed up dashboard filters -- confirm.
    c.execute(
        '''
        CREATE TABLE IF NOT EXISTS datatable(
        ID  BIGSERIAL PRIMARY KEY,
        Merchants VARCHAR(300) DEFAULT NULL,
        MerchName2 VARCHAR(250) DEFAULT NULL,
        TPV DECIMAL(15,2) DEFAULT 0,
        TPC BIGINT DEFAULT 1 CHECK (TPC>=0),
        Fees DECIMAL(15,2) DEFAULT 0,
        Rev$ DECIMAL(13,2) DEFAULT 0,
        TPV$ DECIMAL(15,2) DEFAULT 0,
        Day SMALLINT DEFAULT NULL CHECK (Day<=31),
        Date TIMESTAMP DEFAULT NULL,
        Week SMALLINT DEFAULT NULL CHECK (Week<=53),
        Month SMALLINT DEFAULT NULL CHECK (Month<=12),
        Quarter SMALLINT DEFAULT NULL CHECK (Quarter<=4),
        Year SMALLINT DEFAULT NULL CHECK (Year>=2016),
        Currency VARCHAR(4) DEFAULT NULL,
        Country VARCHAR(3) DEFAULT NULL,
        Product VARCHAR(100) DEFAULT NULL,
        SubProduct VARCHAR(150) DEFAULT NULL,
        Vertical VARCHAR(30) DEFAULT NULL,
        Category VARCHAR(25) DEFAULT NULL,
        Classification VARCHAR(20) DEFAULT NULL
        )
        ''')
# \COPY storetxn(AccountID, StoreName, TPV, Fees, Rev$, TPV$, Rate, Day, Date, Week, Month, Quarter, Year, Currency, Country, PaymentType, Band) FROM 'C:\Users\Nzubechukwu Onyekaba\Desktop\project\StoreTrxn.csv' DELIMITER ',' CSV HEADER encoding 'UTF8';
def create_storetxn(c):
    """Create the ``storetxn`` table (per-store transaction rows) if missing."""
    c.execute(
        '''
        CREATE TABLE IF NOT EXISTS storetxn(
        ID BIGSERIAL PRIMARY KEY,
        AccountID INT DEFAULT NULL,
        StoreName VARCHAR(300) DEFAULT NULL,
        TPV DECIMAL(15,2) DEFAULT 0,
        Fees DECIMAL(15,2) DEFAULT 0,
        Rev$ DECIMAL(13,2) DEFAULT 0,
        TPV$ DECIMAL(13,2) DEFAULT 0,
        Rate DECIMAL(7,5) DEFAULT 0,
        Day SMALLINT DEFAULT NULL CHECK (Day<=31),
        Date TIMESTAMP DEFAULT NULL,
        Week SMALLINT DEFAULT NULL CHECK (Week<=53),
        Month SMALLINT DEFAULT NULL CHECK (Month<=12),
        Quarter SMALLINT DEFAULT NULL CHECK (Quarter<=4),
        Year SMALLINT DEFAULT NULL CHECK (Year>=2016),
        Currency VARCHAR(4) DEFAULT NULL,
        Country VARCHAR(3) DEFAULT NULL,
        PaymentType VARCHAR(50) DEFAULT NULL,
        Band VARCHAR(10) DEFAULT NULL
        )
        ''')
# \COPY ravestore(merchantid,storename,registrationdate,storecreationdate,day,week,month,year,country,category,status) FROM 'C:\Users\Nzubechukwu Onyekaba\Desktop\project\RaveStore.csv' DELIMITER ',' CSV HEADER encoding 'UTF8';
def create_ravestore(c):
    """Create the ``ravestore`` table (store registration records) if missing."""
    c.execute('''
        CREATE TABLE IF NOT EXISTS ravestore(
        ID BIGSERIAL PRIMARY KEY,
        MerchantID INT DEFAULT NULL,
        StoreName VARCHAR(300) DEFAULT NULL,
        RegistrationDate TIMESTAMP DEFAULT NULL,
        StorecreationDate TIMESTAMP DEFAULT NULL,
        Day SMALLINT DEFAULT NULL CHECK (Day<=31),
        Week SMALLINT DEFAULT NULL CHECK (Week<=53),
        Month SMALLINT DEFAULT NULL CHECK (Month<=12),
        Quarter SMALLINT DEFAULT NULL CHECK (Quarter<=4),
        Year SMALLINT DEFAULT NULL CHECK (Year>=2016),
        Country VARCHAR(3) DEFAULT NULL,
        Category VARCHAR(100) DEFAULT NULL,
        Status VARCHAR(20) DEFAULT NULL
        )
        '''
              )
# \COPY country(abbreviation,country) FROM 'C:\Users\Nzubechukwu Onyekaba\Desktop\project\country.csv' DELIMITER ',' CSV HEADER encoding 'UTF8';
def create_country(c):
    # Lookup table mapping 2-letter country abbreviations to country names.
    c.execute('CREATE TABLE IF NOT EXISTS country (id SERIAL PRIMARY KEY, abbreviation VARCHAR(2) UNIQUE, country VARCHAR(100) UNIQUE)')
def create_entrpsemertable(c):
    # List of enterprise merchant names (unique).
    c.execute(
        'CREATE TABLE IF NOT EXISTS entrpsemertable(id SERIAL PRIMARY KEY, merchants VARCHAR(250) UNIQUE)')
def create_usertable(c):
    # Dashboard login accounts.  The password column holds a bcrypt hash
    # (login_user verifies it with bcrypt.checkpw).
    c.execute('CREATE TABLE IF NOT EXISTS userstable(id SERIAL PRIMARY KEY, email VARCHAR(50) UNIQUE, vertical VARCHAR(25), password VARCHAR, admin BOOLEAN DEFAULT FALSE)')
def add_userdata(c, email, vertical, password):
    # NOTE(review): the password is stored exactly as given -- callers must
    # pass an already-bcrypt-hashed value for login_user to work.  Confirm.
    c.execute('INSERT INTO userstable(email,vertical,password) VALUES (%s,%s,%s)',
              (email, vertical, password))
def login_user(c, email, password):
    """Authenticate *email*/*password* against ``userstable``.

    Returns the matching rows when the bcrypt hash stored in the password
    column (row index 3) verifies, otherwise an empty list.  Previously any
    error (unknown email, malformed stored hash, DB failure) fell through
    a silent ``except ... pass`` and implicitly returned ``None``; now every
    failure path consistently returns ``[]`` (still falsy for callers).
    """
    try:
        c.execute('SELECT * FROM userstable WHERE email = %s', ([email]))
        data = c.fetchall()
        # data is empty for an unknown email -> IndexError below -> [].
        if bcrypt.checkpw(password.encode('utf-8'), data[0][3].encode('utf-8')):
            return data
        return []
    except Exception:
        # Unknown user, invalid stored hash, or a database error.
        return []
def create_targetable(c):
    # Single-row table (id=1) holding the global revenue targets.
    c.execute('CREATE TABLE IF NOT EXISTS targetable(id SERIAL PRIMARY KEY, last_month_target INTEGER, month_target INTEGER, year_target INTEGER)')
def view_all_users(conn):
    """Return every dashboard account as a DataFrame with friendly headers."""
    dfusers = psql.read_sql('''SELECT * FROM userstable''',
                            conn)
    dfusers.columns = ['id', 'Email', 'Team', 'Password', 'Admin']
    return dfusers
def view_all_targets(c):
    # Despite the name, this reads the per-vertical targets table.
    c.execute('SELECT * FROM vertargetable')
    data = c.fetchall()
    return data
def update_target(c, lastmonthtarget=0, monthtarget=0, yeartarget=0):
    """Update the global targets row (id=1); a value of 0 means 'leave as is'."""
    if lastmonthtarget != 0:
        c.execute(
            'UPDATE targetable SET last_month_target = %s WHERE id = %s', (lastmonthtarget, 1))
    if monthtarget != 0:
        c.execute(
            'UPDATE targetable SET month_target = %s WHERE id = %s', (monthtarget, 1))
    if yeartarget != 0:
        c.execute(
            'UPDATE targetable SET year_target = %s WHERE id = %s', (yeartarget, 1))
def get_target(c):
    # Fetch the single global targets row.
    c.execute('SELECT * FROM targetable WHERE id = 1')
    data = c.fetchall()
    return data
def create_notes(c):
    """Create one notes table per report section; each holds at most one
    note per calendar date (date_created is UNIQUE)."""
    c.execute('''CREATE TABLE IF NOT EXISTS dailysumnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, dailysum VARCHAR(1500))''')
    # NOTE(review): the text column below is spelled 'weeklysumn' (not
    # 'weeklysum') -- any DML on weeklysumnotes must use that exact name.
    c.execute('''CREATE TABLE IF NOT EXISTS weeklysumnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, weeklysumn VARCHAR(1500))''')
    c.execute('''CREATE TABLE IF NOT EXISTS weeklycurrnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, weeklycurr VARCHAR(1500))''')
    c.execute('''CREATE TABLE IF NOT EXISTS weeklybarnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, weeklybar VARCHAR(1500))''')
    c.execute('''CREATE TABLE IF NOT EXISTS accmgtgainnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, accmgtgain VARCHAR(3000))''')
    c.execute('''CREATE TABLE IF NOT EXISTS accmgtlossnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, accmgtloss VARCHAR(3000))''')
    c.execute('''CREATE TABLE IF NOT EXISTS smesummnotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, smesumm VARCHAR(1500))''')
    c.execute('''CREATE TABLE IF NOT EXISTS pipelinenotes(id SERIAL PRIMARY KEY,date_created DATE NOT NULL UNIQUE DEFAULT CURRENT_DATE, pipeline VARCHAR(1500))''')
def edit_notes(c, today1, note, nameofnote):
    """Insert the note text for the report section *nameofnote* on *today1*,
    or update it when a row for that date already exists.

    The original repeated the same try-INSERT/except-UPDATE block once per
    section; it is now table-driven.  This also fixes the 'WeeklySummary'
    branch, which wrote to a column ``weeklysum`` that does not exist --
    create_notes declares the column as ``weeklysumn``.
    """
    # UI section label -> (table, text column); names must match the DDL in
    # create_notes.
    sections = {
        'DailySummary': ('dailysumnotes', 'dailysum'),
        'WeeklySummary': ('weeklysumnotes', 'weeklysumn'),
        'WeeklyCurrency': ('weeklycurrnotes', 'weeklycurr'),
        'WeeklyBarter': ('weeklybarnotes', 'weeklybar'),
        'AccMgtGain': ('accmgtgainnotes', 'accmgtgain'),
        'AccMgtLoss': ('accmgtlossnotes', 'accmgtloss'),
        'SME': ('smesummnotes', 'smesumm'),
        'Pipeline': ('pipelinenotes', 'pipeline'),
    }
    if not note or nameofnote not in sections:
        return
    # NOTE(review): this compares day-of-month only, so it behaves oddly
    # across month boundaries; kept as-is to preserve existing behaviour.
    if datetime.datetime.now().day - today1.day < 0:
        return
    table, column = sections[nameofnote]
    try:
        # date_created is UNIQUE, so the INSERT fails when a note for this
        # date already exists and we fall back to an UPDATE.
        c.execute(
            f'INSERT INTO {table}(date_created,{column}) VALUES (%s,%s)', (today1, note))
    except Exception:
        c.execute(
            f'UPDATE {table} SET {column} = %s WHERE date_created = %s', (note, today1))
def view_notes(c, today1, nameofnote):
    """Fetch the stored note rows for section *nameofnote* on date *today1*."""
    # NOTE(review): if nameofnote matches no branch, fetchall() below runs
    # against whatever query the cursor executed last -- confirm callers
    # only pass the labels listed here.
    if nameofnote == 'DailySummary':
        c.execute(
            'SELECT * FROM dailysumnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'WeeklySummary':
        c.execute(
            'SELECT * FROM weeklysumnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'WeeklyCurrency':
        c.execute(
            'SELECT * FROM weeklycurrnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'WeeklyBarter':
        c.execute(
            'SELECT * FROM weeklybarnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'AccMgtGain':
        c.execute(
            'SELECT * FROM accmgtgainnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'AccMgtLoss':
        c.execute(
            'SELECT * FROM accmgtlossnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'SME':
        c.execute(
            'SELECT * FROM smesummnotes WHERE date_created = %s', ([today1]))
    elif nameofnote == 'Pipeline':
        c.execute(
            'SELECT * FROM pipelinenotes WHERE date_created = %s', ([today1]))
    data = c.fetchall()
    return data
def create_vertargetable(c):
    # Per-vertical month/year revenue targets (one row per vertical).
    c.execute('CREATE TABLE IF NOT EXISTS vertargetable(id SERIAL PRIMARY KEY, vertical VARCHAR(55) UNIQUE, month_target INTEGER, year_target INTEGER)')
def create_livetargetable(c):
    # Per-vertical "go-live" targets.
    c.execute('CREATE TABLE IF NOT EXISTS livetargetable(id SERIAL PRIMARY KEY, vertical VARCHAR(55) UNIQUE, live_target INTEGER)')
def get_vertarget(c, team_name):
    # NOTE(review): team_name is passed straight through as the parameter
    # sequence; other callers index team_name[0], so this appears to expect
    # a one-element list from a multiselect widget -- confirm.
    c.execute('''SELECT * FROM vertargetable WHERE vertical = %s''', (team_name))
    data = c.fetchall()
    return data
def get_livetarget(c, team_name):
    # Returns a single row (fetchone), unlike get_vertarget's fetchall.
    c.execute('SELECT * FROM livetargetable WHERE vertical = %s', (team_name))
    data = c.fetchone()
    return data
def edit_vertargetable(c, team_name, monthtarget2=0, yeartarget2=0):
    """Update the month OR year target for vertical team_name[0]; a value of
    0 means 'no change'.  Only one of the two is applied (elif)."""
    if monthtarget2 != 0:
        c.execute('UPDATE vertargetable SET month_target = %s WHERE vertical = %s',
                  (monthtarget2, team_name[0]))
    elif yeartarget2 != 0:
        c.execute('UPDATE vertargetable SET year_target = %s WHERE vertical = %s',
                  (yeartarget2, team_name[0]))
    else:
        pass
def edit_livetargetable(c, team_name, livetarget2):
    """Upsert the live target for vertical team_name[0]: INSERT first, fall
    back to UPDATE when the UNIQUE(vertical) constraint rejects it."""
    try:
        c.execute('INSERT INTO livetargetable(vertical,live_target) VALUES (%s,%s)',
                  (team_name[0], livetarget2))
    except Exception:
        c.execute('UPDATE livetargetable SET live_target = %s WHERE vertical = %s',
                  (livetarget2, team_name[0]))
def create_bestcase(c):
    # Best-case projection figure per merchant (one row per MerchName2).
    c.execute('CREATE TABLE IF NOT EXISTS projection(id SERIAL PRIMARY KEY, MerchName2 VARCHAR(75) UNIQUE, best_fig DECIMAL(9,2))')
def update_bestcase(c, merch_name, best_fig):
    """Upsert the best-case figure for merchant merch_name[0].

    Rejects the 'All' multiselect option and the sentinel figure 1 with a
    Streamlit warning instead of writing them to the table.
    """
    if 'All' not in merch_name and best_fig != 1:
        try:
            # MerchName2 is UNIQUE -> duplicate insert raises, so update.
            c.execute('INSERT INTO projection(MerchName2,best_fig) VALUES (%s,%s)',
                      (merch_name[0], best_fig))
        except Exception:
            c.execute('UPDATE projection SET best_fig = %s WHERE MerchName2 = %s',
                      (best_fig, merch_name[0]))
    else:
        st.warning(
            'Please input merchants one at a time, and unselect the All option')
def delete_bestcase(c, del_merch_name):
    """Remove each merchant in *del_merch_name* from the projection table,
    reporting the outcome through Streamlit; no-op for an empty selection."""
    if not del_merch_name:
        return
    try:
        for merchant in del_merch_name:
            c.execute(
                'DELETE FROM projection WHERE MerchName2 = %s', ([merchant]))
    except Exception:
        st.warning(
            f'{del_merch_name} Failed to delete Merchant, please try again')
    else:
        # Only shown when every DELETE succeeded.
        st.success(f'{del_merch_name} deleted sucessfully')
def delete_user(c, del_email):
    """Delete each account e-mail in *del_email* from userstable, reporting
    success/failure via Streamlit; no-op for an empty selection."""
    if del_email:
        try:
            for email in del_email:
                c.execute('DELETE FROM userstable WHERE email = %s', ([email]))
        except Exception:
            st.warning(f'Failed to delete {del_email}, please try again')
        else:
            # Only shown when every DELETE succeeded.
            st.success(f'{del_email} deleted sucessfully')
    else:
        pass
def get_bestcase(conn):
    """Return the projection table as a DataFrame with columns
    ['MerchName2', 'bestCase'] (the serial id column is dropped)."""
    dfpro = psql.read_sql('''SELECT * FROM projection''', conn)
    dfpro.columns = ['SN', 'MerchName2', 'bestCase']
    dfpro = dfpro.iloc[:, 1:]
    return dfpro
def create_weeklynewold_merch(c):
    # Two lists of merchant names for the weekly report: newly added vs old.
    c.execute(
        'CREATE TABLE IF NOT EXISTS newmerch(id SERIAL PRIMARY KEY, new VARCHAR(75) UNIQUE)')
    c.execute(
        'CREATE TABLE IF NOT EXISTS oldmerch(id SERIAL PRIMARY KEY, old VARCHAR(75) UNIQUE)')
def get_weeklynewold_merch(c, new_old):
    """Fetch all rows from newmerch or oldmerch depending on *new_old*."""
    # NOTE(review): for any other new_old value, fetchall() runs against the
    # cursor's previous query -- confirm callers pass only 'new'/'old'.
    if new_old == 'new':
        c.execute('SELECT * FROM newmerch')
    elif new_old == 'old':
        c.execute('SELECT * FROM oldmerch')
    data = c.fetchall()
    return data
def update_weeklynewold_merch(c, new_old, merch_name2):
    """Add merchant merch_name2[0] to the 'new' or 'old' list; the UNIQUE
    constraint turns duplicates into an informational Streamlit message."""
    if 'All' not in merch_name2:
        if new_old == 'new':
            try:
                c.execute('INSERT INTO newmerch(new) VALUES (%s)',
                          (merch_name2[0]))
            except Exception:
                st.info('Merchant Already Exists')
        elif new_old == 'old':
            try:
                c.execute('INSERT INTO oldmerch(old) VALUES (%s)',
                          (merch_name2[0]))
            except Exception:
                st.info('Merchant Already Exists')
    else:
        st.warning(
            'Please input merchants one at a time, and unselect the All option')
def delete_weeklynewold_merch(c, new_old, del_merch_name2):
    """Delete each merchant in *del_merch_name2* from the 'new' or 'old'
    list, reporting the outcome via Streamlit; no-op for an empty selection."""
    if del_merch_name2:
        try:
            if new_old == 'new':
                for name in del_merch_name2:
                    c.execute('DELETE FROM newmerch WHERE new = %s', ([name]))
            elif new_old == 'old':
                for name in del_merch_name2:
                    c.execute('DELETE FROM oldmerch WHERE old = %s', ([name]))
        except Exception:
            st.warning(
                f'{del_merch_name2} Failed to delete Merchant, please try again')
        else:
            # Only shown when every DELETE succeeded.
            st.success(f'{del_merch_name2} deleted sucessfully')
    else:
        pass
# \COPY appusers(email, vertical) FROM 'C:\Users\Nzubechukwu Onyekaba\Desktop\project\appusers.csv' DELIMITER ',' CSV HEADER encoding 'UTF8';
def create_appusertable(c):
    # Whitelist of e-mail addresses permitted to use the app.
    c.execute(
        ''' CREATE TABLE IF NOT EXISTS appusertable(id SERIAL PRIMARY KEY, email VARCHAR(50) UNIQUE, vertical VARCHAR(50)) ''')
def view_appusers(conn, email):
    """Return the whitelist row(s) for *email* as a DataFrame."""
    dfappusers = psql.read_sql('''
    SELECT *
    FROM appusertable
    WHERE email = %(s1)s
    ''', conn, params={'s1': email})
    dfappusers.columns = ['ID', 'Email', 'Vertical']
    return dfappusers
def view_all_appusers(conn):
    """Return the full app-user whitelist as a DataFrame."""
    dfappusers = psql.read_sql('''
    SELECT *
    FROM appusertable
    ''', conn)
    dfappusers.columns = ['ID', 'Email', 'Vertical']
    return dfappusers
def add_appuser(c, email, vertical):
    """Whitelist *email* (restricted to the flutterwavego domain) for app use.

    Bug fix: the INSERT previously listed two columns but only one ``%s``
    placeholder, and passed ``([email], [vertical])`` -- two single-element
    lists -- as the parameter sequence, so every insert failed and was
    misreported as 'User already permmitted'.
    """
    if '@flutterwavego' in email:
        try:
            c.execute(
                ''' INSERT INTO appusertable(email,vertical) VALUES (%s,%s)''',
                (email, vertical))
        except Exception:
            # appusertable.email is UNIQUE -> duplicate insert raises.
            st.warning('User already permmitted')
    else:
        pass
def delete_appuser(c, del_appuser_email):
    """Delete each e-mail in *del_appuser_email* from the app-user whitelist.

    Fixes the copy-pasted status messages, which referred to merchants even
    though this function removes app users (and misspelled 'successfully').
    """
    if del_appuser_email:
        try:
            for name in del_appuser_email:
                c.execute(
                    'DELETE FROM appusertable WHERE email = %s', ([name]))
        except Exception:
            st.warning(
                f'{del_appuser_email} Failed to delete user, please try again')
        else:
            # Only shown when every DELETE succeeded.
            st.success(f'{del_appuser_email} deleted successfully')
    else:
        pass
| Jude-X/reportapp | db.py | db.py | py | 17,729 | python | en | code | 0 | github-code | 13 |
21632108474 | import datetime
from django.contrib import admin
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from register.admin.core import NotNullFilter
from register.dates import get_ranges_for_dates
from register.models.accommodation import Accomm
class SpecialNeedsNotNullFilter(NotNullFilter):
    """Admin list filter on whether ``special_needs`` is set (non-NULL)."""
    title = "Special Needs"
    parameter_name = "special_needs"
class AccommAdmin(admin.ModelAdmin):
    """Django admin for Accomm rows: attendee details, stay dates,
    reconfirmation flag, bursary status and an editable room column."""
    list_display = (
        'attendee', 'full_name', 'email',
        'dates', 'reconfirm', 'bursary', 'room'
    )
    # Room can be assigned directly from the change-list page.
    list_editable = ('room',)
    list_filter = (
        'childcare', 'nights', SpecialNeedsNotNullFilter,
        'attendee__reconfirm', 'attendee__user__bursary__accommodation_status',
    )
    search_fields = (
        'attendee__user__username', 'attendee__user__first_name',
        'attendee__user__last_name'
    )
    def full_name(self, instance):
        """Attendee's full name, sortable by last name."""
        return instance.attendee.user.get_full_name()
    full_name.admin_order_field = 'attendee__user__last_name'
    def email(self, instance):
        """Attendee's e-mail address."""
        return instance.attendee.user.email
    email.admin_order_field = 'attendee__user__email'
    def reconfirm(self, instance):
        """Boolean icon for the attendee's reconfirmation flag."""
        return _boolean_icon(instance.attendee.reconfirm)
    reconfirm.short_description = 'Confirmed?'
    reconfirm.admin_order_field = 'attendee__reconfirm'
    def bursary(self, instance):
        """Accommodation-bursary status from the attendee's bursary record."""
        return instance.attendee.user.bursary.accommodation_status
    bursary.short_description = 'Accomm bursary status'
    bursary.admin_order_field = 'attendee__user__bursary__accommodation_status'
    def dates(self, instance):
        """Human-readable summary of each contiguous stay, e.g.
        '2019-07-21 eve. to 2019-07-28 morn. (7 nights)'."""
        to_show = []
        # Collapse the individual booked nights into contiguous ranges.
        stays = get_ranges_for_dates(
            night.date for night in instance.nights.all()
        )
        for first_night, last_night in stays:
            last_morning = last_night + datetime.timedelta(days=1)
            num_nights = (last_morning - first_night).days
            to_show.append("%s eve. to %s morn. (%s nights)" % (
                first_night, last_morning, num_nights
            ))
        return '; '.join(to_show)
admin.site.register(Accomm, AccommAdmin)
| muhammed-ajmal/heroku | register/admin/accommodation.py | accommodation.py | py | 2,118 | python | en | code | 0 | github-code | 13 |
18605982314 | from scenario_builder import Scenario
from scenario_builder.openbach_functions import StartJobInstance
from scenario_builder.helpers.network.ip_route import ip_route
from scenario_builder.helpers.network.sr_tunnel import create_sr_tunnel
from scenario_builder.helpers.postprocessing.histogram import cdf_on_same_graph
from scenario_builder.helpers.postprocessing.time_series import time_series_on_same_graph
SCENARIO_NAME = 'network_sr_tunnel'
SCENARIO_DESCRIPTION = """This scenario creates a SR tunnel between 2 entities.
It launches 'sr_tunnel' which is a program which implements a Selective Repeat
algorithm at the IP level within a TUN/TAP tunnel. A good illustration of
the algorithm implemented is given here :
https://www2.tkn.tu-berlin.de/teaching/rn/animations/gbn_sr/.
**Important Note** : the traffic needs to be sent to the 'tun0' interfaces in order to
activate the Selective Repeate process.
"""
def sr_tunnel(
        server_entity, client_entity, server_ip, server_tun_ip, client_tun_ip, server_port,
        trace, server_drop, client_drop, server_burst, client_burst, duration,
        scenario_name=SCENARIO_NAME):
    """Build the scenario that opens a Selective-Repeat tunnel between the
    server and client entities and return it.

    The IP/trace values are registered as scenario constants so the helper
    can reference them with '$name' placeholders.  *duration* is accepted
    for signature compatibility with build() but is not used here -- build()
    adds the stop_job_instance function itself.
    """
    scenario = Scenario(scenario_name, SCENARIO_DESCRIPTION)
    scenario.add_constant('server_ip', server_ip)
    scenario.add_constant('server_tun_ip', server_tun_ip)
    scenario.add_constant('client_tun_ip', client_tun_ip)
    scenario.add_constant('trace', trace)
    # The helper registers the tunnel jobs on the scenario as a side effect;
    # its return value was previously bound to an unused variable.
    create_sr_tunnel(
        scenario, server_entity, client_entity, '$server_ip', '$server_tun_ip',
        '$client_tun_ip', server_port, '$trace', server_drop, client_drop,
        server_burst, client_burst)
    return scenario
def build(
        server_entity, client_entity, server_ip, server_tun_ip, client_tun_ip, server_port=None,
        trace=None, server_drop=None, client_drop=None, server_burst=None, client_burst=None, duration=0,
        scenario_name=SCENARIO_NAME):
    """Public scenario factory: delegate to sr_tunnel(), then -- when a
    positive *duration* is given -- schedule a stop_job_instance that halts
    every started job after that many seconds."""
    scenario = sr_tunnel(
        server_entity, client_entity, server_ip, server_tun_ip, client_tun_ip, server_port,
        trace, server_drop, client_drop, server_burst, client_burst, duration, scenario_name)
    if duration:
        # Collect every job-start function so they can all be stopped at once.
        jobs = [f for f in scenario.openbach_functions if isinstance(f, StartJobInstance)]
        scenario.add_function('stop_job_instance', wait_launched=jobs, wait_delay=duration).configure(*jobs)
    return scenario
| CNES/openbach-extra | apis/scenario_builder/scenarios/network_sr_tunnel.py | network_sr_tunnel.py | py | 2,382 | python | en | code | 0 | github-code | 13 |
72187828819 | import time
import requests
import json
from lxml import etree
import re
import traceback
"""
爬取全国所有法院名称用于裁判文书搜索
"""
def get_proxy():
    """Return a requests-style proxies mapping, or None.

    Stub: currently returns None (no proxy), so requests go out directly.
    """
    # Wrap your own proxy-IP fetching method here.
    # Free proxy-IP crawler: https://github.com/SelemeneCFY/ip_pool.git
    pass
start_url = "http://tingshen.court.gov.cn/court"
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
def main():
    """Crawl the court names for every area code listed on the start page
    and append the de-duplicated names to fymc.txt, one per line."""
    resp = requests.get(start_url, headers=headers, proxies=get_proxy())
    tree = etree.HTML(resp.content)
    print(resp.text)
    code_li = tree.xpath("//div[@class='region-city _region_city']/span/@areacode")
    print(len(code_li))
    name_li = []
    for code in code_li:
        p_url = start_url + '?areaCode=' + code
        r = requests.get(p_url, headers=headers, proxies=get_proxy())
        # Throttle: one request per second to stay polite to the server.
        time.sleep(1)
        tree = etree.HTML(r.content)
        fy_name = tree.xpath("//a[contains(@href,'/court/')]//text()")
        name_li += fy_name
    # Deduplicate; note this discards the original ordering.
    name_li = list(set(name_li))
    # print(len(name_li))
    with open('fymc.txt', 'a')as f:
        for name in name_li:
            f.write(name + '\r\n')
if __name__ == '__main__':
    # print(get_proxy())
    main()
| yanxiaofei395118/CPWSSpider | cpwsSpider/cpwsSpider/spiders/get_fymc.py | get_fymc.py | py | 1,279 | python | en | code | 2 | github-code | 13 |
42642202934 | import sys
import time
from mwpyeditor.core import mwplugin, mwglobals, mwjobs
from mwpyeditor.core.mwplugin import load_plugin
from mwpyeditor.record import mwcell, mwland
def init_settings():
    """Change settings for which data is loaded in and how much of it is processed."""
    # Record types loaded by default for every plugin. Options:
    #   RECORDS_ALL      -- all types
    #   RECORDS_NODIAL   -- all types except: DIAL, INFO
    #   RECORDS_MOST     -- all types except: DIAL, INFO, CELL, LAND
    #   RECORDS_REFS     -- RECORDS_MIN, CELL, and anything that can be placed as a ref
    #   RECORDS_ITEMS    -- RECORDS_MIN, CONT, CREA, NPC_, LEVI, CELL, and items that can be held in inventories
    #   RECORDS_MIN      -- minimum types required for autocalc: MGEF, CLAS, RACE, SKIL
    #   RECORDS_DIALOGUE -- DIAL and INFO
    #   RECORDS_NONE     -- nothing except for TES3, which is always loaded
    mwglobals.default_records = mwglobals.RECORDS_DIALOGUE
    # Expand the initial list above.
    mwglobals.default_records += []
    # Automatically load the same record types for a plugin's ESM master
    # files, esp. Morrowind and expansions.
    mwplugin.auto_load_masters = False
    # Process large data for CELL and LAND.
    mwcell.init_references = True   # statics and other references placed in the world
    mwland.init_lod = False         # lod to show global map
    mwland.init_terrain = False     # normals, heights, colors, and textures of landscape (long load time)
def init_plugins():
    """Choose common plugins to load for TR and PT devs. Versions likely out of date."""
    # Only the vanilla masters are active; uncomment project plugins below
    # as needed for the task at hand.
    """Vanilla"""
    load_plugin('Morrowind.esm')
    load_plugin('Tribunal.esm')
    load_plugin('Bloodmoon.esm')
    """DLC"""
    # load_plugin('adamantiumarmor.esp')
    # load_plugin('AreaEffectArrows.esp')
    # load_plugin('bcsounds.esp')
    # load_plugin('EBQ_Artifact.esp')
    # load_plugin('entertainers.esp')
    # load_plugin('LeFemmArmor.esp')
    # load_plugin('master_index.esp')
    # load_plugin('Siege at Firemoth.esp')
    """Tamriel_Data"""
    # load_plugin('Tamriel_Data.esm')
    # load_plugin('TD_Addon.esp')
    """Tamriel Rebuilt"""
    # load_plugin('TR_Mainland.esp')
    # load_plugin('TR_Factions.esp')
    # load_plugin('TR_Travels.esp')
    # load_plugin('TR_Travels_(Preview_and_Mainland).esp')
    # load_plugin('TR_ThirrValley_v0075.ESP')
    # load_plugin('TR_ShipalShin_v0004.ESP')
    # load_plugin('TR_RestExterior.ESP')
    """Skyrim: Home of the Nords"""
    # load_plugin('Sky_Main_2021_10_08.ESP')
    # load_plugin('Sky_Markarth_2021-10-29.ESP')
    # load_plugin('Sky_Falkheim_2021_10_30.ESP')
    """Province: Cyrodiil"""
    # load_plugin('Cyrodiil_Main_2021_06_27.esp')
    # load_plugin('PC_Anvil_v0082.esp')
    # load_plugin('PC_Sutch_v0018.ESP')
def testing_area():
    """
    Anything put here is executed after settings and plugins are initialized. You can load additional plugins, run
    jobs, or anything else not possible through command line args (you can run those too).
    """
    """Jani's Jobs"""
    # mwjobs.find_creatures(file='files/SHOTN_Creas.csv')
    # mwjobs.exterior_doors(file='files/PC_Doors.csv')
    # mwjobs.exterior_doors(file='files/SHOTN_Doors.csv')
    # mwjobs.exterior_doors(file='files/TR_Doors.csv')
    # mwjobs.ref_map(file='files/SHOTN_Doors.csv', img='files/cellexp/SHOTN_CellExport.png',
    #                top=23, bottom=-3, left=-120, right=-94)
    # mwjobs.ref_map(file='files/PC_Doors.csv', img='files/cellexp/PC_CellExport.png',
    #                top=-35, bottom=-58, left=-141, right=-108)
    # mwjobs.ref_map(file='files/TR_Doors.csv', img='files/cellexp/TR_CellExport.png',
    #                top=29, bottom=-59, left=-39, right=49)
    # mwjobs.dump_dialogue(file='files/Dump.csv')
    # Currently active: export the dialogue choice tree to files/Dump.csv.
    dump = mwjobs.choice_tree()
    dump.to_csv('files/Dump.csv', index=False, header=True)
    """Start"""
    mwjobs.unique_dialogue("adanja")
    pass
"""
IGNORE AFTER THIS
"""
def main():
    """Script entry point: apply the persisted setting, load plugins, run
    the testing area, then handle any command-line arguments.

    Fixes the settings-file parsing: the stored value previously kept its
    trailing newline (readline() was not stripped), and a value containing
    '=' was silently truncated by split('=').
    """
    start_time = time.time()
    with open('mwpyeditor_settings.txt') as file:
        # Settings file format: a single KEY=VALUE line (only the first
        # line is read).  partition keeps any '=' inside the value intact.
        key, _, value = file.readline().strip().partition('=')
        if key:
            setattr(sys.modules['mwpyeditor.core.mwglobals'], key, value)
    init_settings()
    init_plugins()
    testing_area()
    print()
    if len(sys.argv) > 2 or '-h' in sys.argv or '--help' in sys.argv:
        args = mwplugin.init_args()
        mwplugin.handle_args(args)
    time_spent = time.time() - start_time
    print(f"\n** Time spent: {time_spent:.3f} seconds **")
if __name__ == '__main__':
    main()
| Dillonn241/MwPyEditor | mwpyeditor_start.py | mwpyeditor_start.py | py | 4,580 | python | en | code | 4 | github-code | 13 |
11728986451 | # import date time Module
from datetime import datetime as dt
# Prompt for the two times to compare (expected format HH:MM:SS).
t1=input('enter date in HH:MM:SS:')
t2=input('enter date in HH:MM:SS:')
# format Time
# strptime pattern shared by both parses; note this shadows the built-in
# format() for the rest of the module.
format= "%H:%M:%S"
def timedifference(time1, time2, fmt="%H:%M:%S"):
    """Return ``time2 - time1`` as a ``datetime.timedelta``.

    Both arguments are "HH:MM:SS" strings; *fmt* generalises the previously
    hard-coded module-level pattern and defaults to the same value.  On a
    parse failure the error is printed and the exception object is returned,
    preserving the original (admittedly odd) contract.

    The original body also ended with an unreachable ``return (t2 - t1)``
    that referred to module globals; it has been removed.
    """
    try:
        return dt.strptime(time2, fmt) - dt.strptime(time1, fmt)
    except (TypeError, ValueError) as e:
        print(e)
        return e
# Show the computed difference (a timedelta, or the exception on bad input).
print(timedifference(t1,t2))
| Srinivasareddymediboina/PYTHON-TOT | difftime.py | difftime.py | py | 471 | python | en | code | 0 | github-code | 13 |
5225373449 | import sqlite3
todo_data = sqlite3.connect("assignments_tracker.db")
c = todo_data.cursor()
# Create Users Table
'''
c.execute("""CREATE TABLE "users" (
"id" INTEGER NOT NULL,
"username" TEXT NOT NULL UNIQUE,
"password" TEXT,
PRIMARY KEY("id" AUTOINCREMENT)
);""")
'''
# Create Tasks Table
'''
c.execute("""CREATE TABLE "tasks" (
"id" INTEGER NOT NULL,
"task_user" TEXT NOT NULL,
"task_description" TEXT NOT NULL,
PRIMARY KEY("id" AUTOINCREMENT)
);""")
'''
todo_data.commit()
todo_data.close() | arelyx/TodoList | create_db.py | create_db.py | py | 507 | python | en | code | 0 | github-code | 13 |
42840385888 | import os
import glob
from re import split
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
scannet_dir='/root/data/ScanNet-v2-1.0.0/data/raw'
dump_dir='/root/data/scannet_dump'
num_process=32
def extract(seq,scannet_dir,split,dump_dir):
    """Run reader.py on one ScanNet sequence's .sens file, dumping depth,
    color, poses and intrinsics under dump_dir/<split>/<seq>."""
    assert split=='train' or split=='test'
    if not os.path.exists(os.path.join(dump_dir,split,seq)):
        os.mkdir(os.path.join(dump_dir,split,seq))
    # NOTE(review): the shell command is built by plain string concatenation,
    # so paths containing spaces or shell metacharacters would break it (and
    # the os.system() exit status is ignored) -- acceptable for the fixed
    # paths above, but confirm before reusing with arbitrary input.
    cmd='python reader.py --filename '+os.path.join(scannet_dir,'scans' if split=='train' else 'scans_test',seq,seq+'.sens')+' --output_path '+os.path.join(dump_dir,split,seq)+\
        ' --export_depth_images --export_color_images --export_poses --export_intrinsics'
    os.system(cmd)
if __name__=='__main__':
    # Create the output directory tree on first run only.
    if not os.path.exists(dump_dir):
        os.mkdir(dump_dir)
        os.mkdir(os.path.join(dump_dir,'train'))
        os.mkdir(os.path.join(dump_dir,'test'))
    # Sequence names are the scene* directory basenames under scans/scans_test.
    train_seq_list=[seq.split('/')[-1] for seq in glob.glob(os.path.join(scannet_dir,'scans','scene*'))]
    test_seq_list=[seq.split('/')[-1] for seq in glob.glob(os.path.join(scannet_dir,'scans_test','scene*'))]
    extract_train=partial(extract,scannet_dir=scannet_dir,split='train',dump_dir=dump_dir)
    extract_test=partial(extract,scannet_dir=scannet_dir,split='test',dump_dir=dump_dir)
    # Number of batches of num_process sequences (ceiling division).
    num_train_iter=len(train_seq_list)//num_process if len(train_seq_list)%num_process==0 else len(train_seq_list)//num_process+1
    num_test_iter=len(test_seq_list)//num_process if len(test_seq_list)%num_process==0 else len(test_seq_list)//num_process+1
    pool = Pool(num_process)
    for index in tqdm(range(num_train_iter)):
        seq_list=train_seq_list[index*num_process:min((index+1)*num_process,len(train_seq_list))]
        pool.map(extract_train,seq_list)
    pool.close()
    pool.join()
    # A fresh pool is required because the previous one was closed.
    pool = Pool(num_process)
    for index in tqdm(range(num_test_iter)):
        seq_list=test_seq_list[index*num_process:min((index+1)*num_process,len(test_seq_list))]
        pool.map(extract_test,seq_list)
    pool.close()
pool.join() | apple/ml-aspanformer | tools/extract.py | extract.py | py | 2,061 | python | en | code | 147 | github-code | 13 |
42783941624 | from tkinter import *
def EntrarClick ():
    # Callback for the "Entrar" button: echo the Entry widget's current text.
    print ('Has introducido la frase --- ' + fraseEntry.get() + ' --- y has pulsado el botón entrar')
def Button1Click ():
    # Callback for numeric button "1" in the bottom frame.
    print ('Has pulsado el botón 1')
# Root window with two equally-weighted rows; all widgets resize with it.
window = Tk()
window.geometry("400x400")
window.rowconfigure(0, weight=1)
window.rowconfigure(1, weight=1)
window.columnconfigure(0, weight=1)
# Top frame: three buttons (A/B/C), an Entry and the "Entrar" button.
topFrame = LabelFrame (window, text ='Display')
topFrame.grid(row=0, column=0, padx=5, pady=5, sticky=N + S + E + W)
topFrame.rowconfigure(0, weight=1)
topFrame.rowconfigure(1, weight=1)
topFrame.columnconfigure(0, weight=1)
topFrame.columnconfigure(1, weight=1)
topFrame.columnconfigure(2, weight=1)
AButton = Button(topFrame, text="A", bg='red', fg="white")
AButton.grid(row=0, column=0, padx=5, pady=5, sticky=N + S + E + W)
BButton = Button(topFrame, text="B", bg='yellow', fg="black")
BButton.grid(row=0, column=1, padx=5, pady=5, sticky=N + S + E + W)
CButton = Button(topFrame, text="C", bg='blue', fg="white")
CButton.grid(row=0, column=2, padx=5, pady=5, sticky=N + S + E + W)
fraseEntry = Entry(topFrame)
fraseEntry.grid(row=1, column=0, columnspan = 2, padx=5, pady=5, sticky=N + S + E + W)
EntrarButton = Button(topFrame, text="Entrar", bg='red', fg="white",command=EntrarClick)
EntrarButton.grid(row=1, column=2, padx=5, pady=5, sticky=N + S + E + W)
# Bottom frame: a 3x3 numeric keypad; only button 1 has a callback wired.
bottomFrame = LabelFrame (window, text ='Volar')
bottomFrame.grid(row=1, column=0, padx=5, pady=5, sticky=N + S + E + W)
bottomFrame.rowconfigure(0, weight=1)
bottomFrame.rowconfigure(1, weight=1)
bottomFrame.rowconfigure(2, weight=1)
bottomFrame.columnconfigure(0, weight=1)
bottomFrame.columnconfigure(1, weight=1)
bottomFrame.columnconfigure(2, weight=1)
Button1 = Button(bottomFrame, text="1", bg='red', fg="white",command=Button1Click)
Button1.grid(row=0, column=0, padx=5, pady=5, sticky=N + S + E + W)
Button2 = Button(bottomFrame, text="2", bg='yellow', fg="black")
Button2.grid(row=0, column=1, padx=5, pady=5, sticky=N + S + E + W)
Button3 = Button(bottomFrame, text="3", bg='blue', fg="white")
Button3.grid(row=0, column=2, padx=5, pady=5, sticky=N + S + E + W)
Button4 = Button(bottomFrame, text="4", bg='orange', fg="black")
Button4.grid(row=1, column=0, padx=5, pady=5, sticky=N + S + E + W)
Button5 = Button(bottomFrame, text="5", bg='red', fg="white")
Button5.grid(row=1, column=1, padx=5, pady=5, sticky=N + S + E + W)
Button6 = Button(bottomFrame, text="6", bg='yellow', fg="black")
Button6.grid(row=1, column=2, padx=5, pady=5, sticky=N + S + E + W)
Button7 = Button(bottomFrame, text="7", bg='blue', fg="white")
Button7.grid(row=2, column=0, padx=5, pady=5, sticky=N + S + E + W)
Button8 = Button(bottomFrame, text="8", bg='orange', fg="black")
Button8.grid(row=2, column=1, padx=5, pady=5, sticky=N + S + E + W)
Button9 = Button(bottomFrame, text="9", bg='pink', fg="black")
Button9.grid(row=2, column=2, padx=5, pady=5, sticky=N + S + E + W)
window.mainloop() | dronsEETAC/tallerFundesplai | Lib/botones2.py | botones2.py | py | 2,906 | python | en | code | 0 | github-code | 13 |
18481747254 | import pickle
import logging
import BeautifulSoup
import requests
from requests.exceptions import ConnectionError
class Scrapper(object):
    """Fetches one page of Monster India job-search results.

    The page is downloaded eagerly in the constructor, retrying
    indefinitely on connection failures.
    """

    RESUME_URL = 'http://jobsearch.monsterindia.com/searchresult-'

    def __init__(self, count = 1):
        """Download search-result page number *count*."""
        self.payload = "fts=&lmy=&ind=65&ctp=0&job="
        self.headers = {
            "Content-Type" : "application/x-www-form-urlencoded"
        }
        self.url = self.RESUME_URL + str(count) + '.html'
        # Keep retrying until the request succeeds; connection errors are
        # printed and swallowed, any other exception propagates.
        fetched = False
        while not fetched:
            try:
                self.response = requests.get(self.url, headers = self.headers, data = self.payload)
                fetched = True
            except ConnectionError as exc:
                print(repr(exc))
                # time.sleep(10)

    def get_urls(self):
        """Return the href of every job-title link found on the page."""
        response_soup = BeautifulSoup.BeautifulSoup(self.response.content)
        anchors = response_soup.findAll("a", {"class":"title_in"})
        return [str(anchor.get('href')) for anchor in anchors]
class ResumeScrapper(object):
    """Scrapes a single Monster India job-posting page.

    The page is downloaded eagerly in the constructor (retrying forever on
    connection failures); the ``get_*`` accessors then extract individual
    fields from the parsed HTML, returning None (or {} for the summary)
    when a field is absent from the page.
    """

    def __init__(self, url):
        self.url = url
        self.headers = {
            "User-Agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36"
        }
        # Retry until the request succeeds; only connection errors are retried.
        while True:
            try:
                self.response = requests.get(self.url, headers = self.headers)
            except ConnectionError as exc:
                print(repr(exc))
                # time.sleep(10)
                continue
            break
        self.soup = BeautifulSoup.BeautifulSoup(self.response.content)

    def get_company_info_data(self):
        """Return the "about the company" blurb, or None if absent."""
        about_company_data = self.soup.findAll("div", {"class" : "desc"})
        try:
            # Last "desc" div holds the company description on this layout.
            about_company = about_company_data[-1].text.strip()
        except (IndexError, AttributeError):
            # Narrowed from a bare except: only a missing element means "no data".
            about_company = None
        return about_company

    def get_job_title(self):
        """Return the posting's job title, or None if absent."""
        job_title_element = self.soup.findAll("div", {"class" : "job_title"})
        try:
            job_title = job_title_element[0].text
        except (IndexError, AttributeError):
            job_title = None
        return job_title

    def get_job_description(self):
        """Return the job description text, or None if absent."""
        job_posted = self.soup.findAll("div", {"class" : "desc"})
        try:
            # First "desc" div is the posting body on this layout.
            job_description = job_posted[0].text.strip()
        except (IndexError, AttributeError):
            job_description = None
        return job_description

    def get_job_skills(self):
        """Return the comma-separated key skills string, or None if absent."""
        job_skills_element = self.soup.findAll("div", {"class" : "keyskill"})
        try:
            # The element reads "Keyskills: a, b, c" -- keep the part after ':'.
            job_skills = job_skills_element[0].text.split(':')[-1]
        except (IndexError, AttributeError):
            job_skills = None
        return job_skills

    def get_summary_data(self):
        """Return the sidebar summary as a {heading: value} dict ({} if absent)."""
        summary_data = self.soup.findAll("div", {"class" : "col-md-3 col-xs-12 pull-right jd_rol_section"})
        try:
            heading_data = summary_data[0].findAll("div", {"class" : "heading"})
            span_data = summary_data[0].findAll('span')
            summary = {}
            # Headings and spans are assumed to line up positionally --
            # entries whose span has no anchor are skipped.
            for heading, i in zip(heading_data, range(len(span_data))):
                try:
                    summary.update({heading.text : (span_data[i].findAll('a')[0].get('title'))})
                except (IndexError, AttributeError):
                    continue
        except (IndexError, AttributeError):
            summary = {}
        return summary
| dspkgp/web-scrapper | monster/scraper.py | scraper.py | py | 2,705 | python | en | code | 0 | github-code | 13 |
37613309892 | from panda3d.core import RenderState, ColorAttrib, Vec4, Point3, NodePath, CollisionBox, CollisionNode, CollisionTraverser, BitMask32
from panda3d.core import CollisionHandlerQueue, GeomNode
from .BoxTool import BoxTool, ResizeHandle, BoxAction
from direct.foundry import LEGlobals
from direct.foundry import LEUtils
from direct.foundry.ViewportType import VIEWPORT_3D_MASK, VIEWPORT_2D_MASK
from direct.foundry.Select import Select, Deselect
from direct.foundry.KeyBind import KeyBind
from direct.foundry.Box import Box
from direct.foundry.GeomView import GeomView
class SelectTool(BoxTool):
    """Box-select tool for the level editor.

    Extends BoxTool with object selection: clicking selects the object
    under the mouse (shift for multi-select), dragging a 2D box selects
    everything inside it, and the scroll wheel cycles through overlapping
    candidates while the button is held.  Relies on the global
    ``base.selectionMgr`` / ``base.viewportMgr`` singletons.
    """
    Name = "Select"
    ToolTip = "Select Tool"
    KeyBind = KeyBind.SelectTool
    Icon = "icons/editor-select.png"
    # 2D-only box: the selection rectangle is never drawn in the 3D viewport.
    Draw3DBox = False
    def __init__(self, mgr):
        BoxTool.__init__(self, mgr)
        # Yellow selection rectangle.
        self.box.setColor(Vec4(1, 1, 0, 1))
        # When True, mouseDown skips the pick entirely.
        self.suppressSelect = False
    def cleanup(self):
        """Release references before the tool object is destroyed."""
        self.suppressSelect = None
        self.multiSelect = None
        BoxTool.cleanup(self)
    def activate(self):
        """Hook mouse/keyboard events and notify the current selection mode."""
        BoxTool.activate(self)
        self.accept('shift-mouse1', self.mouseDown)
        self.accept('shift-mouse1-up', self.mouseUp)
        self.accept('wheel_up', self.wheelUp)
        self.accept('wheel_down', self.wheelDown)
        self.accept('shift', self.shiftDown)
        self.accept('shift-up', self.shiftUp)
        self.accept('selectionsChanged', self.selectionChanged)
        self.accept('selectionModeChanged', self.selectionModeChanged)
        base.selectionMgr.selectionMode.toolActivate()
    def deactivate(self):
        BoxTool.deactivate(self)
        base.selectionMgr.selectionMode.toolDeactivate()
    def selectionModeChanged(self, old, mode):
        # Newly active mode gets the tool-activation notification too.
        mode.toolActivate()
    def enable(self):
        BoxTool.enable(self)
        self.multiSelect = False
        self.mouseIsDown = False
    def shiftDown(self):
        # Holding shift switches clicks/wheel cycling into multi-select.
        self.multiSelect = True
    def shiftUp(self):
        self.multiSelect = False
    def selectionChanged(self):
        pass
    def mouseDown(self):
        """Begin a box drag and/or pick the object under the cursor."""
        vp = base.viewportMgr.activeViewport
        if not vp:
            return
        self.mouseIsDown = True
        BoxTool.mouseDown(self)
        if self.suppressSelect:
            return
        ret = base.selectionMgr.selectionMode.selectObjectUnderMouse(self.multiSelect)
        if (not ret) and (not self.multiSelect) and (self.state.action != BoxAction.ReadyToResize):
            # Deselect all if not doing multi-select and no hits
            self.deselectAll()
    def mouseUp(self):
        self.mouseIsDown = False
        vp = base.viewportMgr.activeViewport
        if not vp:
            return
        # Box dragging only happens in 2D viewports.
        if vp.is2D():
            BoxTool.mouseUp(self)
    def boxDrawnConfirm(self):
        """Select everything inside the drawn box (called by BoxTool)."""
        invalid, mins, maxs = self.getSelectionBox()
        if invalid:
            return
        base.selectionMgr.selectionMode.selectObjectsInBox(mins, maxs)
    def wheelUp(self):
        # Wheel cycles overlapping pick candidates only while dragging/clicking.
        if not self.mouseIsDown:
            return
        base.selectionMgr.selectionMode.cycleNextSelection(self.multiSelect)
    def wheelDown(self):
        if not self.mouseIsDown:
            return
        base.selectionMgr.selectionMode.cyclePreviousSelection(self.multiSelect)
    def escapeDown(self):
        BoxTool.escapeDown(self)
        self.deselectAll()
    def deselectAll(self):
        base.selectionMgr.selectionMode.deselectAll()
    def disable(self):
        BoxTool.disable(self)
        self.multiSelect = False
        self.mouseIsDown = False
| toontownretro/direct | src/foundry/SelectTool.py | SelectTool.py | py | 3,521 | python | en | code | 2 | github-code | 13 |
69837425939 | import censusdata
import pandas as pd
#function to make list of all county ids in state (given by census state id)
def county_list(state_number):
    """Return the FIPS county codes of every county in a state.

    state_number: two-digit state FIPS code as a string (e.g. '39' = Ohio).
    Returns a list of county FIPS code strings, queried from the ACS5 2018
    census geography API.
    """
    counties = censusdata.geographies(censusdata.censusgeo([('state', state_number), ('county', '*')]), 'acs5', 2018)
    # Each value's .geo is a tuple of (level, code) pairs; entry [1] is the
    # county level and [1][1] its code.  (The original used a local variable
    # that shadowed this function's own name.)
    return [county.geo[1][1] for county in counties.values()]
#function to pull defined variables for blocks in specified state, looping over countties
#(input state id and list of variables)
def block_pull(state_id, variable_list):
    """Download ACS5 2018 variables for every block group in a state.

    state_id: two-digit state FIPS code string.
    variable_list: census variable codes to download (e.g. ['B02001_001E']).
    Returns one DataFrame covering all block groups in the state.

    The census API requires a county to enumerate block groups, so we loop
    county-by-county and concatenate once at the end (instead of growing a
    DataFrame with pd.concat inside the loop, which is quadratic).
    """
    frames = []
    for county_id in county_list(state_id):
        geo = censusdata.censusgeo([('state', state_id), ('county', county_id), ('tract', '*'), ('block group', '*')])
        frames.append(censusdata.download('acs5', 2018, geo, variable_list))
    return pd.concat(frames)
# B02001_001E: total population estimate; state '39' is Ohio.
variables_list = ['B02001_001E']
county = block_pull('39', variables_list)
# Bug fix: the frame is named `county`; the original called `count.to_csv`,
# which raised NameError.
county.to_csv('test_upload.csv')
| bonfirefan/oh_schools_mlppl | assignment1/census_load.py | census_load.py | py | 1,043 | python | en | code | 0 | github-code | 13 |
41542251174 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from scipy.stats import norm
def calibration_plot(y_pred, y_true, bins=100, ax=None):
    """Plot a PIT-style calibration histogram for mixture-density predictions.

    y_pred: assumed shape (N, components, 3) with [:, :, 0] = mixture
        weights, [:, :, 1] = means, [:, :, 2] = stddevs -- TODO confirm
        against the model producing these.
    y_true: true targets, shape (N,).
    bins: histogram bin count over [0, 1].
    ax: optional matplotlib axis; a new figure/axis is created if omitted.
    Returns the axis drawn on.
    """
    if ax is None:
        fig = plt.figure(figsize=(15, 10))
        ax = fig.add_subplot(111)
    y_true = y_true.reshape(-1, 1)
    # P(prediction > y_true) under the predicted Gaussian mixture:
    # sum_k w_k * (1 - Phi((y_true - mu_k) / sigma_k)).
    prob = np.sum(
        y_pred[:, :, 0] * (1 - norm.cdf((y_true - y_pred[:, :, 1]) / y_pred[:, :, 2])),
        axis=-1, keepdims=True)
    # NOTE(review): sns.distplot is deprecated in modern seaborn -- confirm
    # the pinned seaborn version before upgrading.
    sns.distplot(prob, norm_hist=True, bins=bins, hist_kws={'range': (0, 1)}, kde=False, ax=ax)
    # A perfectly calibrated model produces a flat histogram at density 1.
    ax.axhline(1., linestyle='--', color='r')
    ax.set_xlim(0, 1)
    ax.set_ylim(0)
    return ax
def true_predicted(y_true, y_pred, agg='mean', quantile=True, ms=None, ax=None):
    """Scatter true vs. predicted values, optionally colored by quantile.

    y_true: true targets, shape (N,).
    y_pred: mixture predictions (N, components, 3) when agg='mean'
        (weights/means/stddevs as in calibration_plot -- TODO confirm), or
        already-reduced point predictions when agg='point'.
    agg: 'mean' collapses the mixture to its expected value; 'point' uses
        y_pred as-is.
    quantile: color points by the mixture quantile of the true value.
    ms: optional marker size passed to scatter.
    ax: optional matplotlib axis; created if omitted.
    Returns (axis, scatter artist) -- the artist is useful for a colorbar.
    Raises ValueError for an unknown agg.
    """
    if ax is None:
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
    ax.set_aspect('equal')
    if quantile:
        # Same survival-probability computation as calibration_plot.
        c_quantile = np.sum(y_pred[:, :, 0] * (1 - norm.cdf((y_true.reshape(-1, 1) - y_pred[:, :, 1]) / y_pred[:, :, 2])),
                            axis=-1, keepdims=False)
    else:
        c_quantile = None
    if agg == 'mean':
        # Expected value of the mixture: sum_k w_k * mu_k.
        y_pred_point = np.sum(y_pred[:, :, 0] * y_pred[:, :, 1], axis=1)
    elif agg == 'point':
        y_pred_point = y_pred
    else:
        raise ValueError(f'Aggregation type "{agg}" unknown')
    # Identity line for reference; limits padded by half a unit.
    limits = (np.min(y_true) - 0.5, np.max(y_true) + 0.5)
    ax.plot(limits, limits, 'k-', zorder=1)
    if ms is None:
        cbar = ax.scatter(y_true, y_pred_point, c=c_quantile, cmap='coolwarm', zorder=2)
    else:
        cbar = ax.scatter(y_true, y_pred_point, s=ms, c=c_quantile, cmap='coolwarm', zorder=2)
    ax.set_xlabel('$y_{true}$')
    ax.set_ylabel('$y_{pred}$')
    # Annotate R^2 of the point predictions in the top-left corner.
    r2 = metrics.r2_score(y_true, y_pred_point)
    ax.text(min(np.min(y_true), limits[0]), max(np.max(y_pred_point), limits[1]), f"$R^2={r2:.2f}$", va='top')
    return ax, cbar
30828661744 | import os
import maya.OpenMaya as om
from pymel import core as pm
from maya import OpenMaya as om, OpenMayaMPx as ompx
import zMayaTools.menus
from zMayaTools.menus import Menu
from zMayaTools import controller_editor, maya_helpers, material_assignment_menu, shelf_menus, joint_labelling, skin_clusters
from zMayaTools import animation_helpers, pick_walk, wireframes, fix_layer_editor_undo, attribute_reordering, component_tag_menu
try:
from importlib import reload
except ImportError:
pass
from zMayaTools import maya_logging
log = maya_logging.get_log()
# Only import hide_output_window in Windows. Don't load this from 2022 onwards, since Maya does
# this internally now.
if os.name == 'nt' and om.MGlobal.apiVersion() < 20220000:
from zMayaTools import hide_output_window
reload(hide_output_window)
else:
hide_output_window = None
class PluginMenu(Menu):
    """Builds and tears down all zMayaTools additions to Maya's main menus.

    Each add_* method grafts items into an existing Maya menu (forcing the
    deferred menu to build first via the matching MEL proc), so the mel.eval
    calls and insertAfter anchors are order-sensitive.  Runs only in
    interactive Maya sessions.
    """
    def __init__(self):
        super(PluginMenu, self).__init__()
        # Created lazily in add_show_shelf_menus; torn down in _remove_menu_items.
        self.shelf_menu = None
        self.shelf_preference_handler = None

    def _add_menu_items(self):
        """Create every zMayaTools menu item (called by the Menu base class)."""
        super(PluginMenu, self)._add_menu_items()

        # Make sure the file menu and other deferred menus are built.
        pm.mel.eval('buildDeferredMenus()')

        if os.name == 'nt':
            # This would be more logical to put in the top "Open" block, but we don't put it
            # there to avoid shifting around the important open/save menu items (shifting those
            # down would be annoying since you expect them to be a certain distance from the menu).
            # This is also not an important enough feature to put in such a high-profile place.
            # Instead, put it down in the "View" section.
            menu = 'mainFileMenu'

            def show_scene_in_explorer(unused):
                maya_helpers.open_scene_in_explorer()

            # It would be useful to grey the menu item out if the scene hasn't been saved, but there's
            # only a global callback for the menu and not for each menu item, and adding to the menu
            # callback is brittle.
            section = self.find_menu_section_containing_item(pm.menu('mainFileMenu', q=True, ia=True), 'viewSequenceItem')
            self.add_menu_item('zMayaTools_ViewSceneInExplorer', label='View Scene In Explorer', parent=menu, insertAfter=section[-1],
                    annotation='Show the current scene file in Explorer',
                    command=show_scene_in_explorer,
                    top_level_path='Misc|ViewSceneInExplorer')

        pm.mel.eval('ChaSkinningMenu("mainRigSkinningMenu")')

        self.add_menu_item('zMayaTools_ToggleMoveSkinnedJoints', label='Toggle Move Skinned Joints', parent=pm.mel.globals['gRigSkinningMenu'],
                insertAfter='moveSkinJointsItem',
                command='zMoveSkinnedJoints -toggle',
                sourceType='mel',
                image='smoothSkin.png',
                top_level_path='Rigging|ToggleMoveSkinnedJoints')

        self.add_menu_item('zMayaTools_CreateEditableJoints', label='Create Editable Joints', parent=pm.mel.globals['gRigSkinningMenu'],
                insertAfter='zMayaTools_ToggleMoveSkinnedJoints',
                command='zCreateEditableJoints',
                sourceType='mel',
                image='smoothSkin.png',
                top_level_path='Rigging|CreateEditableJoints')

        menu = 'MayaWindow|mainRigSkeletonsMenu'

        # Make sure the menu is built.
        pm.mel.eval('ChaSkeletonsMenu "%s";' % menu)

        def validate_character(unused):
            # Imported/reloaded lazily so edits to the module are picked up.
            from zMayaTools import validate_character
            reload(validate_character)
            validate_character.UI().run()

        self.add_menu_item('zMayaTools_ValidateCharacter', label='Validate Character', parent=menu, insertAfter='hikWindowItem',
                command=validate_character,
                top_level_path='Rigging|ValidateCharacter')

        # The same items are added to both the Deform menu and Rigging's Deformations menu.
        for menu in ['mainDeformMenu', 'mainRigDeformationsMenu']:
            # Make sure the menu is built.
            pm.mel.eval('ChaDeformationsMenu "MayaWindow|%s";' % menu)

            # Add "Mirror Weights" in the "Weights" section at the bottom of the Deform menu.
            menu_items = pm.menu(menu, q=True, ia=True)
            mirror_weights = self.find_item_with_command(menu_items, 'MirrorDeformerWeights')

            def run_copy_painted_weights(unused):
                from zMayaTools import copy_painted_weights
                reload(copy_painted_weights)
                ui = copy_painted_weights.UI()
                ui.run()

            self.add_menu_item('zMayaTools_CopyWeights_%s' % menu, label='Copy Deformer Weights', parent=menu,
                    annotation='Copy painted weights from one mesh to another',
                    insertAfter=menu_items[mirror_weights],
                    command=run_copy_painted_weights,
                    top_level_path='Rigging|CopyWeights')

            # Find the "Edit" section in the Deform menu, then find the "Blend Shape" submenu inside
            # that section.
            menu_items = pm.menu(menu, q=True, ia=True)
            section = self.find_menu_section_by_name(menu_items, pm.mel.eval('uiRes("m_ChaDeformationsMenu.kDeformEdit")'))
            submenu = self.find_submenu_by_name(section, 'Blend Shape', default=menu)

            def run_blend_shape_retargetting(unused):
                from zMayaTools import blend_shape_retargetting
                reload(blend_shape_retargetting)
                blend_shape_retargetting.UI().run()

            self.add_menu_item('zBlendShapeRetargetting_%s' % menu, label='Retarget Blend Shapes', parent=submenu,
                    command=run_blend_shape_retargetting,
                    image='blendShape.png',
                    top_level_path='Blend Shapes|RetargetBlendShapes')

            def run_split_blend_shapes(unused):
                from zMayaTools import split_blend_shapes
                split_blend_shapes.UI().run()

            self.add_menu_item('zSplitBlendShape_%s' % menu, label='Split Blend Shape', parent=submenu,
                    annotation='Split a blend shape across a plane',
                    command=run_split_blend_shapes,
                    image='blendShape.png',
                    top_level_path='Blend Shapes|SplitBlendShapes')

        self.add_rigging_tools()
        self.add_hide_output_window()
        self.add_show_shelf_menus()
        self.add_channel_box_editing()
        self.add_modify_menu_items()

        controller_editor.menu.add_menu_items()
        joint_labelling.menu.add_menu_items()

    def add_rigging_tools(self):
        """Add the rigging-tools section to Rigging > Control."""
        menu = 'MayaWindow|mainRigControlMenu'

        # Make sure the menu is built.
        pm.mel.eval('ChaControlsMenu "%s";' % menu)

        # If this ends up having a bunch of rigging tools this can be a submenu, but
        # for now just put this at the top.
        divider = self.add_menu_item('zMayaTools_RiggingDivider', divider=True, parent=menu, label='zMayaUtils')

        def run_eye_rig(unused):
            from zMayaTools.rigging import eye_rig
            eye_rig.create_eye_rig()
        self.add_menu_item('zMayaTools_EyeRig', label='Eye Rig', parent=menu, insertAfter=divider,
                command=run_eye_rig,
                top_level_path='Rigging|EyeRig')

    def add_hide_output_window(self):
        """Add the show/hide output window toggle (Windows only)."""
        # Add "Show Output Window" at the end of the Windows menu.
        if hide_output_window is None:
            return

        # Activate the user's current preference.
        hide_output_window.refresh_visibility()

        def refresh_menu_item():
            # Label reflects the action, so it flips with the current state.
            label = 'Show Output Window' if hide_output_window.is_hidden() else 'Hide Output Window'
            for menu_item in self.output_window_menu_items:
                pm.menuItem(menu_item, e=True, label=label)

        def toggle_output_window(unused):
            hide_output_window.toggle()
            refresh_menu_item()

        pm.mel.eval('buildDeferredMenus')
        menu_item = self.add_menu_item('zHideOutputWindow', parent='mainWindowMenu', command=toggle_output_window,
                label='Hide output window', # placeholder
                top_level_path='Misc|ToggleOutputWindow')
        # Keep handles to every copy of the item so the labels stay in sync.
        self.output_window_menu_items = self.get_related_menu_items(menu_item)
        refresh_menu_item()

    def add_show_shelf_menus(self):
        """Create the shelf menus and register their preference handler."""
        self.shelf_menu = shelf_menus.ShelfMenu()

        self.shelf_preference_handler = shelf_menus.create_preference_handler()
        self.shelf_preference_handler.register()

    def add_channel_box_editing(self):
        """Add the Edit > Reorder Attributes submenu (move attrs up/down)."""
        def move_attr_up(unused):
            attrs = maya_helpers.get_selected_cb_attributes()
            pm.zReorderAttribute(direction='up', attr=attrs)

        def move_attr_down(unused):
            attrs = maya_helpers.get_selected_cb_attributes()
            pm.zReorderAttribute(direction='down', attr=attrs)

        # Add "Move Attributes Up" and "Move Attributes Down" to the bottom of Edit.
        # Put this in a submenu, so the menu can be torn off while making a bunch of
        # attribute edits.
        #
        # The top_level_paths are set to make "Move Up" come before "Move Down" in the
        # standalone menu.
        menu = 'MayaWindow|mainEditMenu'
        move_attribute_menu = self.add_menu_item('zMayaTools_MoveAttributes', label='Reorder Attributes', parent=menu,
                subMenu=True, tearOff=True)
        self.add_menu_item('zMayaTools_MoveAttributeUp', label='Move Attributes Up', parent=move_attribute_menu,
                command=move_attr_up,
                annotation='Move a channel box attribute higher in the list',
                top_level_path='Reorder Attributes|Move1')

        self.add_menu_item('zMayaTools_MoveAttributeDown', label='Move Attributes Down', parent=move_attribute_menu,
                command=move_attr_down,
                annotation='Move a channel box attribute lower in the list',
                top_level_path='Reorder Attributes|Move2')

    def add_modify_menu_items(self):
        """Add Match Position to Modify > Match Transformations."""
        # Add Match Translation and Rotation to Modify > Match Transformations.
        # This menu item isn't added to the top-level zMayaTools menu, since it doesn't
        # really make sense on its own.
        pm.mel.eval('ModObjectsMenu "mainModifyMenu"')

        menu = 'mainModifyMenu|matchTransformsItem'
        menu_items = pm.menu(menu, q=True, ia=True)
        match_rotation = self.find_item_with_command(menu_items, 'MatchRotation')
        self.add_menu_item('zMayaTools_MatchPosition', label='Match Position',
                parent=menu,
                annotation='Match the translation and rotation of selected objects to the last-selected object.',
                insertAfter=menu_items[match_rotation],
                command='zMatchPosition', sourceType='mel')

    def _remove_menu_items(self):
        """Tear down everything _add_menu_items created (called on unload)."""
        super(PluginMenu, self)._remove_menu_items()

        # Remove shelf menus.
        if self.shelf_menu is not None:
            self.shelf_menu.remove()
            self.shelf_menu = None

        if self.shelf_preference_handler is not None:
            self.shelf_preference_handler.unregister()
            self.shelf_preference_handler = None

        controller_editor.menu.remove_menu_items()
        joint_labelling.menu.remove_menu_items()
menu = PluginMenu()
def initializePlugin(mobject):
    """Maya plugin entry point: register commands, menus and hooks.

    Menu/UI setup is skipped in batch (non-interactive) sessions; command
    registration below the early-return therefore only happens interactively.
    """
    plugin = ompx.MFnPlugin(mobject)

    if om.MGlobal.mayaState() != om.MGlobal.kInteractive:
        return

    menu.add_menu_items()
    material_assignment_menu.AssignMaterialsContextMenu.register()
    component_tag_menu.ComponentTagContextMenu.register()
    skin_clusters.MoveSkinnedJoints.register(plugin)
    animation_helpers.install()
    pick_walk.setup_runtime_commands()
    maya_helpers.setup_runtime_commands()
    wireframes.setup_runtime_commands()
    attribute_reordering.ReorderAttribute.register(plugin)

    # Opt-in workaround, controlled by a Maya optionVar.
    if pm.optionVar(q='zFixLayerEditorUndo'):
        fix_layer_editor_undo.install()
def uninitializePlugin(mobject):
    """Maya plugin exit point: unregister everything initializePlugin set up.

    Note there is no interactive-session guard here; the deregister calls
    are presumably safe to run even if registration was skipped -- TODO
    confirm in batch mode.
    """
    plugin = ompx.MFnPlugin(mobject)

    menu.remove_menu_items()
    material_assignment_menu.AssignMaterialsContextMenu.deregister()
    component_tag_menu.ComponentTagContextMenu.deregister()
    skin_clusters.MoveSkinnedJoints.deregister(plugin)
    animation_helpers.uninstall()
    fix_layer_editor_undo.uninstall()
    attribute_reordering.ReorderAttribute.unregister(plugin)
| zewt/zMayaTools | plug-ins/zMayaUtils.py | zMayaUtils.py | py | 12,476 | python | en | code | 102 | github-code | 13 |
28920155168 | separador = lambda y, x='=': print(f'{y}\n', 30 * f'{x}')
# Building sample data to store in a dictionary
marca = 'apple'
cor = 'cinza espacial'
tam = '14 pol'
modelo = 'Macbook air'
chip = 'm1'

# Packing the data into a dict
mac = {
    'marca': marca,
    'cor': cor,
    'tam': tam,
    'modelo': modelo,
    'chip': chip
}

# Sample data
carro = {
    'fabricante': 'Honda',
    'model': 'Civic ej6',
    'ano': '1995',
    'valor': 65000
}

separador('desempacotamento de dicionários')
# Unpacking dictionary values into separate variables
frabricante, model, ano, valor = carro['fabricante'], carro['model'], carro['ano'], carro['valor']
print(frabricante, model, ano, valor)

# Computing the area of a triangle
def calcula_area_triangulo():
    """Read a triangle's base and height from stdin and return its area.

    Reads two whitespace-separated integers -- base (b) then height (h) --
    and returns the area formatted as a 'cm2' string.  (The original
    docstring claimed comma-separated input, but split() splits on
    whitespace.)
    """
    dados = input('insira a área, base e altura do triângulo: ').split()
    b, h = map(int, dados)
    area = b * h / 2
    return f'{area} cm2'
# Prompt the user and show the computed area.
resultado = calcula_area_triangulo()
print(resultado)


"""
Em Python, **kwargs é uma sintaxe especial que permite
passar um número variável de argumentos nomeados para
uma função. A palavra-chave kwargs é uma abreviação para
"keyword arguments" (argumentos de palavra-chave).
"""
separador('uso de "**kwargs" na função')

# Sample record used to demonstrate **kwargs unpacking below.
dados_pessoa = {
    'nome': 'João',
    'sobrenome': 'Almeida',
    'idade': 32,
    'altura': 1.67,
    'peso': 63,
}
def desempacota(**kwargs):
    """Print each keyword argument as a "key value" line; returns None."""
    for par in kwargs.items():
        print(*par)
# desempacota only prints and returns None, so both print() calls below
# print "None" after the key/value lines.
pessoa = desempacota(nome='Diego', sobrenome='Santos', idade=21)
print(pessoa)
print(desempacota(**dados_pessoa))
| devSantZ/python_course | secao_2/aulas/aula78.py | aula78.py | py | 1,719 | python | pt | code | 0 | github-code | 13 |
34173039212 | class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
if not nums:
return 0
if len(nums) == 1:
return nums[0]
n =len(nums)
'''
if k >= n:
return 0
max_val = float('-inf')
for i in range(n):
if (i + k) < n:
window_sum = mean(nums[i:i+k])
max_val = max(max_val, window_sum)
else:
break
'''
max_sum, curr_sum = sum(nums[:k]), sum(nums[:k])
for i in range(k, n):
curr_sum += (nums[i] - nums[i-k])
max_sum = max(curr_sum, max_sum)
return (max_sum/k) | amuhebwa/100Days_of_Code | max_avg_subarray.py | max_avg_subarray.py | py | 709 | python | en | code | 2 | github-code | 13 |
16719178660 | import sys
# sys.stdin = open('input1.txt')
# BOJ 5355 "Mars math": each line is a number followed by operators,
# applied left to right: "@" triples, "%" adds 5, "#" subtracts 7.
T = int(input())
for _ in range(T):
    # split() already yields strings, so the original list(map(str, ...))
    # wrapper was redundant.
    tokens = input().split()
    a = float(tokens[0])
    for op in tokens[1:]:
        if op == "@":
            a *= 3
        elif op == "%":
            a += 5
        elif op == "#":
            a -= 7
    # Answer printed with exactly two decimal places.
    print(format(a, ".2f"))
727610250 | from colorama import Fore, Style, init
init()
class Interpreter:
    """Tiny command interpreter for the "Lenti" toy terminal.

    Each supported keyword maps to a handler that receives the full
    whitespace-split token list (tokens[0] is the keyword itself).
    Handler docstrings double as the `help` command's descriptions.
    """

    def __init__(self):
        self.commands = {
            "print": self.printly,
            "help": self.helply,
            "add": self.addly,
            "read": self.readly,
            "write": self.writely,
            "append": self.appendly,
            "copy": self.copyly,
            "rename": self.renamely,
            "delete": self.deletely
        }

    def run(self, code):
        """Split a raw input line and dispatch on its first token."""
        tokens = code.split()
        if not tokens:
            # Robustness: a blank line previously raised IndexError.
            return
        self.execute(tokens[0], tokens)

    def execute(self, keyword, tokens):
        """Invoke the handler for *keyword*, or report an unknown keyword."""
        if keyword in self.commands:
            self.commands[keyword](tokens)
        else:
            print(Fore.BLUE + "Invalid keyword", keyword)

    def printly(self, tokens):
        """Prints one or two words back to the user"""
        # Bug fix: the original tested `len(tokens) <= 3` first, which
        # raised IndexError for a single word and left its one-word branch
        # unreachable.  Branch on the exact count instead.
        if len(tokens) == 2:
            print(Fore.BLUE + tokens[1])
        elif len(tokens) == 3:
            print(Fore.BLUE + tokens[1] + " " + tokens[2])
        else:
            disk = len(tokens) - 1
            print("Can only display 1-2 words amount of words given: ", Fore.RED + str(disk))

    def helply(self, tokens=None):
        """Lists the available commands"""
        print(Fore.BLUE + "Available commands:")
        for command in self.commands:
            print(Fore.BLUE + f"{command} - {self.commands[command].__doc__}")

    def addly(self, tokens):
        """Adds two numbers and prints the result"""
        if len(tokens) != 3:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            result = int(tokens[1]) + int(tokens[2])
            print(Fore.BLUE + str(result))
        except ValueError:
            print(Fore.BLUE + "Invalid arguments")

    def readly(self, tokens):
        """Reads the content of a file and prints it"""
        if len(tokens) != 2:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            with open(tokens[1], 'r') as file:
                content = file.read()
                print(Fore.BLUE + content)
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")

    def writely(self, tokens):
        """Writes content to a file"""
        if len(tokens) < 3:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            with open(tokens[1], 'w') as file:
                # Everything after the filename becomes the file content.
                content = ' '.join(tokens[2:])
                file.write(content)
                print(Fore.BLUE + f"Content written to {tokens[1]}")
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")

    def appendly(self, tokens):
        """Appends content to a file"""
        if len(tokens) < 3:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            with open(tokens[1], 'a') as file:
                content = ' '.join(tokens[2:])
                file.write(content)
                print(Fore.BLUE + f"Content appended to {tokens[1]}")
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")

    def copyly(self, tokens):
        """Copies a file"""
        if len(tokens) != 3:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            with open(tokens[1], 'r') as src_file:
                with open(tokens[2], 'w') as dest_file:
                    content = src_file.read()
                    dest_file.write(content)
                    print(Fore.BLUE + f"File {tokens[1]} copied to {tokens[2]}")
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")

    def renamely(self, tokens):
        """Renames a file"""
        import os  # bug fix: `os` was used here but never imported
        if len(tokens) != 3:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            os.rename(tokens[1], tokens[2])
            print(Fore.BLUE + f"File {tokens[1]} renamed to {tokens[2]}")
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")

    def deletely(self, tokens):
        """Deletes a file"""
        import os  # bug fix: `os` was used here but never imported
        if len(tokens) != 2:
            print(Fore.BLUE + "Invalid number of arguments")
            return
        try:
            os.remove(tokens[1])
            print(Fore.BLUE + f"File {tokens[1]} deleted")
        except FileNotFoundError:
            print(Fore.BLUE + f"File {tokens[1]} not found")
# Simple REPL: read a line, dispatch it, repeat until interrupted (Ctrl-C).
interpreter = Interpreter()
print(Fore.BLUE + "Lenti Terminal")
while True:
    code = input(Fore.YELLOW + '>> ')
    print(" ")
    interpreter.run(code)
    print("\n")
| akrtkk/lenti-language | lenti_terminal.py | lenti_terminal.py | py | 4,737 | python | en | code | 1 | github-code | 13 |
10264023406 | # 1.while循环
"""
while 条件:
do something1,2,3,
"""
i = 1
sum = 0
while i<=100:
sum += i
i += 1
print(sum)
# while猜数字
import random
num = random.randint(1,10)
guess = int(input("请输入你要猜的值:"))
i = 1
flag = 1
while flag:
if guess == num:
print(f"congratulations! u have used {i} times!")
flag = 0
elif guess > num:
print("bigger!")
guess = int(input("try again!the number is:"))
else:
print("smaller!")
guess = int(input("try again!the number is:"))
i += 1
# 2.while嵌套循环
"""
while tiaojian1:
do something
while tiaojian2:
do something
"""
i = 1
j = i
while i <= 100:
print(f"today is {i} days")
j=1
while j<=10:
print(f"give {j} flowers to XiaoMei")
j += 1
print("i love u")
i += 1
print("success!")
# 9*9乘法表
"""
print("hello",end = ' ')可以实现输出不换行
输出对齐:\t用制表符
print("hello \tworld")
print("itheima \tbest")
"""
i = 1
j = 1
while i < 10:
while j <= i:
print(f"{i}*{j}={i*j}\t",end = ' ')
j += 1
j = 1
print("\n")
i += 1
| cicospui/note | py基础学习/4.1while循环.py | 4.1while循环.py | py | 1,235 | python | en | code | 1 | github-code | 13 |
19166984398 | import numpy as np
from numpy import multiply as mult
from numpy import divide as div
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import csv
class Model:
    """N-body gravity simulation integrated with the Beeman algorithm.

    Body definitions are loaded from 'object_data.csv'; per-step positions
    and system energies are accumulated in class-level dicts.  NOTE(review):
    object_data/position_dict/energy_dict are CLASS attributes, so they are
    shared by all Model instances -- confirm that only one instance is ever
    created.
    """
    # Gravitational constant (SI units).
    __G = 6.67e-11
    object_data = {}
    position_dict = {}
    energy_dict = {'total_energy': [], 'kinetic_energy': [], 'potential_energy': []}

    def __init__(self):
        self.__read_object_data()

    # ======================================== Data Reading ========================================#
    # Read data from csv file
    def __read_object_data(self):
        """Load body definitions from 'object_data.csv' into object_data.

        SECURITY NOTE(review): every non-name field is passed through
        eval(); only run this on a trusted CSV.
        """
        with open('object_data.csv', 'r') as object_csv_file:
            csv_reader = csv.DictReader(object_csv_file)
            # Evaluate every possible element
            for line in csv_reader:
                # The first key 'name' is paired to a string,
                # eval(Type: String) would return an error
                for key in list(line.keys())[1:]:
                    # Tuples (e.g. position/velocity) become numpy arrays.
                    if type(eval(line[key])) == tuple:
                        line[key] = np.array(eval(line[key]))
                    else:
                        line[key] = eval(line[key])
                # Read to class dictionary including every given body
                self.object_data[line['name']] = line
                self.position_dict[line['name']] = [tuple(line['position'])]

    # ======================================== Beeman Algorithm ========================================#
    # Update positions to object_data and appending new position to position lists
    def __update_position(self, time_step):
        # Beeman position step:
        # x(t+dt) = x + v*dt + (dt^2/6)*(4*a(t) - a(t-dt))
        for key in self.object_data:
            next_pos = self.object_data[key]['position'] + mult(self.object_data[key]['velocity'], time_step) + \
                       mult(1 / 6 * time_step ** 2, mult(4, self.object_data[key]['current_acceleration']) -
                       self.object_data[key]['previous_acceleration'])
            self.object_data[key]['position'] = next_pos
            self.position_dict[key].append(tuple(next_pos))

    # Calculate the acceleration for the next time step
    def __calc_next_acc(self):
        # a_i = -G * sum_j m_j * (r_i - r_j) / |r_i - r_j|^3
        for key in self.object_data:
            other_bodies = list(self.object_data.keys())
            other_bodies.remove(key)
            lst = []
            # Calculation relative to each other body and append each value to lst
            for body in other_bodies:
                distance_between = self.object_data[key]['position'] - self.object_data[body]['position']
                other_body_mass = self.object_data[body]['mass']
                magnitude = np.linalg.norm(distance_between)
                lst.append(mult(div(other_body_mass, magnitude ** 3), distance_between))
            next_acc = mult(-self.__G, sum(lst))
            # Update next acceleration
            self.object_data[key]['next_acceleration'] = next_acc

    # Update the velocity at the next time step
    def __update_velocity(self, time_step):
        # Beeman velocity step:
        # v(t+dt) = v + (dt/6)*(2*a(t+dt) + 5*a(t) - a(t-dt))
        for key in self.object_data:
            self.object_data[key]['velocity'] = self.object_data[key]['velocity'] + \
                                                mult(div(time_step, 6),
                                                     mult(2, self.object_data[key]['next_acceleration']) +
                                                     mult(5, self.object_data[key]['current_acceleration']) -
                                                     self.object_data[key]['previous_acceleration'])

    # Update the previous to current acceleration,
    # the current to next acceleration (for the next time step)
    def __update_accs(self):
        for key in self.object_data:
            self.object_data[key]['previous_acceleration'] = self.object_data[key]['current_acceleration']
            self.object_data[key]['current_acceleration'] = self.object_data[key]['next_acceleration']

    # ======================================== Energy Calculation ========================================#
    # Calculate and store kinetic, potential, and total energy of the system
    def __calculate_energies(self):
        E_k_list = []
        E_p_list = []
        for key in self.object_data:
            # Calculate E_k for each body
            E_k_list.append(1 / 2 * self.object_data[key]['mass'] *
                            np.linalg.norm(self.object_data[key]['velocity']) ** 2)
            other_bodies = list(self.object_data.keys())
            other_bodies.remove(key)
            # Calculate E_g for each body twice
            for body in other_bodies:
                distance_between = self.object_data[body]['position'] - self.object_data[key]['position']
                E_p_list.append(self.__G * self.object_data[key]['mass'] * self.object_data[body]['mass']
                                / np.linalg.norm(distance_between))
        ke = sum(E_k_list)
        # Factor -1/2 corrects the double-counting of each pair above.
        pe = -1 / 2 * sum(E_p_list)
        te = ke + pe
        # Storing values
        self.energy_dict['kinetic_energy'].append(ke)
        self.energy_dict['potential_energy'].append(pe)
        self.energy_dict['total_energy'].append(te)

    # ======================================== Iteration cycle ========================================#
    # Iterate the Beeman algorithm a given number of time with given dt
    def update_iteration(self, num_of_time_step, time_step):
        """Advance the system num_of_time_step steps of size time_step.

        Order matters: energies are recorded for the CURRENT state before
        each Beeman position/acceleration/velocity update.
        """
        for i in range(num_of_time_step):
            # Energy Calculation and saving to self.energy_dict
            self.__calculate_energies()
            # Beeman Methods
            self.__update_position(time_step)
            self.__calc_next_acc()
            self.__update_velocity(time_step)
            self.__update_accs()
class Simulation:
    """Drives the solar-system model: reads CSV settings, runs the Beeman
    integration, reports orbital periods, animates the bodies and plots the
    energy history.

    NOTE(review): every attribute below is class-level, so ALL Simulation
    instances (and the Satellite class, which builds its own Simulation)
    share the same Model, display data and patch list -- confirm this
    sharing is intentional before refactoring to instance attributes.
    """
    model = Model()
    __display_data = {}
    __patches = []
    num_of_time_steps = 0
    time_step_length = 0
    def __init__(self):
        # Load per-body display settings and the time-step configuration
        # from the CSV files in the working directory.
        self.__read_display_data()
        self.__read_time_step_settings()
    # Initialize position and energy dictionaries
    def initialize_dicts(self):
        """Run the full integration, filling the model's position/energy dicts."""
        self.model.update_iteration(self.num_of_time_steps, self.time_step_length)
    @staticmethod
    def convert_to_days(time_step, num_of_time_step):
        """Convert `num_of_time_step` steps of `time_step` seconds to whole days."""
        return int(time_step * num_of_time_step / 86400)
    # ======================================== Data Reading ========================================#
    def __read_time_step_settings(self):
        """Read the step count and step length (seconds) from time_step_settings.csv."""
        with open('time_step_settings.csv', 'r') as time_step_csv:
            csv_reader = csv.reader(time_step_csv)
            data = next(csv_reader)
            # NOTE(review): eval() on file contents executes arbitrary code.
            # Acceptable for a trusted local file, but int()/float() would be safer.
            self.num_of_time_steps = eval(data[0])
            self.time_step_length = eval(data[1])
    def __read_display_data(self):
        """Read per-body display radius/colour rows from display_data.csv, keyed by body name."""
        with open('display_data.csv', 'r') as display_csv_file:
            csv_reader = csv.DictReader(display_csv_file)
            for line in csv_reader:
                line['display_radius'] = eval(line['display_radius'])
                self.__display_data[line['name']] = line
    # ======================================== Orbital Period ========================================#
    # Calculate the angle between 2 positional vectors
    @staticmethod
    def __find_angle_between(a, b):
        """Return the angle in radians between vectors a and b."""
        a_u = a / np.linalg.norm(a)
        b_u = b / np.linalg.norm(b)
        # clip() guards against floating-point excursions just outside [-1, 1].
        return np.arccos(np.clip(np.dot(a_u, b_u), -1.0, 1.0))
    # Approximate the orbital period of a given body from its positional data by
    # comparing the angle between the initial position and the position at each time step
    def __find_orbital_period(self, body):
        """Return the approximate orbital period in Earth years, or 0 if the
        recorded trajectory never completes an orbit."""
        seconds_in_earth_year = 60 * 60 * 24 * 365
        # Initial position
        a = self.model.position_dict[body][0]
        # Counter used to check if a whole orbit has been committed
        counter = False
        for b in self.model.position_dict[body]:
            angle = self.__find_angle_between(a, b)
            # Confirm that the planet has followed at least half of the orbit
            if angle > (179 / 180) * np.pi:
                counter = True
            # A small angle AFTER passing the half-orbit mark means the body
            # is back near its start: one full orbit completed.
            # NOTE(review): .index(b) is O(n) per call and returns the FIRST
            # matching position -- fine if positions never repeat exactly.
            if angle < np.pi / 360 and counter:
                return round(self.model.position_dict[body].index(b) *
                             self.time_step_length / seconds_in_earth_year, 3)
        return 0
    # Remove unwanted bodies from calculating its orbital period
    def __body_remover(self, unwanted_bodies):
        """Return the model's body names minus the ones listed in `unwanted_bodies`.

        NOTE(review): this removes elements from `list_of_bodies` while
        iterating over it, which skips the element after each removal --
        harmless only if unwanted bodies are never adjacent in the key order.
        """
        list_of_bodies = list(self.model.position_dict.keys())
        for key in list_of_bodies:
            for del_key in unwanted_bodies:
                if key == del_key:
                    list_of_bodies.remove(key)
        return list_of_bodies
    # Print the orbital period of wanted bodies
    def print_orbital_period(self, unwanted_bodies):
        """Print the approximated orbital period of every body except `unwanted_bodies`."""
        list_of_bodies = self.__body_remover(unwanted_bodies=unwanted_bodies)
        for key in list_of_bodies:
            heading = f"Approximated Orbital Period ({key}): "
            orbital_period = self.__find_orbital_period(key)
            # __find_orbital_period returns 0 when the trajectory never came
            # back within pi/360 radians of the start after the half-orbit mark.
            if orbital_period == 0:
                print(heading + "The given number of time steps is not enough to predict the orbital period.")
            else:
                print(heading + f"{orbital_period} Earth years")
        print('')
    # ======================================== Solar System Animation ========================================#
    # Initialize a Circle object for each body and save them to self.__patches
    def __generate_body(self):
        """Create one matplotlib Circle per configured body at its initial position."""
        for key in self.__display_data:
            body = plt.Circle(self.model.position_dict[key][0],
                              self.__display_data[key]['display_radius'],
                              color=self.__display_data[key]['display_color'],
                              animated=True)
            self.__patches.append(body)
    # Return the ith key of a dictionary
    @staticmethod
    def __ix(dic, i):
        """Return the i-th key of `dic`; prints a warning (and returns None) on overflow."""
        try:
            return list(dic)[i]
        except IndexError:
            print("Not enough keys for animation.")
    # At each time step, return the circles with their positions at that time step
    def __animate_func(self, i):
        """FuncAnimation callback: move every patch to its frame-i position."""
        for patch in self.__patches:
            # NOTE(review): list.index() is O(n) per patch and assumes the
            # patch list order matches the position_dict key order.
            key = self.__ix(self.model.position_dict, self.__patches.index(patch))
            patch.center = self.model.position_dict[key][i]
        return self.__patches
    # Display the Animation
    def display_simulation(self):
        """Open a matplotlib window animating the bodies over all time steps (blocks)."""
        fig = plt.figure()
        ax = plt.axes()
        self.__generate_body()
        for i in range(0, len(self.__patches)):
            ax.add_patch(self.__patches[i])
        ax.axis('scaled')
        ax.set_xlim(-3e11, 3e11)
        ax.set_ylim(-3e11, 3e11)
        ax.set_xlabel('x displacement (metres)')
        ax.set_ylabel('y displacement (metres)')
        # The FuncAnimation object must stay referenced (via `anim`) until
        # plt.show() returns, or the animation is garbage-collected.
        anim = FuncAnimation(fig, self.__animate_func, frames=self.num_of_time_steps,
                             interval=0.2, repeat=True, blit=True)
        plt.show()
    # ======================================== Energy Writing and Plotting ========================================#
    # Writing total energy of the system to a txt file
    def write_te_to_file(self):
        """Write the total energy at ~10 evenly spaced time steps to TotalEnergy.txt.

        NOTE(review): raises ZeroDivisionError if num_of_time_steps < 10.
        """
        file = open('TotalEnergy.txt', 'w')
        for i in range(self.num_of_time_steps):
            if i % int(self.num_of_time_steps / 10) == 0:
                file.write(f"At time step {i},\n"
                           f"{self.convert_to_days(self.time_step_length, i)} days since the starting point,\n"
                           f"Total Energy = {self.model.energy_dict['total_energy'][i]}\n\n")
        file.close()
    # Plot the energy graph
    def display_energy_graph(self):
        """Plot every energy series in the model's energy_dict against time step (blocks)."""
        plt.figure()
        ax = plt.axes()
        ax.set_xlim(0, self.num_of_time_steps)
        # NOTE(review): the limits are given high-to-low, which INVERTS the
        # y-axis -- confirm this flip is intentional.
        ax.set_ylim(1.5e34, -1.5e34)
        ax.set_xlabel('Number of time step')
        ax.set_ylabel('Energy(J)')
        x_coordinates = list(range(self.num_of_time_steps))
        for key in self.model.energy_dict:
            # Plot the energies at each time step
            y_coordinates = self.model.energy_dict[key]
            plt.plot(x_coordinates, y_coordinates, marker='.', markersize=1, label=key)
        plt.legend(loc="lower left")
        plt.show()
class Satellite:
    """Experiment 1.4.2: launch a probe from Earth toward Mars and search a
    range of launch speeds for viable trajectories.

    NOTE(review): `__sim = Simulation()` executes at class-definition time,
    so merely importing/defining this class reads the CSV settings files.
    """
    __sim = Simulation()
    init_data = {}
    def __init__(self):
        # Run one full integration so position/energy histories exist, then
        # remember the model's starting object data for later resets.
        # NOTE(review): init_data is a REFERENCE to object_data, not a copy,
        # so __reset_to_init's reassignment is effectively a no-op for any
        # in-place mutation -- confirm a deepcopy was not intended.
        self.__sim.model.update_iteration(self.__sim.num_of_time_steps, self.__sim.time_step_length)
        self.init_data = self.__sim.model.object_data
    # ======================================== Probe Launching ========================================#
    # Clockwise rotation
    @staticmethod
    def __vector_rotation_origin(vector, radians):
        """Rotate a 2-D `vector` clockwise about the origin by `radians`."""
        x, y = vector
        xx = x * np.cos(radians) + y * np.sin(radians)
        yy = -x * np.sin(radians) + y * np.cos(radians)
        return xx, yy
    def __reset_to_init(self):
        """Restore the model's object data and clear all recorded positions."""
        self.__sim.model.object_data = self.init_data
        for key in self.__sim.model.position_dict:
            self.__sim.model.position_dict[key] = []
    # Launches the probe from a range of initial velocities to find each viable initial velocity
    def __probe_launch(self, mass, init_pos, min_init_v, max_init_v, v_angle):
        """Sweep launch speeds [min_init_v, max_init_v) and return the viable launches.

        Each returned entry is the tuple produced by probe_state().
        """
        suc_launch = []
        self.__sim.model.object_data['Probe']['mass'] = mass
        self.__sim.model.object_data['Probe']['position'] = init_pos
        # Restart the update iteration for every initial speed
        for v in range(min_init_v, max_init_v):
            print(f"checking outcome for initial launching velocity: {v} m/s")
            self.__sim.model.object_data['Probe']['velocity'] = self.__vector_rotation_origin((v, 0), v_angle)
            self.__sim.model.update_iteration(self.__sim.num_of_time_steps, self.__sim.time_step_length)
            # Check if it's viable
            launch = self.probe_state(v, v_angle)
            # launch[4] == 0 means the probe never reached Mars
            if launch[4] != 0:
                suc_launch.append(launch)
            self.__reset_to_init()
        print("done!")
        return suc_launch
    # ======================================== Probe State Checking ========================================#
    @staticmethod
    def radians_degrees(radians):
        """Convert radians to degrees, rounded to 2 decimal places."""
        return round(radians * 180 / np.pi, 2)
    # Check the minimum distance between the probe and Mars
    def min_distance(self, body_a, body_b):
        """Print (not return) the minimum distance between two bodies over the run."""
        dl = []
        for i in range(self.__sim.num_of_time_steps):
            pos_a = self.__sim.model.position_dict[body_a][i]
            pos_b = self.__sim.model.position_dict[body_b][i]
            distance = np.linalg.norm(np.subtract(pos_a, pos_b))
            dl.append(distance)
        print(min(dl))
    # Check if the distance between two bodies is within range at the given time step
    def __distance_checker(self, body_a, body_b, min_distance, max_distance, i):
        """Return True when the bodies' separation at step i lies in [min, max].

        NOTE(review): falls through to an implicit None (falsy) otherwise --
        callers only use it in boolean context, so that is safe here.
        """
        pos_a = self.__sim.model.position_dict[body_a][i]
        pos_b = self.__sim.model.position_dict[body_b][i]
        distance = np.linalg.norm(np.subtract(pos_a, pos_b))
        if min_distance <= distance <= max_distance:
            return True
    # Checking if the probe has completed the objectives.
    # 0 = Did not reach Mars within given time
    # 1 = Reached Mars but failed to return to Earth
    # 2 = Reached Mars and returned to Earth
    def probe_state(self, init_v, angle):
        """Classify the recorded trajectory for a launch at `init_v` m/s.

        Returns (init_v, angle, days_to_mars, |days - 333| vs Viking 2, state),
        where state is the 0/1/2 code documented above and the day counts are
        -1 when Mars was never reached.
        """
        counter = 0
        time_taken = -1
        time_difference = -1
        for i in range(self.__sim.num_of_time_steps):
            in_mars_range = self.__distance_checker('Probe', 'Mars', 3.69e6, 4.839e7, i)
            in_earth_range = self.__distance_checker('Probe', 'Earth', 6.678e6, 5.1378e7, i)
            # If Reached Mars
            if in_mars_range and counter == 0:
                counter = 1
                time_taken = self.__sim.convert_to_days(self.__sim.time_step_length, i)
                # 333 days is the Viking 2 transfer time used as the benchmark.
                time_difference = abs(333 - time_taken)
            # If returned to Earth
            if in_earth_range and counter == 1:
                counter = 2
        return init_v, angle, time_taken, time_difference, counter
    # ======================================== Viable Condition Output ========================================#
    # Write successful launches to file
    # Containing the following info:
    # Launching speed,
    # launching direction,
    # time taken to Mars,
    # time difference with the viking 2 probe,
    # whether return to Earth
    def write_suc_launch(self, mass, init_pos, min_init_v, max_init_v, v_angle):
        """Run the launch sweep and write every viable launch to 'Viable Initial Velocities.txt'."""
        file = open('Viable Initial Velocities.txt', 'w')
        suc_launch = self.__probe_launch(mass, init_pos, min_init_v, max_init_v, v_angle)
        print(f"\nFound {len(suc_launch)} viable initial v.\n")
        for launch in suc_launch:
            # launch[4]: 1 = reached Mars only, 2 = reached Mars and returned.
            if launch[4] == 1:
                string = "N"
            else:
                string = "Y"
            file.write(f"Launch speed: {launch[0]} m/s\n"
                       f"direction: {launch[1]} radians({self.radians_degrees(launch[1])} degrees) clockwise\n"
                       f"duration: {launch[2]} days to reach Mars\n"
                       f"delta t (viking 2): {launch[3]} days\n"
                       f"Return to Earth?: {string}\n\n")
        file.close()
# Script entry point.  Guarded so that importing this module no longer runs
# the whole simulation (GUI windows, file writes, long launch sweep) as a
# side effect -- running it as a script behaves exactly as before.
if __name__ == '__main__':
    test = Simulation()
    test.initialize_dicts()
    # 1.3 Project Task
    test.display_simulation()
    test.print_orbital_period(unwanted_bodies=['Sun', 'Probe'])
    test.write_te_to_file()
    # 1.4.1 Energy Conservation
    test.display_energy_graph()
    # 1.4.2 Satellite to Mars
    test1 = Satellite()
    # test1.min_distance('Probe', 'Mars')
    test1.write_suc_launch(2328, (1.5e11, -7.378e6), 26406, 26410, 0.2709)
| yuboshaouoe/UoE-Projects | UOE Projects/Computer Simulation/project-s2084333/project-s2084333.py | project-s2084333.py | py | 17,767 | python | en | code | 0 | github-code | 13 |
15509348466 | import re
def pt1(data=None):
    """AoC 2022 day 4 part 1: count pairs where one range fully contains the other.

    Each line has the form "a-b,c-d".  The pair counts when [a, b] contains
    [c, d] or vice versa.

    data: iterable of input lines; defaults to the module-level ``lines``
          (preserving the original no-argument call).
    Returns the count (also printed, as before).
    """
    if data is None:
        data = lines
    total = 0
    for line in data:
        # Split "a-b,c-d" into the four integers in one pass.
        a, b, c, d = map(int, re.split("[,-]", line))
        if (a <= c and b >= d) or (c <= a and d >= b):
            total += 1
    print(total)
    return total
def pt1_set_ops(data=None):
    """Set-based variant of pt1: count pairs where one range contains the other.

    Builds each range as a set and tests subset relations.  Slower than the
    arithmetic version but arguably clearer.

    data: iterable of input lines; defaults to the module-level ``lines``.
    Returns the count (also printed, as before).
    """
    if data is None:
        data = lines
    total = 0
    for line in data:
        a, b, c, d = map(int, re.split("[,-]", line))
        set1 = set(range(a, b + 1))
        set2 = set(range(c, d + 1))
        if set1.issubset(set2) or set2.issubset(set1):
            total += 1
    print(total)
    return total
def pt2(data=None):
    """AoC 2022 day 4 part 2: count pairs whose ranges overlap at all.

    Two closed ranges [a, b] and [c, d] overlap iff a <= d and c <= b --
    equivalent to the original four-way endpoint check, just simplified.

    data: iterable of input lines; defaults to the module-level ``lines``.
    Returns the count (also printed, as before).
    """
    if data is None:
        data = lines
    total = 0
    for line in data:
        a, b, c, d = map(int, re.split("[,-]", line))
        if a <= d and c <= b:
            total += 1
    print(total)
    return total
def pt2_set_ops(data=None):
    """Set-based variant of pt2: count pairs whose ranges share any value.

    data: iterable of input lines; defaults to the module-level ``lines``.
    Returns the count (also printed, as before).
    """
    if data is None:
        data = lines
    total = 0
    for line in data:
        a, b, c, d = map(int, re.split("[,-]", line))
        set1 = set(range(a, b + 1))
        set2 = set(range(c, d + 1))
        # Non-empty intersection is truthy; no need to take len().
        if set1 & set2:
            total += 1
    print(total)
    return total
if __name__ == "__main__":
with open("input.txt") as f:
lines = f.read().splitlines()
pt1()
pt2()
# set operation versions takes a little over double the time to execute but are more readable
pt1_set_ops()
pt2_set_ops()
| Matt-Unwin/AoC2022 | days/d4/d4.py | d4.py | py | 1,645 | python | en | code | 0 | github-code | 13 |
70752752019 | """empty message
Revision ID: 4fbd92443310
Revises: 7b3ad4f4097d
Create Date: 2021-06-23 15:26:59.727268
"""
from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic to order migrations in the chain.
revision = '4fbd92443310'
down_revision = '7b3ad4f4097d'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a NOT NULL ``name`` column (max 15 chars) to ``watchlist``.

    NOTE(review): adding a non-nullable column with no server default fails
    on some backends if ``watchlist`` already contains rows -- confirm the
    table was empty when this migration first ran.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('watchlist', sa.Column('name', sa.String(length=15), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``name`` column from ``watchlist``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('watchlist', 'name')
    # ### end Alembic commands ###
| BlueBoi904/cinnamon | server/migrations/versions/4fbd92443310_.py | 4fbd92443310_.py | py | 663 | python | en | code | 0 | github-code | 13 |
6563963376 | from datacenter.models import Visit
from django.shortcuts import render
from django.utils import timezone
import pytz
def storage_information_view(request):
    """Render the list of passcard holders currently inside the storage.

    A Visit whose ``leaved_at`` is still NULL means the holder has entered
    but not yet left.  Each such visit is serialized with the passcard, the
    entry time localized to Europe/Moscow, and a formatted stay duration.
    """
    # Filter directly on the manager; the intermediate .all() queryset added
    # nothing.  The append loop is replaced by a list comprehension.
    unfinished_visits = Visit.objects.filter(leaved_at=None)
    serialized_visits = [
        {
            'who_entered': visit.passcard,
            'entered_at': timezone.localtime(
                visit.entered_at,
                pytz.timezone('Europe/Moscow'),
            ),
            'duration': Visit.format_duration(visit.get_duration()),
        }
        for visit in unfinished_visits
    ]
    context = {
        'serialized_visits': serialized_visits,
    }
    return render(request, 'storage_information.html', context)
| pn00m/watching_storage | datacenter/storage_information_view.py | storage_information_view.py | py | 899 | python | en | code | 0 | github-code | 13 |
22035993335 | #
# @lc app=leetcode.cn id=406 lang=python3
#
# [406] 根据身高重建队列
#
"""
author : revang
date : 2022-02-02
method : 贪心-相邻问题: 先帮身高最大的找位置, 依次类推. 具体方法: 排序+插入
1. 排序: 先按照身高从大到小排序(身高相同的情况下K小的在前面),这样的话,无论哪个人的身高都小于等于他前面人的身高。所以接下来只要按照K值将他插入相应的位置就可以了。
例如:示例1排完序: [[7,0],[7,1],[6,1],[5,0],[5,2],[4,4]]
2. 插入: 新建一个列表
[7,0]插入第0的位置
[7,1]插入第1的位置
[6,1]插入第1的位置,这时[7,1]就往后移一位了
"""
from typing import List
# @lc code=start
class Solution:
    def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:
        """Rebuild the queue by greedy insertion.

        Sort tallest-first (ties broken by smaller k first); then insert
        each person at index k.  Everyone already placed is at least as
        tall, so inserting at k keeps every earlier person's k-count valid.
        """
        ordered = sorted(people, key=lambda person: (-person[0], person[1]))
        queue = []
        for person in ordered:
            queue.insert(person[1], person)
        return queue
# @lc code=end
def test():
    # Regression check against the LeetCode example's expected answer.
    assert Solution().reconstructQueue([[6, 0], [5, 0], [4, 0], [3, 2], [2, 2], [1, 4]]) == [[4, 0], [5, 0], [2, 2], [3, 2], [1, 4], [6, 0]]
| revang/leetcode | 406.根据身高重建队列.py | 406.根据身高重建队列.py | py | 1,204 | python | zh | code | 0 | github-code | 13 |
17040811284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FileInfo import FileInfo
class AlipayFincoreComplianceRcsmartContentSubmitModel(object):
    """Request model for the Alipay fincore compliance rcsmart content-submit API.

    Auto-generated SDK boilerplate: plain properties plus dict converters
    (`to_alipay_dict` / `from_alipay_dict`) used by the gateway layer.
    """

    def __init__(self):
        # All fields default to None; `file_info_list` holds FileInfo objects.
        self._app_name = None
        self._app_token = None
        self._biz_code = None
        self._file_info_list = None
        self._request_id = None
        self._scene_code = None

    @property
    def app_name(self):
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        self._app_name = value
    @property
    def app_token(self):
        return self._app_token

    @app_token.setter
    def app_token(self, value):
        self._app_token = value
    @property
    def biz_code(self):
        return self._biz_code

    @biz_code.setter
    def biz_code(self, value):
        self._biz_code = value
    @property
    def file_info_list(self):
        return self._file_info_list

    @file_info_list.setter
    def file_info_list(self, value):
        # Accepts a list of FileInfo objects or of plain dicts; dicts are
        # converted to FileInfo.  Non-list values are silently ignored.
        if isinstance(value, list):
            self._file_info_list = list()
            for i in value:
                if isinstance(i, FileInfo):
                    self._file_info_list.append(i)
                else:
                    self._file_info_list.append(FileInfo.from_alipay_dict(i))
    @property
    def request_id(self):
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        self._request_id = value
    @property
    def scene_code(self):
        return self._scene_code

    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value


    def to_alipay_dict(self):
        """Serialize the model (recursively) into the dict shape the gateway expects.

        Falsy fields (None, '', empty list) are omitted entirely.
        """
        params = dict()
        if self.app_name:
            if hasattr(self.app_name, 'to_alipay_dict'):
                params['app_name'] = self.app_name.to_alipay_dict()
            else:
                params['app_name'] = self.app_name
        if self.app_token:
            if hasattr(self.app_token, 'to_alipay_dict'):
                params['app_token'] = self.app_token.to_alipay_dict()
            else:
                params['app_token'] = self.app_token
        if self.biz_code:
            if hasattr(self.biz_code, 'to_alipay_dict'):
                params['biz_code'] = self.biz_code.to_alipay_dict()
            else:
                params['biz_code'] = self.biz_code
        if self.file_info_list:
            # Convert each FileInfo element in place before emitting the list.
            if isinstance(self.file_info_list, list):
                for i in range(0, len(self.file_info_list)):
                    element = self.file_info_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.file_info_list[i] = element.to_alipay_dict()
            if hasattr(self.file_info_list, 'to_alipay_dict'):
                params['file_info_list'] = self.file_info_list.to_alipay_dict()
            else:
                params['file_info_list'] = self.file_info_list
        if self.request_id:
            if hasattr(self.request_id, 'to_alipay_dict'):
                params['request_id'] = self.request_id.to_alipay_dict()
            else:
                params['request_id'] = self.request_id
        if self.scene_code:
            if hasattr(self.scene_code, 'to_alipay_dict'):
                params['scene_code'] = self.scene_code.to_alipay_dict()
            else:
                params['scene_code'] = self.scene_code
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a response/request dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayFincoreComplianceRcsmartContentSubmitModel()
        if 'app_name' in d:
            o.app_name = d['app_name']
        if 'app_token' in d:
            o.app_token = d['app_token']
        if 'biz_code' in d:
            o.biz_code = d['biz_code']
        if 'file_info_list' in d:
            o.file_info_list = d['file_info_list']
        if 'request_id' in d:
            o.request_id = d['request_id']
        if 'scene_code' in d:
            o.scene_code = d['scene_code']
        return o
19997637764 | from flask import Flask, request
from flask_cors import cross_origin
from order import OrderController
from plan import PlanController
# WSGI entry point; "application" is the module-level name many WSGI
# servers (e.g. Elastic Beanstalk, mod_wsgi) look for by default.
application = Flask(__name__)
@application.route('/', methods=['GET'])
@cross_origin()
def index():
    """Health-check endpoint: confirms the API is up and reports its version."""
    return 'API Works! v1.0.0'
@application.route('/order', methods=['GET', 'POST', 'PUT', 'DELETE'])
@cross_origin()
def order_controller_api():
    """CRUD dispatcher for /order.

    GET    -> list all orders
    POST   -> create an order from the JSON request body
    PUT    -> update the order identified by ?id=... with the JSON body
    DELETE -> delete the order identified by ?id=...

    Flask's route declaration restricts the method set, so one branch
    always matches.  (The leftover debug ``print(method)`` was removed.)
    """
    method = request.method
    order_controller = OrderController()
    if method == "GET":
        return order_controller.readAll()
    if method == "POST":
        payload = request.json
        return order_controller.create(payload)
    if method == "PUT":
        id = request.args.get('id')
        payload = request.json
        return order_controller.update(id, payload)
    if method == "DELETE":
        id = request.args.get('id')
        return order_controller.delete(id)
@application.route('/plan', methods=['GET', 'POST', 'PUT', 'DELETE'])
@cross_origin()
def plan_controller_api():
    """CRUD dispatcher for /plan.

    GET    -> list all plans
    POST   -> create a plan from the JSON request body
    PUT    -> update the plan identified by ?id=... with the JSON body
    DELETE -> delete the plan identified by ?id=...

    Mirrors order_controller_api; the leftover debug ``print(method)``
    was removed.
    """
    method = request.method
    plan_controller = PlanController()
    if method == "GET":
        return plan_controller.readAll()
    if method == "POST":
        payload = request.json
        return plan_controller.create(payload)
    if method == "PUT":
        id = request.args.get('id')
        payload = request.json
        return plan_controller.update(id, payload)
    if method == "DELETE":
        id = request.args.get('id')
        return plan_controller.delete(id)
if __name__ == "__main__":
application.run()
| oismaelash/alaris-flask-python-backend | application.py | application.py | py | 1,557 | python | en | code | 0 | github-code | 13 |
class Empty(Exception):
    """Raised when an element-returning operation runs on an empty queue."""
    pass
class ArrayQueue:
    """FIFO queue backed by a circular Python list.

    The backing list grows (doubles) when full and shrinks (halves) when
    the queue drops below a quarter of capacity, so enqueue/dequeue are
    amortized O(1).

    Bug fixed: ``_resize`` read ``self._data [None] * capacity`` -- an
    *expression* (indexing the list with None) that raised TypeError on the
    first resize.  It is now the intended assignment.  ``__str__`` was also
    fixed to print correctly when the buffer has wrapped around.
    """
    DEFAULT_CAPACITY = 10  # moderate capacity for all new queues

    def __init__(self):
        """Create an empty queue with the default capacity."""
        self._data = [None] * ArrayQueue.DEFAULT_CAPACITY  # circular storage
        self._size = 0   # number of elements currently stored
        self._front = 0  # index of _data which signifies the front of the queue

    def __len__(self):
        """Return the number of elements in the queue."""
        return self._size

    def is_empty(self):
        """Return True if the queue is empty."""
        return self._size == 0

    def first(self):
        """Return (but do not remove) the element at the front of the queue.

        Raises Empty if the queue is empty.
        """
        if self.is_empty():
            raise Empty('Queue is empty')
        return self._data[self._front]

    def dequeue(self):
        """Remove and return the element at the front of the queue.

        Raises Empty if the queue is empty.  Shrinks the backing list when
        occupancy falls below a quarter of capacity.
        """
        if self.is_empty():
            raise Empty('Queue is empty')
        answer = self._data[self._front]
        self._data[self._front] = None  # drop the reference to aid GC
        self._front = (self._front + 1) % len(self._data)
        self._size -= 1
        if 0 < self._size < len(self._data) // 4:
            self._resize(len(self._data) // 2)
        return answer

    def __str__(self):
        """Return the queue contents in front-to-back order.

        Walks the circular buffer with modular indexing so a wrapped-around
        queue prints fully (a plain slice would truncate at the list's end).
        """
        contents = [self._data[(self._front + i) % len(self._data)]
                    for i in range(self._size)]
        return f"Queue: {contents}"

    def enqueue(self, e):
        """Add element e to the back of the queue, growing storage if full."""
        if self._size == len(self._data):          # queue is full
            self._resize(2 * len(self._data))      # double the backing list
        avail = (self._front + self._size) % len(self._data)
        self._data[avail] = e
        self._size += 1

    def _resize(self, capacity):
        """Nonpublic utility: move contents to a new list of the given capacity.

        Elements are realigned so the front returns to index 0.
        """
        old = self._data
        self._data = [None] * capacity  # FIX: was `self._data [None] * capacity`
        walk = self._front
        for i in range(self._size):
            self._data[i] = old[walk]
            walk = (1 + walk) % len(old)
        self._front = 0
if __name__ == '__main__':
    # Manual smoke test: expected output is 4, False, 3, then the remaining
    # queue contents after one dequeue.
    q = ArrayQueue()
    q.enqueue(1)
    q.enqueue(2)
    q.enqueue(3)
    q.enqueue(4)
    print(len(q))
    q.dequeue()
    print(q.is_empty())
    print(len(q))
    print(str(q))
16537611726 | import sys
from algo import a_star, solution_analyzer
import ui
from reader.argument_parser import ArgParser
from reader.on_startup import StatesOnStart
def do_solvation():
    """Parse CLI arguments, solve every supplied n-puzzle with A*, and present results.

    Unsolvable puzzles are reported via the UI with a None solution.  The
    last positional argument to present_solution flags the final puzzle
    (presumably so the UI knows when to block/exit -- TODO confirm).
    Always exits the process with status 0 when done.
    """
    parser = ArgParser()
    puzzles = parser.puzzles
    greedy, uniform = parser.greedy_and_uniform
    map_type = parser.map_type
    # One Algo instance is reused for all puzzles.
    algo = a_star.Algo(h_function=parser.h_function, greedy=greedy, uniform=uniform,)
    for index, puzzle in enumerate(puzzles):
        print('Begin solvation...\n')
        on_start = StatesOnStart(puzzle)
        start_state, target_state, is_solvable = on_start.get_states_and_check_solvable(map_type)
        if not is_solvable:
            # Unsolvable: show a "no solution" result and move on.
            ui.present_solution(None, parser.use_console, parser.use_gui, index == len(puzzles) - 1)
            continue
        solution_states = algo.solve(start_state, target_state)
        solution = solution_analyzer.analyze_solution(solution_states)
        ui.present_solution(solution, parser.use_console, parser.use_gui, index == len(puzzles) - 1)
    sys.exit(0)
if __name__ == '__main__':
    # Script entry point: run the solver over the CLI-supplied puzzles.
    do_solvation()
| bshanae/n-puzzle | main.py | main.py | py | 1,073 | python | en | code | 0 | github-code | 13 |
38816431035 | import cv2
import numpy as np
# Read the input image
image_path = 'boxes.jpg'  # Replace with the actual image path
image = cv2.imread(image_path)
# NOTE(review): cv2.imread returns None for a missing/unreadable file and the
# next line would then raise -- consider checking before converting.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply Gaussian blur to reduce noise and improve edge detection
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# Lower and upper bounds for "brown" expressed in HSV (applied to the HSV
# conversion below, not to the BGR image).
lower_brown = np.array([10, 50, 50])
upper_brown = np.array([30, 255, 255])
# Convert to HSV color space and create a mask for brown color
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_brown, upper_brown)
# Apply Canny edge detection
edges = cv2.Canny(blurred, threshold1=30, threshold2=150)
# Keep only edges that fall inside brown regions
combined_mask = cv2.bitwise_and(edges, mask)
# Find contours in the combined mask
# NOTE(review): the 2-value unpacking matches OpenCV 4.x; OpenCV 3.x returned
# three values here -- confirm the pinned OpenCV version.
contours, _ = cv2.findContours(combined_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding rectangles around the detected boxes
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    if w > 50 and h > 50:  # minimum box size in pixels; adjust to fit your boxes
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Display the output image
cv2.imshow('Detected Boxes', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
21928312443 | from collections import Counter
def find_string_anagrams(str1, pattern):
    """Return the start indices of every anagram of `pattern` inside `str1`.

    Sliding-window approach: a Counter tracks the character frequencies of
    the current window of len(pattern) characters; whenever the window's
    counts match the pattern's counts, the window start is recorded.

    Time O(len(str1) * distinct pattern chars), space O(alphabet).
    Returns [] when `pattern` is empty or longer than `str1`.
    """
    pattern_counter = Counter(pattern)
    result_indexes = []
    window = Counter()
    start = 0
    for end in range(len(str1)):
        window[str1[end]] += 1
        if end - start + 1 == len(pattern):
            # `ch` instead of the original loop variable, which shadowed the
            # builtin `chr`.  Only pattern characters need comparing: extra
            # characters in the window would force a mismatch on a pattern
            # character's count anyway.
            if all(pattern_counter[ch] == window[ch] for ch in pattern_counter):
                result_indexes.append(start)
            # Slide the window: drop the leftmost character.
            window[str1[start]] -= 1
            start += 1
    return result_indexes
20906109751 | '''
27/10/2019 Developed by Xu Han (n10306986), Earl Yin Lok Chau (n10328611), Vincent Chen(n7588844)
Siamese neural network is an artificial neural network (ANN)
that uses the same weights and structure while working
in tandem on 2 dissimilar input vectors to compute comparable output vectors.
In this experiment, three Siamese Neural Networks were implemented to learn the correspondence classes
from the image pairs prepared. Furthermore, the model was first trained to ensure the pairs of images
with the same clothing type to yield a closer output vectors
than that of the pair of images from the different clothing types.
The models were then was used to differentiate the similarity metric
between the trained input clothing
with that of the new samples from unseen categories.
The Fashion-MNIST dataset contains 70,000 unique 28 x 28 grayscale clothing images spanning 10 classes:
top, trouser, pullover, dress, coat, sandal, shirt, sneaker, bag, and ankle boot.
'''
from tensorflow import keras
from keras import regularizers
from keras import backend as K
from keras.datasets import fashion_mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Convolution2D, MaxPooling2D, Flatten, Lambda, Input, Dropout
from keras.optimizers import Adadelta
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.utils import shuffle
# Margin used by `contrastive_loss`: the distance beyond which negative pairs
# stop being penalised.  Kept at 1 for the main experiments and changed only
# when testing the effect of the margin value.
margin = 1
# Flag intended to run the data-verification step only on the first execution.
# NOTE(review): `initiated` is not referenced in the visible code -- confirm
# it is used elsewhere before removing.
initiated = False
def euclidean_distance(vects):
    '''
    Compute the Euclidean distance between two embedding tensors inside a
    Keras Lambda layer.
    @param: vects -- a pair (x1, x2) of tensors of shape (batch, features)
    @return: tensor of shape (batch, 1) with the per-sample distances
    '''
    x1, x2 = vects
    # K.maximum(..., K.epsilon()) keeps the argument of sqrt strictly
    # positive, avoiding NaN gradients at zero distance.
    return K.sqrt(K.maximum(K.sum(K.square(x1 - x2), axis=1, keepdims=True), K.epsilon()))
def contrastive_loss(y_true, y_pred):
    '''
    Contrastive loss (per the QUT IFN680 assignment 2 specification).
    Uses the module-level `margin` global.
    @param
        y_true : 1 = same equivalence class, 0 = different equivalence class
        y_pred : the distance calculated by the function 'euclidean_distance'
    @return
        the mean contrastive loss over the batch
    '''
    # Same-class pairs (y_true == 1) are penalised by the squared distance,
    # pulling their embeddings together.
    # Different-class pairs (y_true == 0) are penalised only while their
    # distance is below `margin`, pushing them at least `margin` apart.
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
def accuracy(y_true, y_pred):
    '''
    Compute classification accuracy with a fixed distance threshold of 0.5.
    A pair is predicted "same class" when its distance is below 0.5.
    @param
        y_true : 1 = positive (same-class) pair, 0 = negative pair
                 (matching the label convention produced by create_pairs;
                 the original docstring had these reversed)
        y_pred : the distance calculated by the function 'euclidean_distance'
    @return the mean accuracy over the batch
    '''
    return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
def create_pairs(images, digit_indices, num_classes):
    '''
    Build alternating positive/negative image pairs for Siamese training.

    digit_indices[c] lists the indices (into `images`) of every sample of
    class c.  For each class, consecutive same-class images form a positive
    pair, immediately followed by a negative pair whose partner is drawn at
    random from a different class, so labels alternate [1, 0, 1, 0, ...].

    @param: images, digit_indices, num_classes (iterable of class ids)
    @return: (pairs, labels) as numpy arrays; label 1 = same class, 0 = different
    '''
    pairs, labels = [], []
    n_classes = len(num_classes)
    # Use one fewer than the smallest class size so that index i + 1 is
    # always valid and every class contributes the same number of pairs.
    pairs_per_class = min(len(idx) for idx in digit_indices) - 1
    for cls in range(n_classes):
        for i in range(pairs_per_class):
            # Positive pair: two consecutive images of the same class.
            first, second = digit_indices[cls][i], digit_indices[cls][i + 1]
            pairs.append([images[first], images[second]])
            # Negative pair: a random non-zero class offset guarantees the
            # partner comes from a genuinely different class.
            offset = random.randrange(1, n_classes)
            other = (cls + offset) % n_classes
            first, second = digit_indices[cls][i], digit_indices[other][i]
            pairs.append([images[first], images[second]])
            labels.extend([1, 0])
    return np.array(pairs), np.array(labels)
def define_CNNlayer_parameters():
    '''
    Adapted from Tutorial 10: explore suitable hyper-parameters for CNN
    network 1 on a small sample of Fashion-MNIST.
    Finds the best candidate parameters via sklearn's GridSearchCV and
    prints the winning combination plus per-combination mean scores.
    @return: nothing is returned; results are printed to stdout
    '''
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    # For faster training during the search, reduce the number of examples
    x_train, y_train = shuffle(x_train, y_train, random_state=0)
    x_test, y_test = shuffle(x_test, y_test, random_state=0)
    x_train = x_train[:2000]
    y_train = y_train[:2000]
    x_test = x_test[:100]
    y_test = y_test[:100]
    img_rows, img_cols = x_train.shape[1:3]
    num_classes = len(np.unique(y_train))
    # reshape the input arrays to 4D (batch_size, rows, columns, channels)
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
    # convert to float32 and rescale between 0 and 1
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    epochs = 3
    batch_size = 128
    # Define CNN Network 1
    def make_model(filters, kernel_size, pool_size, dense_layer_size):
        '''
        Build a LeNet-5 style classifier for one grid-search candidate.
        @param:
            dense_layer_size: size of the two hidden Dense layers
            filters: number of convolutional filters in each convolutional layer
            kernel_size: convolutional kernel size
            pool_size: size of pooling area for max pooling
        @return: compiled Keras Sequential model
        '''
        seq = Sequential()
        # Convolution / pooling feature extractor
        seq.add(Convolution2D(filters, kernel_size = kernel_size,
                        activation = 'relu',
                        input_shape = input_shape))
        seq.add(MaxPooling2D(pool_size = pool_size,
                       strides = (2, 2),
                       padding = 'same'))
        seq.add(Convolution2D(filters, kernel_size = kernel_size, activation = 'relu'))
        seq.add(MaxPooling2D(pool_size = pool_size,
                       strides = (2, 2),
                       padding = 'same'))
        # Fully connected classifier head
        seq.add(Flatten())
        seq.add(Dense(dense_layer_size, activation='relu'))
        seq.add(Dense(dense_layer_size, activation='relu'))
        seq.add(Dense(num_classes, activation='softmax'))
        seq.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=['accuracy'])
        return seq
    # One-hot encode the labels for categorical_crossentropy.
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    # Create a classifier using the defined CNN model
    my_classifier = KerasClassifier(make_model, verbose=1)
    # Define candidate parameter values for the following parameters
    param_grid = {'dense_layer_size': [64, 84, 120], 'filters': [32, 64], 'kernel_size': [2, 3], 'pool_size': [2]}
    # Create an object of GridSearchCV for finding the best parameters
    # (3-fold cross-validation per candidate combination).
    validator = model_selection.GridSearchCV(my_classifier, param_grid = param_grid, cv = 3, verbose=0)
    # Training the model to fit the train dataset
    validator.fit(x_train, y_train,
                  batch_size = batch_size,
                  epochs=epochs,
                  verbose=1)
    # Return and print the best parameters
    print('\nThe parameters of the best model are: ')
    print(validator.best_params_)
    # validator.best_estimator_.model returns the (unwrapped) keras model
    best_model = validator.best_estimator_.model
    metric_names = best_model.metrics_names
    metric_values = best_model.evaluate(x_test, y_test)
    for metric, value in zip(metric_names, metric_values):
        print(metric, ': ', value)
    # Mean cross-validated test score for each parameter combination.
    means = validator.cv_results_['mean_test_score']
    for mean, params in zip(means, validator.cv_results_['params']):
        print("%0.3f  for %r"
              % (mean, params))
    print()
    pass
def show_images(images, name):
    '''
    Display sample pair images to verify the dataset was paired properly.
    NOTE(review): pairs alternate positive (even index) / negative (odd
    index), but this plots images[i][0] of the first two pairs under
    "Positive" and images[i][1] under "Negative" -- i.e. the *elements* of
    each pair, not purely positive/negative pairs.  Confirm this labelling
    is what was intended.
    @param: images (array of image pairs), name (caption prefix)
    '''
    print(name + " : " + "Positive Images")
    plt.figure(figsize=(10,10))
    for i in range(2):
        plt.subplot(1,6,i+1)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(images[i][0].reshape(28, 28), cmap=plt.cm.binary)
    plt.show()
    print(name + " : " + "Negative Images")
    plt.figure(figsize=(10,10))
    for i in range(2):
        plt.subplot(1,6,i+1)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(images[i][1].reshape(28, 28), cmap=plt.cm.binary)
    plt.show()
    pass
def create_base_network_1(input_shape):
    '''
    Create the base CNN for experiment 1 (LeNet-5 style):
    Conv(32) -> MaxPooling -> Conv(64) -> MaxPooling
    -> Flatten -> Dense(120) -> Dense(84) -> Dense(10, softmax)
    (The original docstring omitted the second MaxPooling layer.)
    @param: input_shape with 3 dimensions | (28, 28, 1)
    @return: CNN Neural Network (Sequential)
    '''
    seq = Sequential()
    # Convolution / pooling feature extractor
    seq.add(Convolution2D(32, kernel_size = (3, 3),
                    activation = 'relu',
                    input_shape = input_shape))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                   strides = (2, 2),
                   padding = 'same'))
    seq.add(Convolution2D(64, (3, 3), activation = 'relu'))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                   strides = (2, 2),
                   padding = 'same'))
    # Fully connected classifier head
    seq.add(Flatten())
    seq.add(Dense(120, activation='relu'))
    seq.add(Dense(84, activation='relu'))
    seq.add(Dense(10, activation='softmax'))
    seq.summary()
    return seq
def create_base_network_2(input_shape):
    '''
    Create the CNN for experiment 2:
    Conv(32) -> MaxPooling() -> Conv(64) -> MaxPooling()
    -> Flatten() -> Dense(120, regularised) -> Dense(84) -> Dense(10)
    Identical to network 1 except that Dense(120) adds L2 kernel and L1 bias
    regularisation (both 0.01) to reduce overfitting.
    @param: input_shape with 3 dimensions | (28, 28, 1)
    @return: CNN neural network (Sequential)
    '''
    seq = Sequential()
    # Partially connected (feature extraction) layers
    seq.add(Convolution2D(32, kernel_size = (3, 3),
                 activation = 'relu',
                 input_shape = input_shape))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                 strides = (2, 2),
                 padding = 'same'))
    seq.add(Convolution2D(64, (3, 3), activation = 'relu'))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                 strides = (2, 2),
                 padding = 'same'))
    # Fully connected (classification) layers
    seq.add(Dense(120, activation='relu',
                  kernel_regularizer=regularizers.l2(0.01),
                  bias_regularizer=regularizers.l1(0.01)))
    seq.add(Dense(84, activation='relu'))
    seq.add(Dense(10, activation='softmax'))
    seq.summary()
    return seq
def create_base_network_3(input_shape):
    '''
    Create the CNN for experiment 3:
    Conv(32) -> MaxPooling() -> Conv(64) -> MaxPooling()
    -> Flatten() -> Dense(120, regularised) -> Dropout(0.25) -> Dense(84) -> Dense(10)
    Like network 2 (L2 kernel / L1 bias regularisation on Dense(120)) but with
    an additional 25% dropout layer for further overfitting control.
    @param: input_shape with 3 dimensions | (28, 28, 1)
    @return: CNN neural network (Sequential)
    '''
    seq = Sequential()
    # Partially connected (feature extraction) layers
    seq.add(Convolution2D(32, kernel_size = (3, 3),
                 activation = 'relu',
                 input_shape = input_shape))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                 strides = (2, 2),
                 padding = 'same'))
    seq.add(Convolution2D(64, (3, 3), activation = 'relu'))
    seq.add(MaxPooling2D(pool_size = (2, 2),
                 strides = (2, 2),
                 padding = 'same'))
    # Fully connected (classification) layers
    seq.add(Flatten())
    seq.add(Dense(120, activation='relu',
                  kernel_regularizer=regularizers.l2(0.01),
                  bias_regularizer=regularizers.l1(0.01)))
    seq.add(Dropout(0.25))
    seq.add(Dense(84, activation='relu'))
    seq.add(Dense(10, activation='softmax'))
    seq.summary()
    return seq
def show_plot(history):
    '''
    Plot the training curves of one run: an accuracy graph and a loss graph,
    each with the training line (black) and the validation line (red).
    @param: history - Keras History object returned by model.fit()
    '''
    _plot_history_metric(history, 'accuracy', 'val_accuracy',
                         'Siamese Network - Accuracy',
                         ['train_acc','val_acc'], 'lower right')
    _plot_history_metric(history, 'loss', 'val_loss',
                         'Siamese Network - Loss',
                         ['train_loss','val_loss'], 'upper right')

def _plot_history_metric(history, train_key, val_key, title, legend, legend_loc):
    '''Plot one train/validation metric pair from a Keras History object.'''
    plt.plot(history.history[train_key], color = "Black")
    plt.plot(history.history[val_key], color = "Red")
    plt.title(title)
    plt.ylabel('Percent')
    plt.xlabel('Epoch')
    plt.legend(legend, loc=legend_loc)
    plt.show()
def get_data():
    '''
    Obtain the Fashion-MNIST data online and do the following processes:
    1. Data Combination
    2. Data Classification
    3. Data Optimisation
    4. Data Normalisation
    5. Create Positive & Negative Pairs
    6. Data Verification (printed only on the first call)
    @return: four (pairs, pair_labels) tuples -- SET1 training, SET2-SET4
        testing -- plus the image input_shape (28, 28, 1)
    '''
    # Load data online
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
    # Data combination (images + labels) 60,000 + 10,000
    full_images = np.concatenate((train_images,test_images))
    full_labels = np.concatenate((train_labels,test_labels))
    img_rows, img_cols = full_images.shape[1:3]
    input_shape = (img_rows, img_cols, 1)
    # Data Classification such that:
    # * 80% of the images with labels in ["top", "trouser", "pullover", "coat", "sandal", "ankle boot"]
    #   are used for training, while the other 20% are used for testing
    # * the images with labels in ["dress", "sneaker", "bag", "shirt"] are only used for testing.
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    num_classes_1, num_classes_2 = [0,1,2,4,5,9], [3,6,7,8]
    images_set_1, labels_set_1 = [], []
    test_images_1, test_labels_1 = [], []
    for i in range(len(full_images)):
        if full_labels[i] in num_classes_1:
            images_set_1.append(full_images[i])
            labels_set_1.append(full_labels[i])
        elif full_labels[i] in num_classes_2:
            test_images_1.append(full_images[i])
            test_labels_1.append(full_labels[i])
    # Convert the data type to nparray
    images_set_1 = np.array(images_set_1)
    labels_set_1 = np.array(labels_set_1)
    test_images_1 = np.array(test_images_1)
    test_labels_1 = np.array(test_labels_1)
    # Split the data into 8:2 for training & testing
    train_images_1, test_images_2, train_labels_1, test_labels_2 = model_selection.train_test_split(images_set_1, labels_set_1, test_size = 0.2)
    # Combine the 100% of images with labels in ["dress", "sneaker", "bag", "shirt"]
    # and the 20% of images with labels in ["top", "trouser", "pullover", "coat", "sandal", "ankle boot"]
    test_images_3 = np.concatenate((test_images_1, test_images_2))
    test_labels_3 = np.concatenate((test_labels_1, test_labels_2))
    # Data Optimisation
    # - Reshape image arrays to 4D (batch_size, rows, columns, channels)
    train_images_1 = train_images_1.reshape(train_images_1.shape[0], img_rows, img_cols, 1) # 80%
    test_images_1 = test_images_1.reshape(test_images_1.shape[0], img_rows, img_cols, 1) # 100%
    test_images_2 = test_images_2.reshape(test_images_2.shape[0], img_rows, img_cols, 1) # 20%
    test_images_3 = test_images_3.reshape(test_images_3.shape[0], img_rows, img_cols, 1) # 100% + 20%
    # Data Normalisation
    # 1. alter data type to float32
    # 2. rescale pixel values between 0 and 1
    train_images_1 = train_images_1.astype("float32")
    test_images_1 = test_images_1.astype("float32")
    test_images_2 = test_images_2.astype("float32")
    test_images_3 = test_images_3.astype("float32")
    train_images_1 /= 255
    test_images_1 /= 255
    test_images_2 /= 255
    test_images_3 /= 255
    # Create training & testing positive and negative pairs for 4 sets of data
    # num_classes_1, num_classes_2 = [0,1,2,4,5,9], [3,6,7,8]
    # class_names = [0,1,2,3,4,5,6,7,8,9]
    digit_indices = [np.where(train_labels_1 == num_classes_1[i])[0] for i in range(len(num_classes_1))]
    train_pairs_1, train_pairs_labels_1 = create_pairs(train_images_1, digit_indices, num_classes_1)
    digit_indices = [np.where(test_labels_1 == num_classes_2[i])[0] for i in range(len(num_classes_2))]
    test_pairs_1, test_pairs_labels_1 = create_pairs(test_images_1, digit_indices, num_classes_2)
    digit_indices = [np.where(test_labels_2 == num_classes_1[i])[0] for i in range(len(num_classes_1))]
    test_pairs_2, test_pairs_labels_2 = create_pairs(test_images_2, digit_indices, num_classes_1)
    # The data contains all classes
    digit_indices = [np.where(test_labels_3 == i)[0] for i in range(len(class_names))]
    test_pairs_3, test_pairs_labels_3 = create_pairs(test_images_3, digit_indices, class_names)
    global initiated
    # The verification printout below is skipped after the first execution.
    if (initiated == False):
        # Data Verification
        # Image with labels [0,1,2,4,5,9] = 42000
        # 80% of image with labels [0,1,2,4,5,9] = 33600
        # 20% of image with labels [0,1,2,4,5,9] = 8400
        # Image with labels [3,6,7,8] = 28000
        # 20% of image with labels [0,1,2,4,5,9] + 100% of image with labels [3,6,7,8] = 36400
        print("*** Data Verification Start *** ")
        print("*** Total images ===  ", full_images.shape[0])
        print("*** Total image labels ===  ", np.unique(full_labels))
        print("*** Image with labels [0,1,2,4,5,9] ===  ", images_set_1.shape[0])
        print("*** Image labels [0,1,2,4,5,9] ===  ", np.unique(labels_set_1))
        print("*** Image with labels [3,6,7,8] ===  ", test_images_1.shape[0])
        print("*** Image labels [3,6,7,8] ===  ", np.unique(test_labels_1))
        print("*** 80% of image with labels [0,1,2,4,5,9] ===  ", train_images_1.shape[0])
        print("*** 80% of image labels [0,1,2,4,5,9] ===  ", np.unique(train_labels_1))
        print("*** 20% of image with labels [0,1,2,4,5,9] ===  ", test_images_2.shape[0])
        print("*** 20% of image labels [0,1,2,4,5,9] ===  ", np.unique(test_labels_2))
        print("*** 20% of image with labels [0,1,2,4,5,9] + 100% of image with labels [3,6,7,8] ===  ", test_images_3.shape[0])
        print("*** 20% of image labels [0,1,2,4,5,9] + 100% of image labels [3,6,7,8] ===  ", np.unique(test_labels_3))
        show_images(train_pairs_1, "SET_1")
        show_images(test_pairs_1, "SET_2")
        show_images(test_pairs_2, "SET_3")
        show_images(test_pairs_3, "SET_4")
        print("*** Data Verification End *** ")
        initiated = True
    return (train_pairs_1, train_pairs_labels_1), (test_pairs_1, test_pairs_labels_1), (test_pairs_2, test_pairs_labels_2), (test_pairs_3, test_pairs_labels_3), input_shape
def create_siamese_network(network_mode, input_shape):
    '''
    Initiate a Siamese network whose shared base CNN is selected by
    network_mode (1/2/3 -> create_base_network_1/2/3; any other value
    falls back to network 3).
    @params: network_mode, input_shape
    @return: compiled model (Siamese Network) with contrastive loss
    '''
    # Network initialisation
    base_network = Sequential()
    if network_mode == 1:
        base_network = create_base_network_1(input_shape)
    elif network_mode == 2:
        base_network = create_base_network_2(input_shape)
    else:
        base_network = create_base_network_3(input_shape)
    # Initiate the shape for two input tensors
    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)
    # Feed both inputs through the SAME base network so the weights are shared
    processed_a = base_network(input_a)
    processed_b = base_network(input_b)
    # Lambda layer computing the Euclidean distance between the two embeddings
    distance = Lambda(euclidean_distance)([processed_a, processed_b])
    # Model Initialisation
    model = Model([input_a, input_b], distance)
    model.compile(loss=contrastive_loss, optimizer=Adadelta(), metrics=[accuracy])
    return model
def train_network(network_mode, epochs, test_mode):
    '''
    Train a Siamese network based on network_mode.
    @params:
        network_mode:
            # 1 = Siamese Network Ex1
            # 2 = Siamese Network Ex2
            # 3 = Siamese Network Ex3
        epochs: number of training epochs.
        test_mode: 0 = conduct experiments / 1 = testing loss function
    '''
    # Training data: 80% of image pairs with labels in
    # ["top", "trouser", "pullover", "coat", "sandal", "ankle boot"].
    # SET1 : train_pairs_1 & train_pairs_labels_1:
    #   - the 80% training pairs above
    # SET2 : test_pairs_1 & test_pairs_labels_1:
    #   - 100% of image pairs with labels in ["dress", "sneaker", "bag", "shirt"] (testing only)
    # SET3 : test_pairs_2 & test_pairs_labels_2:
    #   - the remaining 20% of pairs with the training labels
    # SET4 : test_pairs_3 & test_pairs_labels_3:
    #   - image pairs covering all labels (testing only)
    # input_shape: (28, 28, 1)
    (train_pairs_1, train_pairs_labels_1), (test_pairs_1, test_pairs_labels_1), (test_pairs_2, test_pairs_labels_2), (test_pairs_3, test_pairs_labels_3), input_shape = get_data()
    batch_size = 128
    # margin is a module-level global read by the contrastive loss function.
    global margin
    if test_mode == 0:
        # Phase I: one training run with margin = 1, then evaluate SET2-SET4.
        margin = 1
        model = create_siamese_network(network_mode, input_shape)
        history = model.fit([train_pairs_1[:, 0], train_pairs_1[:, 1]], train_pairs_labels_1,
                            batch_size=batch_size,
                            epochs=epochs,
                            verbose=1,
                            validation_data=([test_pairs_2[:, 0], test_pairs_2[:, 1]], test_pairs_labels_2))
        # Plot the training result (accuracy + loss curves).
        show_plot(history)
        # Evaluate the trained model on every hold-out set.
        evaluation_sets = [('SET_2', test_pairs_1, test_pairs_labels_1),
                           ('SET_3', test_pairs_2, test_pairs_labels_2),
                           ('SET_4', test_pairs_3, test_pairs_labels_3)]
        for set_name, pairs, labels in evaluation_sets:
            score = model.evaluate([pairs[:, 0], pairs[:, 1]], labels, verbose = 0)
            print('%s -> Test Loss = %0.2f%%' % (set_name, 100 * score[0]))
            print('%s -> Test Accuracy = %0.2f%%' % (set_name, 100 * score[1]))
    else:
        # Phase II: margin-value experiment for the contrastive loss.
        # Retrain the same network once per candidate margin and compare the
        # SET3 validation scores (dataset/network choice is justified in the
        # report). NOTE: the int 1 (not 1.0) keeps the printed label "Margin = 1".
        for margin in [0.55, 0.75, 1, 1.25, 1.5]:
            model = create_siamese_network(network_mode, input_shape)
            model.fit([train_pairs_1[:, 0], train_pairs_1[:, 1]], train_pairs_labels_1,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=1,
                      validation_data=([test_pairs_2[:, 0], test_pairs_2[:, 1]], test_pairs_labels_2))
            score = model.evaluate([test_pairs_2[:, 0], test_pairs_2[:, 1]], test_pairs_labels_2, verbose = 0)
            print('SET_2 (Margin = %s) -> Test Loss = %0.2f%%' % (margin, 100 * score[0]))
            print('SET_2 (Margin = %s) -> Test Accuracy = %0.2f%%' % (margin, 100 * score[1]))
def main_func():
    '''Run the phase I experiments on all three Siamese network variants.'''
    # Uncomment the method to explore the best parameters for CNN network 1
    ### -> define_CNNlayer_parameters()
    # Phase I Experiment - train every Siamese network variant:
    #   1st arg: network mode (1 = Ex1, 2 = Ex2, 3 = Ex3 (best))
    #   2nd arg: epochs
    #   3rd arg: 0 = conduct experiments / 1 = testing loss function (margin change)
    for network_mode in (1, 2, 3):
        train_network(network_mode, 20, 0)
    # Phase II Experiment
    # Uncomment the call below to explore the contrastive loss function when
    # the margin value changes; margins [0.55, 0.75, 1.00, 1.25, 1.5] are
    # trained in turn. Margin = 1 produced the highest accuracy in our runs.
    #### -> train_network(3, 20, 1)
# Script entry point: run the full experiment suite.
if __name__ == '__main__':
    main_func()
73288430736 | usuarios = {}
def cadastrar():
    """Register a new user, asking for the password twice for confirmation."""
    global usuarios
    nome = input("Digite um nome de usuário: ")
    # Reject duplicate user names up front.
    if nome in usuarios:
        print("Usuário já existe. Tente outro nome de usuário.")
        return
    senha = input("Digite sua senha: ")
    confirmacao = input("Confirme sua senha: ")
    if senha != confirmacao:
        print("As senhas não coincidem. Tente novamente.")
        return
    usuarios[nome] = senha
    print("Cadastro realizado com sucesso!")
def login():
    """Authenticate a user; three wrong passwords "block" the account."""
    global usuarios
    usuario = input("Digite seu nome de usuário: ")
    if usuario not in usuarios:
        print("Usuário não encontrado. Cadastre-se primeiro.")
        return
    senha_correta = usuarios[usuario]
    tentativas = 0
    # Allow at most three password attempts.
    while tentativas < 3:
        senha = input("Digite sua senha: ")
        if senha == senha_correta:
            print("Login bem-sucedido!")
            return
        else:
            tentativas += 1
            print(f"Senha incorreta. Tentativas restantes: {3 - tentativas}")
    # NOTE(review): "blocking" is implemented by DELETING the account, so the
    # same name can be registered again afterwards -- confirm this is intended.
    print("Você excedeu o número máximo de tentativas. Sua conta está bloqueada.")
    del usuarios[usuario]
# Main menu loop: register, log in, or exit.
while True:
    print("Selecione uma opção:")
    print("1 - Cadastrar")
    print("2 - Login")
    print("3 - Sair")
    opcao = input()
    if opcao == "1":
        cadastrar()
    elif opcao == "2":
        login()
    elif opcao == "3":
        print("Saindo...")
        break
    else:
        # Any unrecognised choice re-displays the menu.
        print("Opção inválida. Tente novamente.")
70441172817 | d = []
w = []  # withdrawal amounts entered so far (deposits accumulate in d)
while True:
    # Simple text menu, repeated until the user exits.
    print("\n1. Enter the transaction")
    print("2. Display the net ammount")
    print("3. Exit")
    resp = int( input("Enter your choice? ") )
    if resp == 1:
        # A transaction looks like "D 100" (deposit) or "W 50" (withdrawal);
        # the amount starts at index 2, after the letter and the space.
        trans = input( "\nEnter transaction with D/W and value? ")
        if trans[0] == "D":
            d.append( int(trans[2:]) )
        elif trans[0] == "W":
            w.append( int(trans[2:]) )
    elif resp == 2:
        # Net amount = total deposits - total withdrawals.
        print("Net = ", sum(d) - sum(w) )
    else:
        # NOTE(review): any choice other than 1 or 2 exits, not only 3.
        exit()
36153328446 | import numpy as np
import pytest
import xarray as xr
import xbatcher # noqa: F401
from xbatcher import BatchGenerator
@pytest.fixture(scope="module")
def sample_ds_3d():
    """Random 3D (time, y, x) dataset with a float var "foo" and an int var "bar"."""
    shape = (10, 50, 100)
    ds = xr.Dataset(
        {
            "foo": (["time", "y", "x"], np.random.rand(*shape)),
            "bar": (["time", "y", "x"], np.random.randint(0, 10, shape)),
        },
        {
            # Coordinates for the two spatial dims only; "time" has none.
            "x": (["x"], np.arange(shape[-1])),
            "y": (["y"], np.arange(shape[-2])),
        },
    )
    return ds
@pytest.fixture(scope="module")
def sample_dataArray():
    """2x4 all-zero int32 DataArray named "foo"."""
    return xr.DataArray(np.zeros((2, 4), dtype="i4"), dims=("x", "y"), name="foo")
@pytest.fixture(scope="module")
def sample_Dataset():
    """1-D dataset with an integer variable "x" and a float variable "foo"."""
    return xr.Dataset(
        {
            "x": xr.DataArray(np.arange(10), dims="x"),
            "foo": xr.DataArray(np.ones(10, dtype="float"), dims="x"),
        }
    )
def test_as_xarray_dataarray(sample_dataArray, sample_Dataset):
    """Both DataArray and Dataset inputs are coerced to an xr.DataArray."""
    for obj in (sample_dataArray, sample_Dataset):
        coerced = xbatcher.accessors._as_xarray_dataarray(obj)
        assert isinstance(coerced, xr.DataArray)
def test_batch_accessor_ds(sample_ds_3d):
    """ds.batch.generator must behave like a directly built BatchGenerator."""
    bg_class = BatchGenerator(sample_ds_3d, input_dims={"x": 5})
    bg_acc = sample_ds_3d.batch.generator(input_dims={"x": 5})
    assert isinstance(bg_acc, BatchGenerator)
    # Every batch from the accessor must equal the corresponding direct batch.
    for batch_class, batch_acc in zip(bg_class, bg_acc):
        assert isinstance(batch_acc, xr.Dataset)
        assert batch_class.equals(batch_acc)
def test_batch_accessor_da(sample_ds_3d):
    """da.batch.generator must behave like a directly built BatchGenerator."""
    sample_da = sample_ds_3d["foo"]
    bg_class = BatchGenerator(sample_da, input_dims={"x": 5})
    bg_acc = sample_da.batch.generator(input_dims={"x": 5})
    assert isinstance(bg_acc, BatchGenerator)
    # Every batch from the accessor must equal the corresponding direct batch.
    for batch_class, batch_acc in zip(bg_class, bg_acc):
        assert batch_class.equals(batch_acc)
@pytest.mark.parametrize(
"foo_var",
[
"foo", # xr.DataArray
["foo"], # xr.Dataset
],
)
def test_tf_to_tensor(sample_ds_3d, foo_var):
tf = pytest.importorskip("tensorflow")
foo = sample_ds_3d[foo_var]
t = foo.tf.to_tensor()
assert isinstance(t, tf.Tensor)
assert t.shape == tuple(foo.sizes.values())
foo_array = foo.to_array().squeeze() if hasattr(foo, "to_array") else foo
np.testing.assert_array_equal(t, foo_array.values)
@pytest.mark.parametrize(
"foo_var",
[
"foo", # xr.DataArray
["foo"], # xr.Dataset
],
)
def test_torch_to_tensor(sample_ds_3d, foo_var):
torch = pytest.importorskip("torch")
foo = sample_ds_3d[foo_var]
t = foo.torch.to_tensor()
assert isinstance(t, torch.Tensor)
assert t.names == (None, None, None)
assert t.shape == tuple(foo.sizes.values())
foo_array = foo.to_array().squeeze() if hasattr(foo, "to_array") else foo
np.testing.assert_array_equal(t, foo_array.values)
@pytest.mark.parametrize(
"foo_var",
[
"foo", # xr.DataArray
["foo"], # xr.Dataset
],
)
def test_torch_to_named_tensor(sample_ds_3d, foo_var):
torch = pytest.importorskip("torch")
foo = sample_ds_3d[foo_var]
t = foo.torch.to_named_tensor()
assert isinstance(t, torch.Tensor)
assert t.names == tuple(foo.dims)
assert t.shape == tuple(foo.sizes.values())
foo_array = foo.to_array().squeeze() if hasattr(foo, "to_array") else foo
np.testing.assert_array_equal(t, foo_array.values)
| xarray-contrib/xbatcher | xbatcher/tests/test_accessors.py | test_accessors.py | py | 3,458 | python | en | code | 114 | github-code | 13 |
677377192 | import numpy as np
import circle
import csv
from enum import Enum
def read_from_file(nc_code_file):
    """Return the NC program as a list of raw lines (newlines kept)."""
    with open(nc_code_file) as source:
        return source.readlines()
def create_coordinates_file(coordinates_file):
    """Create the coordinates output file, truncating any previous content."""
    # Opening in 'w' mode and closing immediately leaves an empty file.
    open(coordinates_file, 'w').close()
def append_multiple_coordinates_to_file(coordinates_file, list_of_coordinates):
    """Append one CSV row per coordinate to the output file.

    @param coordinates_file: path of the CSV file to append to
    @param list_of_coordinates: iterable of row iterables (e.g. numpy vectors)
    """
    with open(coordinates_file, 'a', newline='') as csv_file:
        # The context manager closes the file; the explicit close() the
        # original code called inside the with-block was redundant.
        csv.writer(csv_file).writerows(list_of_coordinates)
def append_toolcall_diameter_to_file(coordinates_file, toolcall_diameter):
    """Append a tool-call marker line (e.g. "Tool Durchmesser 5") to the file.

    @param coordinates_file: path of the output file to append to
    @param toolcall_diameter: marker text; a newline is added automatically
    """
    with open(coordinates_file, 'a') as out_file:
        # The context manager closes the file; the explicit close() the
        # original code called inside the with-block was redundant.
        out_file.write(toolcall_diameter + "\n")
def separate_line_into_single_commands(nc_code_line):
    """Split one NC-code line into its space-separated command tokens."""
    return nc_code_line.strip("\n").split(" ")
# This method computes a straight line between two points and returns the intermediate steps.
# Parameters: nc_code_list = NC-code tokens, start_position = 3-dim vector; rpm, tool_blade_count, velocity = numbers
def calculate_linear_movement_coordinates(nc_code_list, start_position, rpm, tool_blade_count, velocity):
    """Compute the intermediate points of a linear (L) movement.

    The step size between points is the feed per tooth
    f_z = feed rate / (rpm * blade count); FMAX selects a fixed 2000 feed.

    @param nc_code_list: tokens of one NC line (e.g. ["30", "L", "X5", "F200"])
    @param start_position: current position as a 3-dim numpy vector.
        NOTE: mutated in place (z += 1000) when the line carries M91.
    @param rpm: spindle speed; 0 means no stepping, only the endpoint.
    @param tool_blade_count: number of cutting edges of the active tool.
    @param velocity: last feed rate, reused when the line has no F token.
    @return: [list of intermediate points, final position, feed rate]
    """
    use_max_velocity = False
    m_91 = False
    MAX_VELOCITY = 2000
    x = None
    y = None
    z = None
    # Parse the tokens: feed rate (F/FMAX), target axes (X/Y/Z), M91 flag.
    for command in nc_code_list:
        if command == "FMAX":
            use_max_velocity = True
        elif command.find("F") == 0:
            velocity = command.strip("F")
        if command.find("X") == 0:
            x = command.strip("X")
        if command.find("Y") == 0:
            y = command.strip("Y")
        if command.find("Z") == 0:
            z = command.strip("Z")
        if command == "M91":
            m_91 = True
    # Axes not present in the command keep the current position.
    if x is None:
        x = float(start_position[0])
    if y is None:
        y = float(start_position[1])
    if z is None:
        z = float(start_position[2])
    if m_91:
        # M91 = machine-referenced coordinates; presumably the machine origin
        # sits 1000 above the workpiece origin -- TODO confirm the offset.
        start_position[2] = start_position[2] + 1000
        z = float(z) + 1000
    destination = np.array([float(x), float(y), float(z)])
    coordinates_of_path = []
    # f_z = feed per tooth (step length along the path)
    f_z = 1
    if use_max_velocity and rpm != 0:
        f_z = MAX_VELOCITY / (rpm * tool_blade_count)
    if use_max_velocity is False and rpm != 0:
        f_z = float(velocity) / (rpm * tool_blade_count)
    if rpm == 0:
        # Spindle not turning (e.g. positioning move): only the endpoint.
        coordinates_of_path.append(destination)
        return [coordinates_of_path, destination, velocity]
    path_vector = destination - start_position
    path_length = np.linalg.norm(path_vector)
    step_count = path_length / f_z
    if (step_count == 0.0):
        step_count = 1
    step_length = path_vector / step_count
    if f_z > path_length:
        # The whole move is shorter than one step: only the endpoint.
        coordinates_of_path.append(destination)
        return [coordinates_of_path, destination, velocity]
    for i in range(1, int(step_count) + 1):
        steps_from_start_to_destination = start_position + step_length * i
        coordinates_of_path.append(steps_from_start_to_destination)
        # On the last iteration, when step_count is fractional, also append
        # the exact endpoint so the path always ends on the destination.
        if step_count - i < 1 and step_count - i > 0:
            steps_from_start_to_destination = start_position + step_length * step_count
            coordinates_of_path.append(steps_from_start_to_destination)
    return [coordinates_of_path, steps_from_start_to_destination, velocity]
# Example input: 30 CC X29.2387 Y19.4175
# This method returns a 3-dim vector describing the circle centre.
def calculate_circle_center(nc_command_line, start_point):
    """Parse a CC line and return the circle centre as a 3-dim vector.

    Axes missing from the command keep the current position's value.
    """
    coords = {"X": None, "Y": None, "Z": None}
    for token in nc_command_line:
        axis = token[:1]
        if axis in coords:
            coords[axis] = token[1:]
    return create_np_array(start_point, coords["X"], coords["Y"], coords["Z"])
def calculate_circle_movement(nc_command_line, start_point, circle_center, f_z):
    """Parse a C (circle) command and compute the points along the arc.

    DR+ selects counter-clockwise rotation, DR- clockwise; axes missing
    from the command keep the current position's value.
    """
    coords = {"X": None, "Y": None, "Z": None}
    counter_clockwise_movement = None
    for token in nc_command_line:
        axis = token[:1]
        if axis in coords:
            coords[axis] = token[1:]
        elif token == "DR+":
            counter_clockwise_movement = True
        elif token == "DR-":
            counter_clockwise_movement = False
    destination = create_np_array(start_point, coords["X"], coords["Y"], coords["Z"])
    return circle.calculate_circle_points(start_point, destination, circle_center, counter_clockwise_movement, f_z)
def create_np_array(start_point, x, y, z):
    """Build a float vector, falling back to start_point for missing axes."""
    filled = [
        float(fallback) if value is None else float(value)
        for value, fallback in zip((x, y, z), start_point)
    ]
    return np.array(filled)
# This method reads the workpiece properties and initialises the coordinates of the workpiece origin.
def init_work_piece(nc_command_list):
    """Set the module-level RELATIVE_ORIGIN from the workpiece init line.

    Expects the last three tokens of the line to be the X/Y/Z values
    (e.g. ["1", "...", "X0", "Y0", "Z-40"]).
    """
    global RELATIVE_ORIGIN
    # The coordinates are always the last three tokens of the command line.
    x = nc_command_list[-3].strip("X")
    y = nc_command_list[-2].strip("Y")
    z = nc_command_list[-1].strip("Z")
    RELATIVE_ORIGIN = np.array([float(x), float(y), float(z)])
def tool_call(nc_command_list):
    """Parse a TOOL CALL line and return [rpm, blade count, diameter].

    The spindle speed comes from the "S..." token (0 if absent); blade
    count and diameter come from hard-coded per-tool tables indexed by
    the tool number, which is the 4th token of the command line.
    """
    tool_blade_count = [2, 2]
    tool_diameter = [5, 2]
    rpm = 0
    for token in nc_command_list:
        if token.startswith("S"):
            rpm = token.strip("S")
    tool_number = int(nc_command_list[3])
    return [int(rpm), tool_blade_count[tool_number - 1], tool_diameter[tool_number - 1]]
class MovementCommands(Enum):
    """NC-code command identifiers recognised by calculate_coordinates."""
    INIT = "1"  # first line of the program: workpiece definition
    LINE = "L"  # linear movement
    CIRCLE_CENTER = "CC"  # stores the centre for the next circular movement
    CIRCLE = "C"  # circular movement around the stored centre
    TOOLCALL = "TOOL"  # tool change (spindle speed, blade count, diameter)
def calculate_coordinates(nc_code_file, coordinates_file):
    """Translate an NC program into tool-path coordinates written as CSV.

    Reads nc_code_file line by line, dispatches on the command token
    (see MovementCommands) and appends the resulting coordinates and
    tool-call markers to coordinates_file.
    """
    # origin = np.array([0, 0, 100])
    velocity = 0
    rpm = 0
    tool_blade_count = 2
    # Initial machine position -- presumably the machine's park/tool-change
    # position; TODO confirm against the simulated machine setup.
    start_position = np.array([-500, -420, 100])
    circle_center = np.array([0, 0, 0])
    create_coordinates_file(coordinates_file)
    # NOTE(review): assumes every NC line has at least two tokens.
    for line in read_from_file(nc_code_file):
        command_line = separate_line_into_single_commands(line)
        # The workpiece definition is identified by its line number "1".
        if command_line[0] == MovementCommands.INIT.value:
            init_work_piece(command_line)
        if command_line[1] == MovementCommands.LINE.value:
            movement = calculate_linear_movement_coordinates(command_line, start_position, rpm, tool_blade_count,
                                                             velocity)
            path_coordinates = movement[0]
            append_multiple_coordinates_to_file(coordinates_file, path_coordinates)
            start_position = movement[1]
            velocity = movement[2]
        if command_line[1] == MovementCommands.CIRCLE_CENTER.value:
            # CC only stores the centre; the following C command uses it.
            circle_center = calculate_circle_center(command_line, start_position)
        if command_line[1] == MovementCommands.CIRCLE.value:
            # f_z = feed per tooth, the step size along the arc.
            f_z = float(velocity) / (rpm * tool_blade_count)
            circle_path = calculate_circle_movement(command_line, start_position, circle_center, f_z)
            start_position = circle_path[len(circle_path) - 1]
            append_multiple_coordinates_to_file(coordinates_file, circle_path)
        if command_line[1] == MovementCommands.TOOLCALL.value:
            array = tool_call(command_line)
            rpm = array[0]
            tool_blade_count = array[1]
            toolcall = "Tool Durchmesser " + str(array[2])
            append_toolcall_diameter_to_file(coordinates_file, toolcall)
def main():
    """Placeholder entry point; the module is driven via calculate_coordinates."""
# Script entry point.
if __name__ == '__main__':
    main()
| ekement/Milling-Machine-Simulation | path_calculation.py | path_calculation.py | py | 7,468 | python | en | code | 0 | github-code | 13 |
14907657421 | import uuid
import unittest
import pkg_resources
from kado.store import _store
from tests.lib import constants as tc
class TestIndex(unittest.TestCase):
"""Test case for :class:`kado.store._store.Index`."""
    def setUp(self):
        """Setup test cases for :class:`kado.store._store.Index`.

        Fixtures are named ``IX_K<n>V<m>``: an index holding ``n`` keys with
        ``m`` values stored under each key.
        """
        # Index keys.
        self.KEY1 = 'KEY1'
        self.KEY2 = 'KEY2'
        # Index values.
        self.VALUE1 = 'VALUE1'
        self.VALUE2 = 'VALUE2'
        # Empty index.
        self.IX_EMPTY = _store.Index()
        # Index with one key and one stored value.
        self.IX_K1V1 = _store.Index()
        self.IX_K1V1.add(self.KEY1, self.VALUE1)
        # Index with one key and two values.
        self.IX_K1V2 = _store.Index()
        self.IX_K1V2.add(self.KEY1, self.VALUE1)
        self.IX_K1V2.add(self.KEY1, self.VALUE2)
        # Index with two keys, one value each.
        self.IX_K2V1 = _store.Index()
        self.IX_K2V1.add(self.KEY1, self.VALUE1)
        self.IX_K2V1.add(self.KEY2, self.VALUE1)
        # Index with two keys, two values each.
        self.IX_K2V2 = _store.Index()
        self.IX_K2V2.add(self.KEY1, self.VALUE1)
        self.IX_K2V2.add(self.KEY1, self.VALUE2)
        self.IX_K2V2.add(self.KEY2, self.VALUE1)
        self.IX_K2V2.add(self.KEY2, self.VALUE2)
    def test___contains___one_key(self):
        """Test one key containment in a one key index."""
        # Exercises Index.__contains__ via the ``in`` operator.
        self.assertIn(self.KEY1, self.IX_K1V1)
    def test___contains___two_keys(self):
        """Test one key containment in a two keys index."""
        # Only one of the two stored keys is checked here.
        self.assertIn(self.KEY1, self.IX_K2V1)
    def test___contains___all_keys(self):
        """Test presence of all keys in a two keys index."""
        # subTest reports each missing key separately.
        for key in [self.KEY1, self.KEY2]:
            with self.subTest(key=key):
                self.assertIn(key, self.IX_K2V1)
    def test___contains___invalid_key_empty(self):
        """A nonexistent key should return false on an empty index."""
        self.assertNotIn('--INVALID--', self.IX_EMPTY)
    def test___contains___invalid_key_one_key(self):
        """A nonexistent key should return false on a filled index."""
        self.assertNotIn('--INVALID--', self.IX_K1V1)
    def test___iter___empty(self):
        """Iterate over an empty index."""
        # Iterating an empty index must yield no keys at all.
        self.assertEqual(list(self.IX_EMPTY), [])
    def test___iter___two_key(self):
        """Iterate over a two keys index."""
        # Iteration yields keys (not values); ordering is not asserted.
        for key in self.IX_K2V1:
            self.assertIn(key, [self.KEY1, self.KEY2])
    def test_clear_one_key(self):
        """Clear the index with only one key stored."""
        # Precondition guard, then clear() must leave the index empty.
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K1V1), 1)
        self.IX_K1V1.clear()
        self.assertEqual(len(self.IX_K1V1), 0)
    def test_clear_one_key_two_values(self):
        """Clear the index with two values stored under the same key."""
        # Precondition guard: one key holding two values.
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K1V2), 1)
            self.assertEqual(self.IX_K1V2.count(), 2)
        self.IX_K1V2.clear()
        self.assertEqual(len(self.IX_K1V2), 0)
    def test_clear_two_keys_one_value(self):
        """Clear the index with two keys stored."""
        # Precondition guard, then clear() must remove both keys.
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K2V1), 2)
        self.IX_K2V1.clear()
        self.assertEqual(len(self.IX_K2V1), 0)
    def test_count_all_one_key_one_value(self):
        """Count one value stored under one key."""
        # count() without a key tallies values over the whole index.
        self.assertEqual(self.IX_K1V1.count(), 1)
    def test_count_all_one_key_two_values(self):
        """Count two values stored under one key."""
        self.assertEqual(self.IX_K1V2.count(), 2)
    def test_count_all_two_keys_one_value(self):
        """Count two values stored under two different keys."""
        self.assertEqual(self.IX_K2V1.count(), 2)
    def test_count_all_two_keys_two_values(self):
        """Count four values in total spread over two keys."""
        self.assertEqual(self.IX_K2V2.count(), 4)
    def test_count_key_one_value(self):
        """Count one value stored under a specified key."""
        # count(key) restricts the tally to that single key.
        self.assertEqual(self.IX_K1V1.count(self.KEY1), 1)
    def test_count_key_two_values(self):
        """Count two values stored under a specified key."""
        self.assertEqual(self.IX_K1V2.count(self.KEY1), 2)
    def test_count_key_each_one_value(self):
        """Count each key with one stored value."""
        # Each key is verified in its own subTest.
        for key in [self.KEY1, self.KEY2]:
            with self.subTest(key=key):
                self.assertEqual(self.IX_K2V1.count(key), 1)
    def test_count_key_each_two_values(self):
        """Count each key with two stored values."""
        # Each key is verified in its own subTest.
        for key in [self.KEY1, self.KEY2]:
            with self.subTest(key=key):
                self.assertEqual(self.IX_K2V2.count(key), 2)
    def test_get_one_value(self):
        """Get one value from the index under a specified key."""
        self.assertEqual(self.IX_K1V1.get(self.KEY1), [self.VALUE1, ])
    def test_get_one_value_is_list(self):
        """Even with one item stored, a list should be returned."""
        self.assertTrue(isinstance(self.IX_K1V1.get(self.KEY1), list))
    def test_get_two_values(self):
        """Get two values from the index under a specified key."""
        # Only membership is asserted — presumably order is not guaranteed.
        for val in self.IX_K1V2.get(self.KEY1):
            self.assertIn(val, [self.VALUE1, self.VALUE2])
    def test_get_invalid_key_empty(self):
        """Get nonexistent key should raise a ``KeyError``."""
        # IX_EMPTY: fixture index with no entries at all.
        with self.assertRaises(KeyError):
            self.IX_EMPTY.get('--INVALID--')
    def test_get_invalid_key_one_key(self):
        """Get nonexistent key on a filled index should raise a ``KeyError``."""
        # Same as above, but with at least one real key present.
        with self.assertRaises(KeyError):
            self.IX_K1V1.get('--INVALID--')
    def test_add_same_value(self):
        """Adding twice the same value should only store it once."""
        # VALUE1 is already stored under KEY1 by the fixture.
        self.IX_K1V1.add(self.KEY1, self.VALUE1)
        self.assertEqual(self.IX_K1V1.get(self.KEY1), [self.VALUE1, ])
    def test_remove_key(self):
        """Remove a key from the index."""
        # Precondition asserted in a subTest to separate fixture failures.
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K1V1), 1)
        self.IX_K1V1.remove(self.KEY1)
        self.assertEqual(len(self.IX_K1V1), 0)
    def test_remove_invalid_key(self):
        """Removing nonexistent key should raise a ``KeyError``."""
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K1V1), 1)
        with self.assertRaises(KeyError):
            self.IX_K1V1.remove('--INVALID--')
    def test_remove_value(self):
        """Remove a value from an index key."""
        # Removing one of two values must leave only the other behind.
        with self.subTest(predicate=True):
            self.assertEqual(self.IX_K1V2.count(), 2)
        self.IX_K1V2.remove(self.KEY1, self.VALUE1)
        self.assertEqual(self.IX_K1V2.get(self.KEY1), [self.VALUE2, ])
    def test_remove_invalid_value(self):
        """Remove nonexistent value should raise ``ValueError``."""
        with self.subTest(predicate=True):
            self.assertEqual(self.IX_K1V1.count(), 1)
        with self.assertRaises(ValueError):
            self.IX_K1V1.remove(self.KEY1, '--INVALID--')
    def test_remove_last_entry(self):
        """Remove last value from index entry should remove the entry itself."""
        with self.subTest(predicate=True):
            self.assertEqual(len(self.IX_K1V1), 1)
            self.assertEqual(self.IX_K1V1.count(), 1)
        self.IX_K1V1.remove(self.KEY1, self.VALUE1)
        self.assertEqual(len(self.IX_K1V1), 0)
        # The emptied key must be gone entirely, not just empty.
        with self.assertRaises(KeyError):
            self.IX_K1V1.get(self.KEY1)
    def test_discard_invalid_key(self):
        """Discard of a nonexistent key should not raise ``KeyError``."""
        # discard() is the non-raising counterpart of remove().
        self.IX_K1V1.discard(key='--INVALID--')
    def test_discard_invalid_value(self):
        """Discard of a nonexistent value should not raise ``ValueError``."""
        # discard() must silently ignore a value that is not stored.
        self.IX_K1V1.discard(key=self.KEY1, value='--INVALID--')
class TestChunk(unittest.TestCase):
    """Test case for :class:`kado.store._store.Chunk`."""
    def test___init__(self):
        """Test chunk initialization."""
        # Golden values precomputed for the one-byte payload b'1'.
        # NOTE(review): TEST_ID appears to be a prefix of TEST_SHASH, i.e.
        # the chunk id looks derived from the strong hash — confirm.
        TEST_DATA = b'1'
        TEST_ID = uuid.UUID('14c1130e-e81a-12b5-5612-ae6acfb29ae5')
        TEST_WHASH = '66b3d38e379784f0'
        TEST_SHASH = (
            '14c1130ee81a12b55612ae6acfb29ae54d4dfa75f2551c55ccdaf1e14369d31e'
        )
        c = _store.Chunk(TEST_DATA)
        with self.subTest(test='id'):
            self.assertEqual(c.id, TEST_ID)
        with self.subTest(test='data'):
            self.assertEqual(c.data, TEST_DATA)
        with self.subTest(test='shash'):
            self.assertEqual(c.shash, TEST_SHASH)
        with self.subTest(test='whash'):
            self.assertEqual(c.whash, TEST_WHASH)
    def test__data_set_notimplementederror(self):
        """It should not be possible to reset data of a chunk."""
        # Chunks are immutable once created: data assignment must fail.
        c = _store.Chunk(b'1')
        with self.assertRaises(NotImplementedError):
            c.data = b'2'
    def test__id_get(self):
        """Chunk's identifier should match data's strong hash."""
        TEST_ID = uuid.UUID('14c1130e-e81a-12b5-5612-ae6acfb29ae5')
        c = _store.Chunk(b'1')
        self.assertEqual(c._id_get(), TEST_ID)
class TestItem(unittest.TestCase):
    """Test case for :class:`kado.store._store.Item`.

    Golden chunk/hash expectations live in the ``tests.lib`` constants
    (``tc.DATA_CHUNKS_KADO``, ``tc.DATA_HTREE_KADO``).
    """
    def test___init___chunks(self):
        """Ensure chunks from known data files are computed as expected."""
        for name, chunks in tc.DATA_CHUNKS_KADO.items():
            with pkg_resources.resource_stream('tests.lib', name) as fp:
                item = _store.Item(fp.read())
            for idx, ck in enumerate(item.chunks):
                with self.subTest(file=name, chunk=idx, test='whash'):
                    self.assertEqual(chunks[idx][2], ck.whash)
                with self.subTest(file=name, chunk=idx, test='shash'):
                    self.assertEqual(chunks[idx][3], ck.shash)
    def test___init___hash(self):
        """Test the hashes of an item from known data files."""
        for name, hashes in tc.DATA_HTREE_KADO.items():
            with pkg_resources.resource_stream('tests.lib', name) as fp:
                item = _store.Item(fp.read())
            with self.subTest(file=name, test='whash'):
                self.assertEqual(item.whash, hashes[0])
            with self.subTest(file=name, test='shash'):
                self.assertEqual(item.shash, hashes[1])
    def test___init___metadata(self):
        """Ensure the item's metadata is properly initialized."""
        TEST_META = {
            'a': 'a',
            'b': 'b',
            'c': 'c',
        }
        item = _store.Item(metadata=TEST_META)
        for k, v in TEST_META.items():
            with self.subTest(key=k):
                self.assertEqual(item[k], v)
    def test___len___data_files(self):
        """Test item length loaded with known data files."""
        for name in tc.DATA_CHUNKS_KADO:
            with pkg_resources.resource_stream('tests.lib', name) as fp:
                content = fp.read()
            l_content = len(content)
            item = _store.Item(content)
            with self.subTest(file=name):
                self.assertEqual(len(item), l_content)
    def test_data_get_data_files(self):
        """Test getting item's data property loaded with known data files."""
        for name in tc.DATA_CHUNKS_KADO:
            with pkg_resources.resource_stream('tests.lib', name) as fp:
                content = fp.read()
            item = _store.Item(content)
            with self.subTest(file=name):
                self.assertEqual(item.data, content)
    def test_data_set_data_files(self):
        """Test setting item's data property loaded with known data files."""
        for name, chunks in tc.DATA_CHUNKS_KADO.items():
            item = _store.Item()
            with pkg_resources.resource_stream('tests.lib', name) as fp:
                item.data = fp.read()
            for idx, ck in enumerate(item.chunks):
                with self.subTest(file=name, chunk=idx, test='whash'):
                    self.assertEqual(chunks[idx][2], ck.whash)
                with self.subTest(file=name, chunk=idx, test='shash'):
                    self.assertEqual(chunks[idx][3], ck.shash)
    def test_data_set_typeerror(self):
        """If data type is not ``bytes``, ``TypeError`` must be raised."""
        item = _store.Item()
        with self.assertRaises(TypeError):
            item.data = 1
    def test_copy_data_only(self):
        """Test copy of an item with only data loaded."""
        item1 = _store.Item(b'1')
        item2 = item1.copy()
        with self.subTest(test='id'):
            # Both items should not share the same ID.
            self.assertNotEqual(item1.id, item2.id)
        with self.subTest(test='data'):
            # Fixed: the original compared item1.data with itself, which
            # could never fail; the copy's data is what must match.
            self.assertEqual(item1.data, item2.data)
    def test_copy_with_metadata(self):
        """Test copy of an item carrying metadata."""
        TEST_META = {
            'a': 'a',
            'b': 'b',
            'c': 'c',
        }
        item1 = _store.Item(b'1', metadata=TEST_META)
        item2 = item1.copy()
        with self.subTest(test='id'):
            # Both items should not share the same ID.
            self.assertNotEqual(item1.id, item2.id)
        with self.subTest(test='data'):
            # Fixed: compare against the copy, not the source with itself.
            self.assertEqual(item1.data, item2.data)
        with self.subTest(test='metadata'):
            for k, v in item1.items():
                self.assertEqual(item2[k], v)
| jimmy-lt/kado | tests/store/test__store.py | test__store.py | py | 13,601 | python | en | code | 0 | github-code | 13 |
41576118364 | import os
import sys
import argparse
import shutil
root_path = os.path.realpath(os.path.dirname(__file__))
sys.path.append(root_path)
from task import VerifTask
AIG_BMC_TASK = 'AIG_BMC_TASK'
AIG_PROVE_TASK = 'AIG_PROVE_TASK'
BTOR_BMC_TASK = 'BTOR_BMC_TASK'
BTOR_PROVE_TASK = 'BTOR_PROVE_TASK'
def verify_task(file_name, workdir, taskname, task_type=AIG_BMC_TASK, config_file='', useconfig=False, logfile=None):
    """Run one verification task on ``file_name`` inside ``workdir``.

    Arguments:
        file_name: path of the design file (.aig/.btor) to verify.
        workdir: working directory the task operates in.
        taskname: task name used by VerifTask for its sub-directories/logs.
        task_type: one of the AIG_*/BTOR_* *_TASK constants above.
        config_file: optional configuration file passed to VerifTask.
        useconfig: whether VerifTask should honour config_file.
        logfile: optional log file path.

    Returns:
        The task's final status (``task.status``).
    """
    task = VerifTask(config_file, workdir, taskname, [], logfile, useconfig)
    # Pick the engine configuration that matches the requested task type.
    if task_type == AIG_BMC_TASK:
        task.aig_bmc_config()
    elif task_type == AIG_PROVE_TASK:
        task.aig_pdr_config()
    elif task_type == BTOR_BMC_TASK:
        task.btor_bmc_config()
    elif task_type == BTOR_PROVE_TASK:
        task.btor_pdr_config()
    else:
        print("assert error:", task_type)
        assert 0
    srcname = os.path.basename(file_name)
    task.filename = os.path.splitext(srcname)[0]
    dest_path = f"{task.srcdir}/{task.filename}.{task.file_type}"
    # Context managers guarantee both handles are closed even if the copy
    # fails part-way (the original leaked the handles on any exception).
    with open(file_name, 'rb') as srcfile, open(dest_path, 'wb') as destfile:
        shutil.copyfileobj(srcfile, destfile)
    task.log('crate workdir')
    task.log('crate veriftask')
    task.log(f'write srcfile to {task.srcdir}/{task.filename}.{task.file_type}')
    task.log("run task")
    task.run()
    task.log('task over')
    task.exit_callback()
    return task.status
if __name__ == '__main__':
    # Command-line front-end: copy the source file into a (possibly fresh)
    # work directory and hand it to verify_task().
    p = argparse.ArgumentParser()
    # p.add_argument('configfile', help='config file', type=str)
    p.add_argument('-s', '--srcfile', help='the file to verify', type=str, required=True)
    p.add_argument('-w','--workdir', help='muti task work dir', type=str, required=True)
    p.add_argument('-t','--taskname', help='task name', type=str, required=True)
    p.add_argument('-ty', '--type', help='task type', type=str, required=True)
    p.add_argument('-f', '--force', help='overwrite work dir',action='store_true')
    arg = p.parse_args()
    workdir = arg.workdir
    taskname = arg.taskname
    srcfile = arg.srcfile
    tasktype = arg.type
    workdir = os.path.abspath(workdir)
    if not os.path.exists(workdir):
        os.mkdir(workdir)
    else:
        # Existing work dir: wipe and recreate it only with --force,
        # otherwise refuse to clobber and exit with a failure status.
        if arg.force:
            shutil.rmtree(workdir,ignore_errors=True)
            os.mkdir(workdir)
        else:
            sys.exit(-1)
    srcname = os.path.basename(srcfile)
    dst_file = f"{workdir}/{srcname}"
    shutil.copy(srcfile,dst_file)
    print(tasktype)
    verify_task(dst_file, workdir, taskname, tasktype)
| donghua100/verifytools | core/task/verify_task.py | verify_task.py | py | 2,679 | python | en | code | 2 | github-code | 13 |
26601498994 | import math
def createSieve(number):
    """Build and print the initial sieve list for 0..number.

    Positions 0 and 1 are marked False (not prime); every other position
    holds its own index value.  Requires number >= 1.
    """
    cells = list(range(number + 1))
    cells[0] = False
    cells[1] = False
    print(cells)
def findNext(boolList, p):
    """Return the first index after *p* whose entry is exactly True, else None."""
    return next(
        (idx for idx, entry in enumerate(boolList) if idx > p and entry is True),
        None,
    )
def list_true(n):
    """Build the initial sieve list for 0..n.

    Indexes 0 and 1 are False (not prime candidates); every other index
    holds its own value.
    """
    return [False if value < 2 else value for value in range(n + 1)]
def find_next(bool_list, p):
    """Return the index of the next entry after position *p* that is True.

    Returns None when no later entry is exactly True.
    """
    for idx in range(p + 1, len(bool_list)):
        if bool_list[idx] is True:
            return idx
    return None
def prime_from_list(bool_list):
    """Return the indexes whose entries compare equal to True."""
    # Equality (not identity) with True is deliberate: it preserves the
    # original behaviour, where the integer 1 also compares equal to True.
    return [idx for idx, entry in enumerate(bool_list) if entry == True]
def is_prime_fast(number):
    """Return True if *number* is prime.

    2 is handled specially; numbers below 2 and even numbers are rejected
    outright, then only odd divisors up to sqrt(number) are tried.
    """
    if number == 2:
        return True
    if number < 2 or number % 2 == 0:
        return False
    # math.isqrt is exact for arbitrarily large ints, unlike the original
    # int(math.sqrt(...)) which can be off by one for very large inputs.
    return all(number % factor for factor in range(3, math.isqrt(number) + 1, 2))
def get_primes(a, n):
    """Return every prime in the inclusive range [a, n]."""
    return [candidate for candidate in range(a, n + 1) if is_prime_fast(candidate)]
def sieve(n):
    """Return the list of all primes <= n (Sieve of Eratosthenes).

    The original returned the partially-marked boolean list as soon as the
    next prime reached 3 (a leftover debugging early-exit); this version
    always runs the sieve to completion and returns the primes.
    """
    if n < 2:
        return []
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, math.isqrt(n) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were crossed out already.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]
def mark_false(bool_list, p):
    """Cross out multiples of *p* in the sieve list, in place.

    Entries at indexes divisible by p (except p itself) become False; any
    remaining raw numbers are promoted to True.  The same (mutated) list
    is returned for convenience.
    """
    for idx, entry in enumerate(bool_list):
        if idx % p == 0 and idx != p:
            bool_list[idx] = False
        elif entry is not True and entry is not False:
            bool_list[idx] = True
    return bool_list
# Smoke check executed at import time: primes up to 20.
test = sieve(20)
| Timsnky/challenges | sieve/sieve.py | sieve.py | py | 1,822 | python | en | code | 0 | github-code | 13 |
21457683686 | # -*- coding: utf-8 -*-
# This file is part of Argos.
#
# Argos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Argos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Argos. If not, see <http://www.gnu.org/licenses/>.
""" Inspector Selection Pane
"""
import logging
from argos.qt import QtGui, QtWidgets, QtSlot
from argos.utils.cls import to_string
from argos.inspector.registry import InspectorRegItem
logger = logging.getLogger(__name__)
def addInspectorActionsToMenu(inspectorMenu, execInspectorDialogAction, inspectorActionGroup):
    """ Populate inspectorMenu with the inspector-selection actions.

        The "Browse Inspectors..." action goes first, then a separator,
        then one entry per action in the set-inspector action group.

        :param inspectorMenu: inspector menu that will be modified
        :param execInspectorDialogAction: the "Browse Inspectors..." action
        :param inspectorActionGroup: action group with actions for selecting a new inspector
        :return: the inspectorMenu, which has been modified.
    """
    inspectorMenu.addAction(execInspectorDialogAction)
    inspectorMenu.addSeparator()
    for setInspectorAction in inspectorActionGroup.actions():
        inspectorMenu.addAction(setInspectorAction)
    return inspectorMenu
class InspectorSelectionPane(QtWidgets.QFrame):
    """ Shows the attributes of the selected repo tree item
    """
    def __init__(self, execInspectorDialogAction, inspectorActionGroup, parent=None):
        """ Builds a fixed-height pane holding one push button whose popup
            menu lists the "Browse Inspectors..." action plus one entry per
            set-inspector action.
        """
        super(InspectorSelectionPane, self).__init__(parent=parent)
        #self.setFrameShape(QtWidgets.QFrame.Box)
        self.layout = QtWidgets.QHBoxLayout()
        self.setLayout(self.layout)
        # self.label = QtWidgets.QLabel("Current inspector")
        # self.layout.addWidget(self.label)
        # Button text shows the current inspector; starts with a placeholder.
        self.menuButton = QtWidgets.QPushButton("No inspector")
        self.layout.addWidget(self.menuButton)
        inspectorMenu = QtWidgets.QMenu("Change Inspector", parent=self.menuButton)
        addInspectorActionsToMenu(inspectorMenu, execInspectorDialogAction, inspectorActionGroup)
        self.menuButton.setMenu(inspectorMenu)
        # Fix the vertical size so the pane does not stretch with the window.
        sizePolicy = self.sizePolicy()
        sizePolicy.setVerticalPolicy(QtWidgets.QSizePolicy.Fixed)
        self.setSizePolicy(sizePolicy)
    @QtSlot(InspectorRegItem)
    def updateFromInspectorRegItem(self, inspectorRegItem):
        """ Updates the label from the full name of the InspectorRegItem
        """
        # Shown as "name (library)" when the item has a library part.
        library, name = inspectorRegItem.splitName()
        label = "{} ({})".format(name, library) if library else name
        #self.label.setText(label)
        self.menuButton.setText(label)
| leehawk2001/argos | argos/inspector/selectionpane.py | selectionpane.py | py | 2,959 | python | en | code | null | github-code | 13 |
4997269525 | from django.shortcuts import render
from . import forms
from . import models
# Create your views here.
def index(request):
    """Render the landing page with blank category and page forms."""
    form_context = {
        "categoryForm": forms.CategoryModelForm(),
        "pageForm": forms.PageModelForm(),
    }
    return render(request, "templates/index.html", form_context)
def success_view(request):
    """Handle submission of the category and page forms.

    On POST the two forms are validated independently; each valid form's
    cleaned data is stored as a new model instance.  The success page is
    always rendered with every stored category and page.
    """
    categoryForm = forms.CategoryModelForm()
    pageForm = forms.PageModelForm()
    context = {}
    if request.method == "POST":
        categoryForm = forms.CategoryModelForm(request.POST)
        pageForm = forms.PageModelForm(request.POST)
        if categoryForm.is_valid():
            categoryForm_name = categoryForm.cleaned_data["name"]
            categoryForm_email = categoryForm.cleaned_data["email"]
            categoryForm_visits = categoryForm.cleaned_data["visits"]
            categoryForm_likes = categoryForm.cleaned_data["likes"]
            # NOTE(review): debug print left in — consider removing.
            print("inserted category ===> ", categoryForm_name, categoryForm_email,
                  categoryForm_visits, categoryForm_likes)
            # Manual model construction; a ModelForm's save() would
            # presumably do the same — confirm before simplifying.
            category_db = models.CategoryModel(
                name=categoryForm_name,
                email=categoryForm_email,
                visits=categoryForm_visits,
                likes=categoryForm_likes,
            )
            category_db.save()
        if pageForm.is_valid():
            pageForm_category = pageForm.cleaned_data["category"]
            pageForm_title = pageForm.cleaned_data["title"]
            pageForm_url = pageForm.cleaned_data["url"]
            pageForm_views = pageForm.cleaned_data["views"]
            # NOTE(review): stray debug prints below.
            print(" Kaustav ")
            print("inserted page ===>", pageForm_category,
                  pageForm_title, pageForm_url, pageForm_views)
            page_db = models.PageModel(
                category=pageForm_category,
                title=pageForm_title,
                url=pageForm_url,
                views=pageForm_views,
            )
            page_db.save()
    context["categoryModelQueries"] = models.CategoryModel.objects.all()
    context["pageModelQueries"] = models.PageModel.objects.all()
    return render(request, "templates/success.html", context=context)
| teetangh/Kaustav-CSE-LABS-and-Projects | Sem06-Web-Dev-LAB/WEEK 07/week07/question1_app/views.py | views.py | py | 2,186 | python | en | code | 2 | github-code | 13 |
73524321619 | f= open("Kaartnummers.txt","r")
lines = f.readlines()
# Track the largest card number seen and the (1-based) line it was on.
highest = 0
linenumber = 0
i = 0
for line in lines:
    i = i + 1
    # Each line looks like "<number>, <rest>"; only the number is used.
    # NOTE(review): assumes the first field is always a valid integer.
    user = line.strip('\n').split(', ')
    number = int(user[0])
    if(number > highest):
        highest = number
        linenumber = i
print("deze file kent",i,"regels")
print("Het grootste kaartnummer is", highest,"en deze staat op regel", linenumber)
f.close() | ldehaas1612/Python | hu/Opdrachten/Week 7/3. readfile.py | 3. readfile.py | py | 389 | python | en | code | 0 | github-code | 13 |
72985744018 | from src.data_store import data_store
from src.error import InputError, AccessError
from src.other import check_valid_token
from src.stats import increase_num_dms_joined, decrease_num_dms_joined
from src.stats import increase_dms_exist, decrease_dms_exist, decrease_msgs_exist
from src.notifications import update_notification_added_dm
def dm_create_v1(token, u_ids):
    '''
    Creates a DM using token and u_ids and returns the dm_id
    Arguments:
        - token (string)
        - u_ids (list of integers)
    Exceptions:
        InputError - when any u_id in u_ids does not refer to a valid user
        AccessError - when token is invalid
    Return value:
        { dm_id }
    '''
    store = data_store.get()
    name = []
    dm_dict = {}
    dm_members = []
    dm_messages = []
    # NOTE(review): len()+1 can produce a duplicate dm_id after a DM has
    # been removed via dm_remove_v1 — confirm uniqueness is handled.
    dm_id = len(store['dms']) +1
    # If token is invalid, AccessError is raised
    # else the payload is returned
    owner = check_valid_token(token)
    # Finding user of token (owner)
    for user in store['users']:
        for sess_id in user['session_id']:
            if owner['u_id'] == user['u_id'] and owner['session_id'] == sess_id:
                # Add the creator/owner's handle to name
                name.append(user['handle_str'])
                # Storing owner of dm
                dm_dict['owner'] = user
                # Adding owner to dm_members
                dm_members.append(user)
                # Only exits the inner session loop; the outer loop keeps
                # scanning the remaining users.
                break
    # Adds the users' handles in name
    for u_id in u_ids:
        for user in store['users']:
            if u_id == user['u_id']:
                name.append(user['handle_str'])
                # Adding valid user to dm_members
                dm_members.append(user)
                break
    # If there is an invalid u_id in u_ids then
    # length of name will be not equal length of u_ids
    # Thus there is an InputError
    if len(name) != len(u_ids) + 1:
        raise InputError(description="Invalid user id")
    # Alphabetically sorts the list 'name'
    name.sort()
    # Creating and storing the dm_id
    dm_dict['dm_id'] = dm_id
    # Converting 'name' to str then storing
    dm_dict['name'] = ", ".join(name)
    # Storing dm_members to data store
    dm_dict['members'] = dm_members
    # Storing dm_messages to data store
    dm_dict['messages'] = dm_messages
    # Append the dm's data to the data store
    store['dms'].append(dm_dict)
    data_store.set(store)
    # Increase dms joined for owner of dm
    increase_num_dms_joined(owner['u_id'])
    # Increase dms joined for all other users in dm
    for u_id in u_ids:
        if u_id != owner['u_id']:
            increase_num_dms_joined(u_id)
            # Add notification to user
            update_notification_added_dm(owner['u_id'], u_id, dm_id)
    # Increase the number of dms that exist in workplace stats
    increase_dms_exist()
    return {
        'dm_id': dm_id
    }
def dm_list_v1(token):
    '''
    Returns the list of DMs that the user is a member of
    Arguments:
        - token (string)
    Exceptions:
        AccessError - when token is invalid
    Return value:
        { dms }
    '''
    store = data_store.get()
    # Raises AccessError when the token is invalid.
    caller = check_valid_token(token)
    # One entry per membership record matching the caller, exactly as the
    # original nested loop produced.
    dms = [
        {'dm_id': dm['dm_id'], 'name': dm['name']}
        for dm in store['dms']
        for member in dm['members']
        if caller['u_id'] == member['u_id']
    ]
    return {
        'dms': dms
    }
def dm_remove_v1(token, dm_id):
    '''
    Removes an existing DM but only the original creator of the DM can
    Arguments:
        - token (string)
        - dm_id (integer)
    Exceptions:
        InputError - when dm_id does not refer to a valid DM
        AccessError - when token is invalid
        AccessError - when dm_id is valid and the authorised user is not the original DM creator
    Return value:
        { }
    '''
    store = data_store.get()
    # Raises AccessError when the token is invalid.
    token_user = check_valid_token(token)
    # Locate the DM once; keep a reference for the stats updates below.
    dm_details = None
    for dm in store['dms']:
        if dm_id == dm['dm_id']:
            dm_details = dm
            break
    if dm_details is None:
        raise InputError(description="dm_id does not refer to a valid DM")
    # Only the original creator may remove the DM.  Guard against a None
    # owner (dm_leave_v1 sets owner to None when the creator leaves); the
    # original code crashed with TypeError in that case.
    if dm_details['owner'] is None or token_user['u_id'] != dm_details['owner']['u_id']:
        raise AccessError(description="dm_id is valid and the authorised user is not the original DM creator")
    # Remove outside the search loop: the original removed elements from
    # store['dms'] while iterating it, which skips the following element.
    store['dms'].remove(dm_details)
    data_store.set(store)
    # Decrease dms joined for owner of dm
    decrease_num_dms_joined(token_user['u_id'])
    # Decrease dms joined for all other members of dm
    for member in dm_details['members']:
        if member['u_id'] != token_user['u_id']:
            decrease_num_dms_joined(member['u_id'])
    # Decrease the number of dms (and their messages) in workplace stats
    decrease_dms_exist()
    decrease_msgs_exist(len(dm_details['messages']))
    return {
    }
def dm_details_v1(token, dm_id):
    '''
    Given a DM with ID dm_id that the authorised user is a member of, provide basic details about the DM
    Arguments:
        - token (string)
        - dm_id (integer)
    Exceptions:
        InputError - when dm_id does not refer to a valid DM
        AccessError - when token is invalid
        AccessError - when dm_id is valid and the authorised user is not a member of the DM
    Return value:
        { name, members }
    '''
    store = data_store.get()
    # Finding user of token
    token_user = check_valid_token(token)
    # Raise an InputError if dm_id is invalid
    valid_dm_id = False
    for dm in store['dms']:
        if dm_id == dm['dm_id']:
            valid_dm_id = True
            break
    if valid_dm_id == False:
        raise InputError(description="dm_id does not refer to a valid DM")
    # Raise an AccessError if dm_id is valid and user is not a member of the DM
    # Note: 'dm' is deliberately reused here — it still holds the DM the
    # search loop above broke on.
    valid_member = False
    for member in dm['members']:
        if token_user['u_id'] == member['u_id']:
            valid_member = True
            break
    if valid_member == False:
        raise AccessError(description="dm_id is valid and the authorised user is not a member of the DM")
    return {
        'name': dm['name'],
        'members': dm['members']
    }
def dm_leave_v1(token, dm_id):
    '''
    Removes a member of DM but name of DM is not updated
    Arguments:
        - token (string)
        - dm_id (integer)
    Exceptions:
        InputError - when dm_id does not refer to a valid DM
        AccessError - when token is invalid
        AccessError - when dm_id is valid and the authorised user is not a member of the DM
    Return value:
        { }
    '''
    store = data_store.get()
    # Finding user of token
    token_user = check_valid_token(token)
    # Raise an InputError if dm_id is invalid
    valid_dm_id = False
    for dm in store['dms']:
        if dm_id == dm['dm_id']:
            valid_dm_id = True
            break
    if valid_dm_id == False:
        raise InputError(description="dm_id does not refer to a valid DM")
    # Raise an AccessError if dm_id is valid and user is not a member of the DM
    # ('dm' still refers to the DM found by the search loop above.)
    valid_member = False
    for member in dm['members']:
        if token_user['u_id'] == member['u_id']:
            # Safe mutation-while-iterating: the loop exits immediately.
            dm['members'].remove(member)
            valid_member = True
            break
    if valid_member == False:
        raise AccessError(description="dm_id is valid and the authorised user is not a member of the DM")
    # NOTE(review): if the owner already left, dm['owner'] is None and this
    # subscript raises TypeError — confirm intended behaviour.
    if token_user['u_id'] == dm['owner']['u_id']:
        dm['owner'] = None
    # Decrease dms joined for user that left
    decrease_num_dms_joined(token_user['u_id'])
    return {
    }
def dm_messages_v1(token, dm_id, start):
    '''
    Returns up to 50 messages in a given DM
    Arguments:
        - token (string)
        - dm_id (integer)
        - start (integer)
    Exceptions:
        InputError - when dm_id does not refer to a valid DM
        InputError - start is greater than the total number of messages in the channel
        AccessError - when token is invalid
        AccessError - when dm_id is valid and the authorised user is not a member of the DM
    Return value:
        { messages, start, end }
    '''
    store = data_store.get()
    # Finding user of token
    token_user = check_valid_token(token)
    # Raise an InputError if dm_id is invalid
    valid_dm_id = False
    for dm in store['dms']:
        if dm_id == dm['dm_id']:
            valid_dm_id = True
            break
    if valid_dm_id == False:
        raise InputError(description="dm_id does not refer to a valid DM")
    # Raise an AccessError if dm_id is valid and user is not a member of the DM
    # ('dm' still refers to the DM found by the search loop above.)
    valid_member = False
    for member in dm['members']:
        if token_user['u_id'] == member['u_id']:
            valid_member = True
            break
    if valid_member == False:
        raise AccessError(description="dm_id is valid and the authorised user is not a member of the DM")
    # Raise an InputError when start is greater than total number of messages in DM
    if start > len(dm['messages']):
        raise InputError(description="start is greater than the total number of messages in the DM")
    # Collect up to 50 messages beginning at index 'start'; 'end' walks the
    # message indexes (skipping the first 'start', stopping at start + 50).
    messages = []
    end = 0
    for message in dm['messages']:
        if end < start:
            end += 1
        elif end == start + 50:
            break
        else:
            messages.append(message)
            end += 1
    # end == -1 signals to the caller that the final message was reached.
    if end == len(dm['messages']):
        end = -1
    return {
        'messages': messages,
        'start': start,
        'end': end
    }
| spoicywings/Major_project_backend | src/dm.py | dm.py | py | 10,100 | python | en | code | 0 | github-code | 13 |
31941240450 | import heapq
from typing import List
from typing import Tuple
class Solution:
    def minimumWeight(self, n: int, edges: List[List[int]], src1: int,
                      src2: int, dest: int) -> int:
        """Minimum total edge weight of a subgraph in which dest is
        reachable from both src1 and src2, or -1 if impossible.

        Any optimal subgraph is two paths src1->x and src2->x that merge
        at some node x, followed by a shared path x->dest.  Run Dijkstra
        from src1 and src2 on the graph and from dest on the reversed
        graph, then minimise the three-way sum over every meeting node.
        """
        UNREACHED = 10**12
        forward: List[List[Tuple[int, int]]] = [[] for _ in range(n)]
        backward: List[List[Tuple[int, int]]] = [[] for _ in range(n)]
        for u, v, w in edges:
            forward[u].append((v, w))
            backward[v].append((u, w))

        def dijkstra(graph: List[List[Tuple[int, int]]],
                     source: int) -> List[int]:
            best = [UNREACHED] * n
            best[source] = 0
            heap = [(0, source)]
            while heap:
                d, node = heapq.heappop(heap)
                if d > best[node]:
                    continue  # stale heap entry
                for nxt, w in graph[node]:
                    if d + w < best[nxt]:
                        best[nxt] = d + w
                        heapq.heappush(heap, (d + w, nxt))
            return best

        from_src1 = dijkstra(forward, src1)
        from_src2 = dijkstra(forward, src2)
        to_dest = dijkstra(backward, dest)
        best_total = min(
            a + b + c for a, b, c in zip(from_src1, from_src2, to_dest)
        )
        return -1 if best_total >= UNREACHED else best_total
if __name__ == '__main__':
    # Ad-hoc driver: prints the answers for two sample graphs.
    solu = Solution()
    n = 6
    edges = [[0, 2, 2], [0, 5, 6], [1, 0, 3], [1, 4, 5], [2, 1, 1], [2, 3, 3],
             [2, 3, 4], [3, 4, 2], [4, 5, 1]]
    src1 = 0
    src2 = 1
    dest = 5
    print(solu.minimumWeight(n, edges, src1, src2, dest))
    n = 5
    edges = [[4, 2, 20], [4, 3, 46], [0, 1, 15], [0, 1, 43], [0, 1, 32],
             [3, 1, 13]]
    src1 = 0
    src2 = 4
    dest = 1
    print(solu.minimumWeight(n, edges, src1, src2, dest))
| wylu/leetcodecn | src/python/contest/week284/6032.得到要求路径的最小带权子图.py | 6032.得到要求路径的最小带权子图.py | py | 1,657 | python | en | code | 3 | github-code | 13 |
16276259294 | from django.urls import path
from . import views
urlpatterns = [
    # Dashboard and chart pages (class-based views).
    path("dashboard/", views.VisualizationsView.as_view(), name="vis"),
    path("line-charts/", views.LineChartsView.as_view(), name="line_charts"),
    # Endpoints returning a single selector item / geocode metadata,
    # presumably consumed by the dashboard's JavaScript — confirm.
    path(
        "get-model-item/<int:model_id>/",
        views.get_model_selector_item,
        name="get_model_selector_item",
    ),
    path(
        "get-prediction-item/<int:prediction_id>/",
        views.get_prediction_selector_item,
        name="get_prediction_selector_item",
    ),
    path(
        "get-geocode-info/<int:geocode>/",
        views.get_geocode_info,
        name="get_geocode_info",
    ),
]
| Mosqlimate-project/Data-platform | src/vis/urls.py | urls.py | py | 640 | python | en | code | 5 | github-code | 13 |
5746582476 | """HiddenFootprints walkability prediction network module
Based on a Resnet + UNet structure to predict where people can walk in a scene.
"""
import torch
import torch.nn as nn
from .resnet import ResUNet
import numpy as np
import torchvision.transforms as transforms
import cv2
class GeneratorHeatMap(nn.Module):
    """Network model class
    Based on ResUNet.
    """
    def __init__(self):
        super(GeneratorHeatMap, self).__init__()
        # 3 input channels (RGB), 1 output channel (walkability heat map).
        num_in_layers = 3
        self.backbone = ResUNet(encoder='resnet50', pretrained=True, num_in_layers=num_in_layers, num_out_layers=1)
        # NOTE(review): hardcodes GPU placement — CPU-only use is unsupported.
        self.backbone.cuda()
    def forward(self, img, return_ft=False):
        # if return_ft==True, out[0] is prediction, out[1] is feature map
        out = self.backbone(img, return_ft=return_ft)
        return out
class FootprintsPredictor(nn.Module):
    """A wrapper class to load model, test on single image.
    """
    def __init__(self):
        super(FootprintsPredictor, self).__init__()
        # model
        self.netG = GeneratorHeatMap()
    def load_model(self, model_file):
        """Load generator weights from a state-dict checkpoint file."""
        self.netG.load_state_dict(torch.load(model_file))
        print('Loaded model: {}.'.format(model_file))
    # given an image, return output
    def forward(self, img, return_ft=False):
        out = self.netG(img, return_ft=return_ft)
        pred = {}
        if return_ft:
            pred['locmap'], pred['ftmaps'] = out
        else:
            pred['locmap'] = out
            pred['ftmaps'] = None
        return pred
    def test_single_im(self, img):
        """Test the model on a single image. img size: hxwx3

        The image is resized to height 480, split into three overlapping
        640-wide crops (left / center / right), and the per-crop heat maps
        are blended with linear cross-fade weights before being resized
        back to the input resolution.
        NOTE(review): assumes the resized width is at least 640 — confirm.
        """
        # data transform
        data_transform = transforms.Compose([transforms.ToTensor(),
                                             transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                                  std=(0.229, 0.224, 0.225))])
        h,w = img.shape[:2]
        resize_factor = 480. / h
        img = cv2.resize(img, None, fx=resize_factor, fy=resize_factor, interpolation=cv2.INTER_AREA)
        w_new = img.shape[1]
        # crops and their weights
        # (all weight maps are built at 1/4 resolution — presumably the
        # network's output stride is 4; confirm against ResUNet.)
        center_crop_left = (w_new-640)//2
        center_crop_right = (w_new+640)//2
        center_center = w_new//2
        left_weights = np.zeros((1, w_new//4))
        left_weights[0, :center_crop_left//4] = 1.0
        left_weights[0, center_crop_left//4:center_center//4] = np.linspace(1, 0, center_center//4 - center_crop_left//4)
        left_weights_map = np.tile(left_weights, (480//4, 1))
        center_weights = np.zeros((1, w_new//4))
        center_weights[0, center_crop_left//4:center_center//4] = np.linspace(0, 1, center_center//4 - center_crop_left//4)
        center_weights[0, center_center//4:center_crop_right//4] = np.linspace(1, 0, center_crop_right//4 - center_center//4)
        center_weights_map = np.tile(center_weights, (480//4, 1))
        right_weights = np.zeros((1, w_new//4))
        right_weights[0, center_crop_right//4:] = 1.0
        right_weights[0, center_center//4:center_crop_right//4] = np.linspace(0, 1, center_crop_right//4 - center_center//4)
        right_weights_map = np.tile(right_weights, (480//4, 1))
        weights_map = [left_weights_map, center_weights_map, right_weights_map]
        # take weighted three fix-sized crops
        x_crops = [0, (w_new-640)//2, w_new-640]
        pred_map_whole = np.zeros((480//4,w_new//4))
        for (x_crop_i,x_crop) in enumerate(x_crops):
            img_cropped = img[:, x_crop:x_crop+640]
            pred_map_cur = np.zeros(pred_map_whole.shape)
            # convert to tensor
            img_cropped = data_transform(img_cropped).float()
            real_img = img_cropped.unsqueeze(0).cuda()
            pred_map = self(real_img)['locmap'].squeeze().detach().cpu().numpy() # hxw
            # merge
            x_crop = int(x_crop//4)
            pred_map_cur[:, x_crop:x_crop+640//4] = pred_map
            pred_map_whole += pred_map_cur * weights_map[x_crop_i]
        # resize it back to image size
        pred_map_whole = cv2.resize(pred_map_whole, None, fx=4/resize_factor, fy=4/resize_factor, interpolation=cv2.INTER_AREA)
        return pred_map_whole
| jinsungit/hiddenfootprints | hiddenfootprints/model/networks.py | networks.py | py | 4,238 | python | en | code | 7 | github-code | 13 |
32078941550 | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAG_URL = reverse('recipe:tags-list')
def create_recipe(user, **kwargs):
    """Create and return a Recipe owned by *user*.

    Default field values may be overridden through keyword arguments.
    """
    fields = {'title': 'Test', 'price': 5.00, 'owner': user, **kwargs}
    return Recipe.objects.create(**fields)
def create_user(email='testmail@gmail.com', password='testpassword',
                name='VitoScaletta'):
    # NOTE(review): this helper's signature is (email, password, name) but
    # it forwards (email, name, password) positionally, and callers below
    # pass (email, name, password) — confirm the custom user manager's
    # expected argument order.
    return get_user_model().objects.create_user(email, name, password)
def create_tag(user, name):
    """Create and return a Tag owned by *user* with the given name."""
    return Tag.objects.create(owner=user, name=name)
class PublicInteraction(TestCase):
    '''Tests all anonymous interactions with the API'''
    def setUp(self):
        # Unauthenticated client: no force_authenticate call.
        self.client = APIClient()
    def test_cant_get_tag(self):
        '''Tests that anonymous user can't view the tags'''
        res = self.client.get(TAG_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_cant_create_tag(self):
        '''Tests that anonymous user can't create the tag'''
        res = self.client.post(TAG_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateInteraction(TestCase):
'''Tests all authorized interactions with the API'''
    def setUp(self):
        """Create an API client authenticated as a fresh default user."""
        self.client = APIClient()
        self.user = create_user()
        self.client.force_authenticate(self.user)
    def test_if_can_view(self):
        '''Tests if the user can view tags'''
        create_tag(self.user, 'Tag1')
        create_tag(self.user, 'Tag2')
        # NOTE(review): expected payload assumes the list endpoint orders
        # tags by '-name' — confirm against the viewset.
        tags = Tag.objects.all().order_by('-name')
        res = self.client.get(TAG_URL)
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
def test_limited_for_one_user(self):
'''Tests if user can retrieve only his tags'''
create_tag(self.user, 'Tag1')
create_tag(self.user, 'Tag2')
other_tag = (
create_user('tony@gmail.com', 'test', 'testpassword'),
'Tag From Second User'
)
res = self.client.get(TAG_URL)
serializer = TagSerializer(other_tag)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertNotIn(serializer, res.data)
def test_if_tag_created(self):
'''
Tests if the tag can be created and the data
in this new tag matches the data of the request
'''
res = self.client.post(TAG_URL, {'name': 'Vegan'})
tag = Tag.objects.all().filter(name='Vegan', owner=self.user)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(tag.exists())
self.assertEqual('Vegan', str(tag[0]))
def test_invalid_tag_data(self):
'''Tests if the tag is created when invalid data provided'''
res = self.client.post(TAG_URL, {'name': ''})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_filter_tags_by_recepies(self):
'''Tests if the user can filter tags assigned to recepies'''
tag_1 = create_tag(self.user, name='One')
tag_2 = create_tag(self.user, name='Two')
tag_3 = create_tag(self.user, name='Three')
recipe = create_recipe(self.user)
recipe.tags.add(tag_1, tag_2)
serializer_1 = TagSerializer([tag_2, tag_1], many=True)
serializer_2 = TagSerializer(tag_3)
res = self.client.get(TAG_URL, {'assigned': '1'})
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer_1.data, res.data)
self.assertNotIn(serializer_2.data, res.data)
def test_can_filter_tags_without_recepies(self):
'''
Tests if a user can retrieve all tags not assigned to any recepies
'''
tag_1 = create_tag(self.user, name='One')
tag_2 = create_tag(self.user, name='Two')
tag_3 = create_tag(self.user, name='Three')
recipe = create_recipe(self.user)
recipe.tags.add(tag_1, tag_2)
serializer_1 = TagSerializer([tag_2, tag_1], many=True)
serializer_2 = TagSerializer(
[tag_3, ],
many=True
)
res = self.client.get(TAG_URL, {'not_assigned': '1'})
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(serializer_2.data, res.data)
self.assertNotIn(serializer_1.data, res.data)
def test_unique_filtering(self):
'''Tests if tags returned by filters are unique'''
tag_1 = create_tag(self.user, 'One')
recipe = create_recipe(self.user)
recipe_2 = create_recipe(self.user, title='another')
recipe.tags.add(tag_1)
recipe_2.tags.add(tag_1)
res = self.client.get(TAG_URL, {'assigned': '1'})
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
| samgans/Recipe-API | app/recipe/tests/test_tags.py | test_tags.py | py | 5,122 | python | en | code | 0 | github-code | 13 |
9523692362 | import os
import time
import datetime
import torch
import torch.utils.data
from opts import opts
import ref
from models.hg_3d_gan import Hourglass3DGAN
from utils.utils import adjust_learning_rate
from datasets.fusion import Fusion
from datasets.h36m import H36M
from datasets.mpii import MPII
from utils.logger import Logger
from train import train, val
def main():
  """Train (or evaluate) the Hourglass3DGAN pose model.

  Builds the model and data loaders from command-line options, then runs
  the epoch loop, logging scalar summaries each epoch and validating /
  checkpointing every ``opt.valIntervals`` epochs.
  """
  opt = opts().parse()
  now = datetime.datetime.now()
  # One timestamped log directory per run.
  logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
  model = Hourglass3DGAN(opt)
  # Shifted stuff to the model object
  # criterion = torch.nn.MSELoss().cuda()
  # optimizer = torch.optim.RMSprop(model.parameters(), opt.LR,
  #                                 alpha = ref.alpha,
  #                                 eps = ref.epsilon,
  #                                 weight_decay = ref.weightDecay,
  #                                 momentum = ref.momentum)
  # With (effectively) no 3D data requested, validate on MPII (2D);
  # otherwise validate on H36M.  Validation batch size is always 1.
  if opt.ratio3D < ref.eps:
    val_loader = torch.utils.data.DataLoader(
        MPII(opt, 'val', returnMeta = True),
        batch_size = 1,
        shuffle = False,
        num_workers = int(ref.nThreads)
    )
  else:
    val_loader = torch.utils.data.DataLoader(
        H36M(opt, 'val'),
        batch_size = 1,
        shuffle = False,
        num_workers = int(ref.nThreads)
    )
  if opt.test:
    # Evaluation-only mode: single validation pass, no training.
    val(0, opt, val_loader, model)
    return
  train_loader = torch.utils.data.DataLoader(
      Fusion(opt, 'train'),
      batch_size = opt.trainBatch,
      shuffle = True if opt.DEBUG == 0 else False,
      num_workers = int(ref.nThreads)
  )
  for epoch in range(1, opt.nEpochs + 1):
    loss_train, acc_train, mpjpe_train, loss3d_train = train(epoch, opt, train_loader, model)
    logger.scalar_summary('loss_train', loss_train, epoch)
    logger.scalar_summary('acc_train', acc_train, epoch)
    logger.scalar_summary('mpjpe_train', mpjpe_train, epoch)
    logger.scalar_summary('loss3d_train', loss3d_train, epoch)
    if epoch % opt.valIntervals == 0:
      # Periodic validation; both GAN halves are checkpointed separately.
      loss_val, acc_val, mpjpe_val, loss3d_val = val(epoch, opt, val_loader, model)
      logger.scalar_summary('loss_val', loss_val, epoch)
      logger.scalar_summary('acc_val', acc_val, epoch)
      logger.scalar_summary('mpjpe_val', mpjpe_val, epoch)
      logger.scalar_summary('loss3d_val', loss3d_val, epoch)
      torch.save(model.netG, os.path.join(opt.saveDir, 'gen-model_{}.pth'.format(epoch)))
      torch.save(model.netD, os.path.join(opt.saveDir, 'dis-model_{}.pth'.format(epoch)))
      logger.write('{:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} \n'.format(loss_train, acc_train, mpjpe_train, loss3d_train, loss_val, acc_val, mpjpe_val, loss3d_val))
    else:
      logger.write('{:8f} {:8f} {:8f} {:8f} \n'.format(loss_train, acc_train, mpjpe_train, loss3d_train))
    # adjust_learning_rate(optimizer, epoch, opt.dropLR, opt.LR)
  logger.close()
if __name__ == '__main__':
main()
| anuragmundhada/pose-hgreg-gan | src/main.py | main.py | py | 2,896 | python | en | code | 8 | github-code | 13 |
27622391650 | import asyncio
import time
import aiohttp
start_time = time.time()
url = 'https://fanyi.baidu.com/sug'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 Edg/106.0.1370.37'
}
kw_list = ['cat', 'dog', 'mouse']
async def request(data):
    """POST ``data`` to the Baidu translate suggest endpoint and print the JSON reply."""
    async with aiohttp.ClientSession() as session:
        async with await session.post(url=url, headers=headers, data=data) as response:
            print(await response.json())
# Build one request coroutine per keyword.
tasks = []
for kw in kw_list:
    data = {
        'kw': kw
    }
    tasks.append(request(data))

async def _run_all():
    """Drive all translation requests concurrently."""
    await asyncio.gather(*tasks)

# asyncio.run() creates, runs and closes the event loop in one call; the
# old get_event_loop()/ensure_future()/run_until_complete() pattern is
# deprecated since Python 3.10.
asyncio.run(_run_all())

end_time = time.time()
print(end_time - start_time)
| New-Heartbeat/spider-learn | 多任务/多任务协程爬虫实例.py | 多任务协程爬虫实例.py | py | 852 | python | en | code | 0 | github-code | 13 |
39443435063 | # -*- coding: utf-8 -*-
"""Display the driver database as a table."""
import PySide2.QtWidgets as QtWidgets
import PySide2.QtCore as QtCore
import PySide2.QtGui as QtGui
from . import config
from ..lib.driver import Driver
class DriverDatabaseFrame(QtWidgets.QWidget):
    """Display, sort, filter, etc the database of available drive units."""
    # Emitted with the full manufacturer set whenever a driver from a
    # previously-unknown manufacturer is stored.
    new_manufacturer_added = QtCore.Signal(set)
    def __init__(self):
        """Initialize database frame: table of drivers plus an 'add' button."""
        QtWidgets.QWidget.__init__(self)
        self.table_widget = QtWidgets.QTableWidget(self)
        self.table_widget.setSortingEnabled(True)
        labels = ["Manufacturer", "Model", "d [in]", "Fs [Hz]", u"Vas [m³]",
                  u"Sd [m²]", "Qts", "Qes", "xmax [mm]", "m [kg]",
                  "P (AES) [W]"]
        self.table_widget.setColumnCount(len(labels))
        self.table_widget.setHorizontalHeaderLabels(labels)
        # populate table with every driver already in the database
        for driver in config.driver_db:
            self.add_driver_entry(driver)
        add_driver_button = QtWidgets.QPushButton(self)
        add_driver_button.setIcon(QtGui.QIcon.fromTheme('list-add'))
        add_driver_button.setText("Add new driver")
        add_driver_button.clicked.connect(self.add_driver)
        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(add_driver_button, stretch=0)
        vbox.addWidget(self.table_widget)
        self.setLayout(vbox)
    def add_driver_entry(self, driver):
        """Append one row to the QTableWidget for the given driver.

        Args:
            driver : driver to add to the table (column order matches the
                header labels set in __init__; xmax is shown in mm)
        """
        rows = self.table_widget.rowCount()
        self.table_widget.setRowCount(rows+1)
        items = []
        items.append(QtWidgets.QTableWidgetItem(driver.manufacturer))
        items.append(QtWidgets.QTableWidgetItem(driver.model))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.diameter)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.fs)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.Vas)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.Sd)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.Qts)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.Qes)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(1e3*driver.xmax)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.weight)))
        items.append(QtWidgets.QTableWidgetItem("{0:4g}".format(driver.power)))
        for i, item in enumerate(items):
            # XOR toggles the ItemIsEditable flag off (it is set by default),
            # making the cells read-only.
            item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
            self.table_widget.setItem(rows, i, item)
    def add_driver(self):
        """Open a modal dialog for adding a new driver to the database."""
        self.add_driver_dialog = QtWidgets.QDialog()
        # Driver general specification
        general_info = QtWidgets.QGroupBox("General Specification")
        info_form = QtWidgets.QFormLayout()
        info_form.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
        manuf_label = QtWidgets.QLabel()
        manuf_label.setText("Manufacturer")
        self.manuf_line = QtWidgets.QLineEdit()
        model_label = QtWidgets.QLabel()
        model_label.setText("Model")
        self.model_line = QtWidgets.QLineEdit()
        diameter_label = QtWidgets.QLabel()
        diameter_label.setText("Diameter")
        self.diameter_box = QtWidgets.QDoubleSpinBox()
        self.diameter_box.setSuffix(' "')
        self.diameter_box.setRange(0.5, 40.0)
        weight_label = QtWidgets.QLabel()
        weight_label.setText("Net Weight")
        self.weight_box = QtWidgets.QDoubleSpinBox()
        self.weight_box.setSuffix(" kg")
        self.weight_box.setRange(0.1, 40.0)
        power_label = QtWidgets.QLabel()
        power_label.setText("AES Power Handling")
        self.power_box = QtWidgets.QDoubleSpinBox()
        self.power_box.setSuffix(" W")
        self.power_box.setRange(1.0, 4000.0)
        info_form.addRow(manuf_label, self.manuf_line)
        info_form.addRow(model_label, self.model_line)
        info_form.addRow(diameter_label, self.diameter_box)
        info_form.addRow(weight_label, self.weight_box)
        info_form.addRow(power_label, self.power_box)
        general_info.setLayout(info_form)
        # Thiele/Small parameters (entered in user-friendly units; converted
        # to SI in write_driver_to_db)
        ts_info = QtWidgets.QGroupBox("Thiele/Small Parameters")
        ts_form = QtWidgets.QFormLayout()
        fs_label = QtWidgets.QLabel()
        fs_label.setText("Resonance Frequency: fs")
        self.fs_box = QtWidgets.QDoubleSpinBox()
        self.fs_box.setSuffix(" Hz")
        self.fs_box.setRange(10.0, 2e4)
        Qts_label = QtWidgets.QLabel()
        Qts_label.setText("Total Q of Driver at fs: Qts")
        self.Qts_box = QtWidgets.QDoubleSpinBox()
        self.Qts_box.setRange(0.0, 1.0)
        Sd_label = QtWidgets.QLabel()
        Sd_label.setText("Diaphragm Area: Sd")
        self.Sd_box = QtWidgets.QDoubleSpinBox()
        self.Sd_box.setSuffix(u" cm²")
        self.Sd_box.setRange(0.0, 1e3)
        xmax_label = QtWidgets.QLabel()
        xmax_label.setText("Maximum linear peak excursion: xmax")
        self.xmax_box = QtWidgets.QDoubleSpinBox()
        self.xmax_box.setSuffix(" mm")
        self.xmax_box.setRange(0.0, 20.0)
        Vas_label = QtWidgets.QLabel()
        Vas_label.setText("Equivalent Compliance Volume: Vas")
        self.Vas_box = QtWidgets.QDoubleSpinBox()
        self.Vas_box.setSuffix(" l")
        self.Vas_box.setRange(0.0, 1e3)
        ts_form.addRow(fs_label, self.fs_box)
        ts_form.addRow(Qts_label, self.Qts_box)
        ts_form.addRow(Sd_label, self.Sd_box)
        ts_form.addRow(xmax_label, self.xmax_box)
        ts_form.addRow(Vas_label, self.Vas_box)
        ts_info.setLayout(ts_form)
        # Accept/cancel buttons; Accept persists via write_driver_to_db
        buttons_hbox = QtWidgets.QHBoxLayout()
        accept_button = QtWidgets.QPushButton(self)
        accept_button.setIcon(QtGui.QIcon.fromTheme('dialog-apply'))
        accept_button.setText("Accept")
        accept_button.clicked.connect(self.write_driver_to_db)
        cancel_button = QtWidgets.QPushButton(self)
        cancel_button.setIcon(QtGui.QIcon.fromTheme('gtk-close'))
        cancel_button.setText("Cancel")
        cancel_button.clicked.connect(self.add_driver_dialog.reject)
        buttons_hbox.addWidget(cancel_button)
        buttons_hbox.addWidget(accept_button)
        # putting it together
        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(general_info)
        vbox.addWidget(ts_info)
        vbox.addLayout(buttons_hbox)
        self.add_driver_dialog.setLayout(vbox)
        self.add_driver_dialog.exec_()
    def write_driver_to_db(self):
        """Build a Driver from the dialog fields, persist it, and refresh the table."""
        new_driver = Driver(self.manuf_line.text(), self.model_line.text())
        new_driver.diameter = self.diameter_box.value()
        new_driver.power = self.power_box.value()
        new_driver.weight = self.weight_box.value()
        new_driver.fs = self.fs_box.value()
        new_driver.Vas = self.Vas_box.value()/1e3 # l to m³
        new_driver.Qts = self.Qts_box.value()
        new_driver.Sd = self.Sd_box.value()/1e4 # cm² to m²
        new_driver.xmax = self.xmax_box.value()/1e3 # mm to m
        config.driver_db.append(new_driver)
        config.driver_db.write_to_disk(config.local_db_fname)
        self.add_driver_entry(new_driver)
        if new_driver.manufacturer not in config.driver_db.manufacturers:
            config.driver_db.manufacturers.add(new_driver.manufacturer)
            # notify listeners (e.g. filter widgets) of the new manufacturer
            self.new_manufacturer_added.emit(config.driver_db.manufacturers)
        self.add_driver_dialog.accept()
| Psirus/altai | altai/gui/driver_db_frame.py | driver_db_frame.py | py | 7,756 | python | en | code | 0 | github-code | 13 |
5609788008 | from sklearn.cluster import DBSCAN
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
import logging
import time
import sys
import numpy as np
from scripts.clustering.news import News
from scripts.clustering.util import *
start_time = time.time()
# Files
dataset = '/data/kasandra/year/all.normalized.json'
result_base = '/data/kasandra/year/result'
min_samples = 50
year = 2016
eps_step = 0.1
eps_start = 1
# upper bound for the eps sweep (np.arange excludes the stop value, so add a step)
eps_end = 2 + eps_step
log_path = '/data/logs/%s.dbscan.logs' % str(year)
# First week of March (comment translated from Russian).
# NOTE(review): these epoch-millisecond bounds correspond to roughly
# 2015-03-01..2015-04-01, i.e. the whole month rather than one week -- confirm.
(mart_start, mart_end) = (1425157200000, 1427835600000)
# Log everything to both stdout and a per-year log file.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(levelname)-5s %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
fh = logging.FileHandler(log_path)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
root.addHandler(fh)
logging.info("log file name: %s" % log_path)
logging.info("min_samples: %s" % min_samples)
logging.info("Start load news...")
news = []
with open(dataset, encoding="utf8") as f:
    for line in f:
        news.append(News.from_json(line))
words = []
for n in news:
    words.extend(n.content.split())
logging.info("Start couting news...")
counts = Counter(words)
# Words occurring exactly once are excluded from the TF-IDF vocabulary
# (passed below as stop words).
one_time = [k for k, v in dict(counts).items() if v == 1]
logging.info("total words in dataset: %s" % (len(words) - len(one_time)))
one_time_words = set(one_time)
# Restrict clustering to articles inside the [mart_start, mart_end) window.
mart_news = list(filter(lambda x: x.date > mart_start and x.date < mart_end, news))
mart_content = [filter_words(x.content) for x in mart_news]
logging.info(
    "count news range: %s, from: %s, to: %s" % (len(mart_content), millis_to_str(mart_start), millis_to_str(mart_end)))
start_vectorize = time.time()
logging.info("Start vectorization...")
tfidf_vectorizer = TfidfVectorizer(use_idf=True, tokenizer=lambda text: text.split(" "), stop_words=one_time_words,
                                   max_df=0.5, min_df=2, norm='l2')  # , ngram_range=(1, 3)
tfidf_matrix = tfidf_vectorizer.fit_transform(mart_content)
logging.info("vocabulary size: %s, vectorize time: %s s" % (tfidf_matrix.shape[1], time.time() - start_vectorize))
# Sweep eps over [eps_start, eps_end) and write one clustering result per value.
for eps in np.arange(eps_start, eps_end, eps_step):
    result_path = result_base + '/%s.%s.dbscan.json' % (str(year), str(eps))
    start_clustering = time.time()
    logging.info("Start clustering for eps: %s ..." % eps)
    db = DBSCAN(eps=eps, min_samples=min_samples).fit(tfidf_matrix)
    labels = db.labels_
    logging.info("clustering time: %s s" % (time.time() - start_clustering))
    logging.info("Start save result...")
    save_clusters(mart_news, labels, result_path)
    logging.info("End clustering for eps: %s ..." % eps)
logging.info("End script, total time: %s s" % (time.time() - start_time))
| jaitl/kasandra-rus | kasandra_nlp/scripts/clustering/dbscan.py | dbscan.py | py | 2,867 | python | en | code | 1 | github-code | 13 |
6748551084 | from types import ModuleType
from typing import List, Optional, Callable, Union, Dict
from importlib import import_module
from flask import Flask, Blueprint
from flask_jsonrpc import JSONRPC
class AutoBluePrint(object):
    """Flask extension that auto-discovers and registers blueprints from config."""
    def __init__(self, app: Optional[Flask] = None, jsonrpc: Optional[JSONRPC] = None):
        if app:
            self.init_app(app, jsonrpc)
    def init_app(self, app: Flask, jsonrpc: JSONRPC):
        """Automatically register all configured blueprints on ``app``.

        Reads INSTALL_BLUEPRINTS (list of blueprint package paths) and
        URL_ROOT_PATH (dotted path of the root URL module, which must
        expose a ``urlpatterns`` list) from the app config, then wires
        each blueprint's ``urlpatterns``/``apipatterns`` into Flask and
        flask-jsonrpc respectively.
        """
        # Blueprint package paths to install, taken from the app config.
        blueprint_path_list: List = app.config.get("INSTALL_BLUEPRINTS", [])
        # Dotted path of the root URL module.
        url_root_path: str = app.config.get("URL_ROOT_PATH", "application.urls")
        # Import the root URL module.
        url_root_module: ModuleType = import_module(url_root_path)
        # The root URL module must define a ``urlpatterns`` list.
        if not hasattr(url_root_module, "urlpatterns"):
            message: str = "总路由文件 URL_ROOT_PATH,没有路由列表!请在总路由文件中设置 urlpatterns 路由列表"
            app.logger.error(message)
            raise Exception(message)
        root_urlpatterns: List = url_root_module.urlpatterns
        # Iterate over the configured blueprint paths and initialise each one.
        for blueprint_path in blueprint_path_list:
            # The last dotted segment of the path names the blueprint.
            blueprint_name: str = blueprint_path.split(".")[-1]
            # Create one Blueprint object per blueprint package.
            blueprint: Blueprint = Blueprint(blueprint_name, blueprint_path)
            # URL prefix for this blueprint's routes.
            url_prefix: str = ""
            # Sub-route list of this blueprint.
            urlpatterns: List = []
            # Parent package of the blueprint; used to build the dotted path
            # of each blueprint's urls module referenced by the root list.
            blueprint_father_path: str = ".".join(blueprint_path.split(".")[:-1])
            # Walk the root pattern list.
            for item in root_urlpatterns:
                # Only blueprints referenced in the root list become
                # reachable; unreferenced blueprints are never exposed.
                if blueprint_name in item["blueprint_url_subffix"]:
                    # Import this blueprint's urls module.
                    urls_module: ModuleType = import_module(f"{blueprint_father_path}.{item['blueprint_url_subffix']}")
                    # Route lists declared by the urls module.
                    urlpatterns: List = getattr(urls_module, "urlpatterns", [])
                    apipatterns: List = getattr(urls_module, "apipatterns", [])
                    # Extract the blueprint-level URL prefix.
                    url_prefix = item["url_prefix"]
                    # Register every plain route on the blueprint object.
                    for url in urlpatterns:
                        blueprint.add_url_rule(**url)
                    for api in apipatterns:
                        api["name"] = f"{url_prefix[1:].title()}.{api['rule']}"  # e.g. Home.menu
                        jsonrpc.register_view_function(**api)
                    break
            try:
                # Let the blueprint auto-discover its models module (if any).
                import_module(f"{blueprint_path}.models")
            except ModuleNotFoundError:
                pass
            # Finally register the blueprint on the app instance;
            # url_prefix comes from the root URL module declaration above.
            app.register_blueprint(blueprint, url_prefix=url_prefix)
# def path(rule: str, name: Union[Callable, str], **kwargs) -> Dict:
# """绑定url地址和视图的映射关系"""
# if isinstance(name, Callable):
# # 子路由
# return {"rule": rule, "view_func": name, **kwargs}
# elif isinstance(name, str):
# # 总路由
# return {"url_prefix": rule, "blueprint_url_subffix": name, **kwargs}
# else:
# return {}
def path(rule: str, view_func: Callable, **kwargs) -> Dict:
    """Build the kwargs dict binding a URL rule to a view function.

    The result is later splatted into ``Blueprint.add_url_rule``; extra
    keyword arguments are passed through (and may override the defaults).
    """
    mapping: Dict = {"rule": rule, "view_func": view_func}
    mapping.update(kwargs)
    return mapping
def include(url_prefix: str, blueprint_url_subffix: str) -> Dict:
    """Bind a URL prefix to a blueprint's url module (Django-style include).

    ``blueprint_url_subffix`` has the form '<blueprint package>.<url module>';
    e.g. a blueprint package ``home`` whose url module is ``urls`` is given
    as 'home.urls'.
    """
    return dict(url_prefix=url_prefix,
                blueprint_url_subffix=blueprint_url_subffix)
| HkwJsxl/yingmingapi | application/utils/blueprint.py | blueprint.py | py | 4,708 | python | en | code | 0 | github-code | 13 |
16136917294 | """
=================
spectral analysis
=================
"""
# imports
import mne
import numpy as np
import pandas as pd
import pickle
import os.path as op
from mne.time_frequency import psd_welch
def calculatePSD(path,
                 subjects,
                 tasks,
                 freqs,
                 n_overlap,
                 n_fft=1000,
                 n_job=1):
    """
    run spectral analysis using Welch’s method with a Hanning window of 1s with 50% overlap.
    For every subject/task pair: load the cleaned epochs file, compute Welch
    PSDs (1-40 Hz, all channels), convert to dB, average over epochs, then
    aggregate per brain area (see ``_patch_brain``) and per frequency band.
    Parameters
    ----------
    path : str
        path to clean data (directory containing the *_epo.fif files)
    subjects : iterable
        subject identifiers used to build the epochs file names
    tasks : iterable
        task identifiers used to build the epochs file names
    freqs : dict
        band name -> (start, stop) pair; used as *indices* into the PSD
        frequency axis, not Hz (see the TODO below)
    n_overlap : int
        number of samples of overlap between Welch segments
    n_fft : int
        FFT length for Welch's method
    n_job : int
        number of parallel jobs passed to ``psd_welch``
    Side effects
    ------------
    Writes 'docs/psds_2nd_analysis.csv' and 'psd_unaggragated_2nd_analysis.pkl'.
    """
    psd_unaggregated = {}
    psd_total = pd.DataFrame()
    for n_sub in subjects:
        psd_aggregated = {}
        for task in tasks:
            epo_name = f'sub-{n_sub}_ses-01_task-{task}_proc-clean_epo.fif'
            # NOTE(review): ``dir`` shadows the builtin of the same name.
            dir = op.join(path, epo_name)
            # open clean epochs
            epochs = mne.read_epochs(dir)
            # calculate psd for broadbands and all channels
            psds, _ = psd_welch(epochs,
                                fmin=1,
                                fmax=40,
                                picks='all',
                                n_fft=n_fft,
                                n_overlap=n_overlap,
                                n_jobs=n_job)
            psd_unaggregated[f'{n_sub}-{task}'] = psds
            # freq_dict[f'{n_sub}-{task}'] = freqs
            # transform to dB
            psd_transformed = 10. * np.log10(psds)
            # aggregate over the epoch dimention
            psd_transformed = psd_transformed.mean(0)
            # calculate psds for different frequency bands across different brain areas
            ch_nam = epochs.ch_names
            ba_list = _patch_brain()
            for key in ba_list.keys():
                channels = ba_list[key]
                temp1 = [psd_transformed[ch_nam.index(i)] for i in channels] # sift psd of relevant channels out
                # aggregate over different frequency bands
                for k, v in freqs.items():
                    temp2 = [temp1[i][v[0]:v[1]] for i in range(len(temp1))] # TODO change this code: depending on the
                    # parameters of psd_welch it would malfunction! I should use something like this:
                    # temp2 = temp1[:, np.where((freqs[k][0] <= psd_freq) & (psd_freq <= freqs[k][1]) == True)[0]]
                    # where psd freq is the frequency vector from psd_welch
                    temp3 = np.array(temp2)
                    # mean over channels of the area, then over band bins;
                    # keys do NOT include the task, so each task overwrites
                    # the previous one within a subject.
                    psd_aggregated[f'{key}-{k}'] = temp3.mean(0).mean(0)
        # NOTE(review): this runs after the task loop, so the row label and
        # values reflect only the *last* task of each subject -- confirm
        # that is intended.
        psd_df = pd.DataFrame(psd_aggregated, index=[f'{n_sub}-{task}'])
        # NOTE(review): DataFrame.append was removed in pandas 2.0; would
        # need pd.concat on newer pandas.
        psd_total = psd_total.append(psd_df)
    # save
    psd_total.to_csv('docs/psds_2nd_analysis.csv')
    with open('psd_unaggragated_2nd_analysis.pkl', 'wb') as handle:
        pickle.dump(psd_unaggregated, handle)
def _patch_brain():
brain_areas = {
'LF': ['Fp1', 'F3', 'F7', 'AF3', 'F1', 'F5', 'FT7'],
'LC': ['C3', 'T7', 'FC1', 'FC3', 'FC5', 'C1', 'C5'],
'LP': ['P3', 'P7', 'CP1', 'CP3', 'CP5', 'TP7', 'P1', 'P5'],
'LO': ['O1', 'PO3'],
'RF': ['Fp2', 'F4', 'F8', 'AF4', 'F2', 'F6', 'FT8'],
'RC': ['C4', 'T8', 'FC2', 'FC4', 'FC6', 'C2', 'C6'],
'RP': ['P4', 'P8', 'CP2', 'CP4', 'CP6', 'TP8', 'P2', 'P6'],
'RO': ['O2', 'PO4'],
'FZ': ['Fpz', 'Fz'],
'CZ': ['Cz', 'CPz', 'FCz'],
'PZ': ['Pz', 'POz'],
'OZ': ['Oz', 'Iz'],
'all': ['Fp1',
'Fp2',
'F3',
'F4',
'C3',
'C4',
'P3',
'P4',
'O1',
'O2',
'F7',
'F8',
'T7',
'T8',
'P7',
'P8',
'Fpz',
'Fz',
'Cz',
'CPz',
'Pz',
'POz',
'Oz',
'Iz',
'AF3',
'AF4',
'F1',
'F2',
'F5',
'F6',
'FC1',
'FC2',
'FC3',
'FC4',
'FC5',
'FC6',
'FT7',
'FT8',
'C1',
'C2',
'C5',
'C6',
'CP1',
'CP2',
'CP3',
'CP4',
'CP5',
'CP6',
'TP7',
'TP8',
'P1',
'P2',
'P5',
'P6',
'PO3',
'PO4']}
return brain_areas
def extract_psds_freatures(path='docs/1.psd_unaggragated_2nd_analysis.pkl',
                           n_epochs=60):
    """Extract mean broadband PSD over the first/last ``n_epochs`` epochs.

    Loads the pickled unaggregated PSD dict (subject-task -> epochs array),
    computes one '<key>_start_allbroadband' and one '<key>_end_allbroadband'
    scalar per entry, writes them to 'start_end_features.csv' (one row) and
    returns the feature dict.
    """
    with open(path, 'rb') as handle:
        psds_unagg = pickle.load(handle)
    features = {}
    for k, v in psds_unagg.items():
        features[k+'_start_allbroadband'] = v[:n_epochs].mean()
        features[k+'_end_allbroadband'] = v[-n_epochs:].mean()
        # features[k] = v.mean(0)
    # Bug fix: the feature values are scalars, and DataFrame.from_dict on an
    # all-scalar dict raises "If using all scalar values, you must pass an
    # index" -- build an explicit single-row frame instead.
    features_csv = pd.DataFrame(features, index=[0])
    features_csv.to_csv('start_end_features.csv')
    return features
| Yeganehfrh/SuggNet | src/sugnet/preprocessing/spectral_analysis.py | spectral_analysis.py | py | 5,828 | python | en | code | 1 | github-code | 13 |
16140887922 | from users.models import Customer
from django.contrib.auth.models import User
from django.shortcuts import render
from .models import OrderItem
from .forms import OrderCreateForm, CustomerCreateForm, UserCreateForm
from django.forms import modelformset_factory
from cart.cart import Cart
def order_create(request):
    """Create an Order (plus User and Customer records) from the session cart.

    On POST, all three forms are validated together; on success the user,
    customer, order and one OrderItem per cart line are created, the cart
    is emptied and the confirmation page is rendered.  On GET (or invalid
    POST) the create page is rendered with the (possibly bound) forms.
    """
    cart = Cart(request)
    if request.method == 'POST':
        form = OrderCreateForm(request.POST)
        user_form = UserCreateForm(request.POST)
        customer_create_form = CustomerCreateForm(request.POST)
        # Bug fix: user_form was never validated, so invalid user data could
        # previously slip through and fail at save() time.
        if form.is_valid() and user_form.is_valid() and customer_create_form.is_valid():
            # commit=False: derived fields are filled in below before saving.
            user = user_form.save(commit=False)
            customer = customer_create_form.save(commit=False)
            order = form.save(commit=False)
            # Derive the username from the local part of the email address.
            user.username = user.email.split('@')[0]
            user.save()
            Customer.objects.create(user=user, address=customer.address,
                                    postal_code=customer.postal_code,
                                    city=customer.city)
            order.customer = user
            order.save()
            for item in cart:
                OrderItem.objects.create(order=order,
                                         product=item['product'],
                                         price=item['price'],
                                         quantity=item['quantity'])
            # clear the cart
            cart.clear()
            return render(request,
                          'order/created.html',
                          {'order': order, 'customer': customer, 'user': user})
        # invalid POST: fall through with the bound forms so errors show
    else:
        form = OrderCreateForm()
        user_form = UserCreateForm()
        customer_create_form = CustomerCreateForm()
    return render(request,
                  'order/create.html',
                  {'cart': cart,
                   'form': form,
                   'customer_create_form': customer_create_form,
                   'user_form': user_form})
| acor8826/phoenix | orders/views.py | views.py | py | 1,952 | python | en | code | 0 | github-code | 13 |
39732155451 | filename = "name.txt"
# Read every line of the input file.
with open(filename) as handle:
    all_lines = handle.readlines()

# Column-wise collections for the three fields on each line.
name_list = []
surname_list = []
age_list = []

# Each line holds "Name Surname Age"; split and collect per column.
for entry in all_lines:
    fields = entry.split()
    name_list.append(fields[0])   # index 0: name
    surname_list.append(fields[1])  # index 1: surname
    age_list.append(fields[2])    # index 2: age

# Show the collected data.
print(name_list, surname_list, age_list)
| musaninsopasi/name | name.py | name.py | py | 648 | python | tr | code | 0 | github-code | 13 |
38380911963 | import networkx as nx
import string
def parse(input_data):
    """Parse the heightmap into an adjacency mapping plus key positions.

    Returns (edges, start, end, low_squares) where edges maps each (row, col)
    cell to the list of in-bounds neighbours at most one elevation step up,
    start/end are the 'S'/'E' cells, and low_squares lists every 'a' cell
    (candidate starting points for part 2).
    """
    heights = {letter: idx for idx, letter in enumerate(string.ascii_lowercase)}
    heights['S'] = 0
    heights['E'] = 25

    grid = [list(row.strip()) for row in input_data.splitlines()]
    n_rows = len(grid)
    n_cols = len(grid[0])

    edges = {}
    low_squares = []  # for part 2

    for r, row in enumerate(grid):
        for c, cell in enumerate(row):
            if cell == 'S':
                start = (r, c)
                height = 0
            elif cell == 'E':
                end = (r, c)
                height = 26
            else:
                height = heights[cell]
                if cell == 'a':
                    low_squares.append((r, c))
            # A step is allowed when the target is at most one unit higher.
            edges[(r, c)] = [
                (nr, nc)
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c - 1), (r, c + 1))
                if 0 <= nr < n_rows and 0 <= nc < n_cols
                and heights[grid[nr][nc]] - height <= 1
            ]
    return edges, start, end, low_squares
def solve1(input_data: str) -> int:
    """Length of the shortest climb from 'S' to 'E'."""
    adjacency, source, target, _ = parse(input_data)
    # Build a directed graph from the adjacency dict and let networkx
    # find the shortest path length.
    climb_graph = nx.DiGraph(adjacency)
    return nx.shortest_path_length(climb_graph, source=source, target=target)
def solve2(input_data):
    """Shortest climb from *any* lowest-elevation ('a') square to 'E'."""
    edges, _, end, starts = parse(input_data)
    # create a directed graph from the dict of edges
    graph = nx.DiGraph(edges)
    path_lengths = []
    for start in starts:
        try:
            path_lengths.append(nx.shortest_path_length(graph, source=start, target=end))
        # Bug fix: the bare ``except:`` swallowed every exception (including
        # KeyboardInterrupt); only "no path from this square" is expected.
        except nx.NetworkXNoPath:
            continue
    return min(path_lengths)
if __name__ == '__main__':
    from aocd.models import Puzzle
    import networkx as nx
    sample_data = """Sabqponm
abcryxxl
accszExk
acctuvwj
abdefghi"""
    puzzle = Puzzle(2022, 12)
    # Sanity-check both parts against the published sample answers
    # before running on the real puzzle input.
    assert solve1(sample_data) == 31
    assert solve2(sample_data) == 29
    answer_1 = solve1(puzzle.input_data)
    print(answer_1)
    # Submitting via the aocd Puzzle object uploads the answer.
    puzzle.answer_a = answer_1
    answer_2 = solve2(puzzle.input_data)
    print(answer_2)
puzzle.answer_b = answer_2 | mharty3/advent_of_code | 2022/day-12.py | day-12.py | py | 2,704 | python | en | code | 0 | github-code | 13 |
69892974099 | import csv
import sys
import json
import logging
import argparse
from collections import defaultdict
from flask import Flask, render_template, request
app = Flask(__name__)
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%Y/%m/%d %H:%M:%S",
level=logging.INFO,
)
csv.register_dialect(
"csv", delimiter=",", quoting=csv.QUOTE_MINIMAL, quotechar='"', doublequote=True,
escapechar=None, lineterminator="\n", skipinitialspace=False,
)
def read_csv(file, dialect, write_log=True):
    """Read ``file`` as CSV using ``dialect`` and return every row as a list.

    When ``write_log`` is true, the file name and the row count are logged.
    """
    if write_log:
        logger.info(f"Reading {file}")
    with open(file, "r", encoding="utf8", newline="") as handle:
        row_list = list(csv.reader(handle, dialect=dialect))
    if write_log:
        logger.info(f"Read {len(row_list):,} rows")
    return row_list
def get_global_data():
    """Load the two CSV fixtures and build the app's lookup tables.

    Returns:
        plant_to_geneid: plant name -> sorted list of GeneIDs
        geneid_commonname_pmid: GeneID -> CommonName -> set of PMIDs
        commonname_to_relation: CommonName -> set of relation tuples in
            which it appears as head or tail entity
    """
    geneid_data_file = "geneid_commonname.csv"
    relation_data_file = "commonname_relation.csv"
    geneid_data = read_csv(geneid_data_file, "csv")
    relation_data = read_csv(relation_data_file, "csv")
    # First row of each fixture is the header.
    geneid_header, geneid_data = geneid_data[0], geneid_data[1:]
    relation_header, relation_data = relation_data[0], relation_data[1:]
    # Guard against schema drift in the fixture files.
    assert geneid_header == [
        "plant", "GeneID", "CommonName",
        "alias_GeneIDs_in_the_sentence", "pmid", "sentence",
    ]
    assert relation_header == [
        "head", "relation", "tail",
        "head_entity", "head_type",
        "tail_entity", "tail_type",
        "simple", "pmid", "sentence",
    ]
    plant_to_geneid = defaultdict(lambda: set())
    geneid_commonname_pmid = defaultdict(lambda: defaultdict(lambda: set()))
    commonname_to_relation = defaultdict(lambda: set())
    for plant, geneid, commonname, _alias_geneids_in_the_sentence, pmid, _sentence in geneid_data:
        plant_to_geneid[plant].add(geneid)
        geneid_commonname_pmid[geneid][commonname].add(pmid)
    for relation_datum in relation_data:
        # Tuples are hashable, so a relation can live in the sets below.
        relation_datum = tuple(relation_datum)
        (
            head, relation, tail,
            head_entity, head_type,
            tail_entity, tail_type,
            simple, pmid, sentence,
        ) = relation_datum
        # Index each relation under every CommonName entity it mentions.
        if head_type == "CommonName":
            commonname_to_relation[head_entity].add(relation_datum)
        if tail_type == "CommonName":
            commonname_to_relation[tail_entity].add(relation_datum)
    # Convert to a plain dict with sorted GeneID lists for deterministic output.
    plant_to_geneid = {
        plant: sorted(geneid_set)
        for plant, geneid_set in plant_to_geneid.items()
    }
    return plant_to_geneid, geneid_commonname_pmid, commonname_to_relation
plant_to_geneid, geneid_commonname_pmid, commonname_to_relation = get_global_data()
@app.route("/")
def home():
    """Render the landing page."""
    template_name = "home.html"
    return render_template(template_name)
@app.route("/run_load_gene_list", methods=["POST"])
def run_load_gene_list():
    """Return the list of GeneIDs for the plant named in the JSON body."""
    payload = json.loads(request.data)
    plant = payload["plant"]
    return json.dumps({"geneid_list": plant_to_geneid[plant]})
@app.route("/run_generate_graph", methods=["POST"])
def run_generate_graph():
    """Build the node/edge lists for the requested GeneID's relation graph.

    Expects a JSON body with a "geneid" key; returns JSON with "node_list"
    and "edge_list" suitable for a vis.js-style network renderer.
    """
    data = json.loads(request.data)
    type_to_color = {
        "GeneID": "#d5abff",  # 270°, 33%, 100% violet
        "CommonName": "#abffff",  # 180°, 33%, 100% cyan
        "Compound": "#d5ffab",  # 90°, 33%, 100% yellow-green
        "Species": "#ffffab",  # 60°, 33%, 100% yellow
        "Location": "#ffd5ab",  # 30°, 33%, 100% orange
        "Process": "#ffabab",  # 0°, 33%, 100% red
    }
    # Nodes with negative ids form a fixed legend subgraph; real data
    # nodes are assigned non-negative ids starting at 0.
    node_list = [
        {"id": -1, "label": "GeneID", "color": type_to_color["GeneID"]},
        {"id": -2, "label": "CommonName", "color": type_to_color["CommonName"]},
        {"id": -3, "label": "CommonName", "color": type_to_color["CommonName"]},
        {"id": -4, "label": "Compound", "color": type_to_color["Compound"]},
        {"id": -5, "label": "Species", "color": type_to_color["Species"]},
        {"id": -6, "label": "Location", "color": type_to_color["Location"]},
        {"id": -7, "label": "Process", "color": type_to_color["Process"]},
    ]
    edge_list = [
        {"from": -1, "to": -2},
        {"from": -2, "to": -3},
        {"from": -2, "to": -4},
        {"from": -2, "to": -5},
        {"from": -2, "to": -6},
        {"from": -2, "to": -7},
    ]
    # name_to_nid: entity name -> node id; pair_to_width counts parallel
    # edges per node pair so they can be collapsed into one weighted edge.
    name_to_nid = {}
    pair_to_label = defaultdict(lambda: [])
    pair_to_width = defaultdict(lambda: 0)
    # GeneID node (always id 0)
    geneid = data["geneid"]
    name_to_nid[geneid] = 0
    node_list.append({"id": 0, "label": geneid, "color": type_to_color["GeneID"]})
    edge_list.append({"from": 0, "to": -1})
    # CommonName nodes linked to this GeneID; one edge-width unit per PMID
    commonname_to_pmid_set = geneid_commonname_pmid.get(geneid, {})
    for commonname, pmid_set in commonname_to_pmid_set.items():
        nid = name_to_nid.get(commonname, None)
        if nid is None:
            nid = len(node_list)
            name_to_nid[commonname] = nid
            node_list.append({"id": nid, "label": commonname, "color": type_to_color["CommonName"]})
        for pmid in pmid_set:
            # edge_list.append({"from": 0, "to": nid, "label": f"PMID{pmid}"})
            # edge_list.append({"to": 0, "from": nid})
            pair_to_width[(0, nid)] += 1
    # Entity relations mentioning any of those CommonNames
    for commonname in commonname_to_pmid_set:
        for relation_datum in commonname_to_relation.get(commonname, []):
            (
                head, relation, tail,
                head_entity, head_type,
                tail_entity, tail_type,
                simple, pmid, sentence,
            ) = relation_datum
            # head node (created on first sight)
            head_nid = name_to_nid.get(head_entity, None)
            if head_nid is None:
                head_nid = len(node_list)
                name_to_nid[head_entity] = head_nid
                node_list.append({"id": head_nid, "label": head_entity, "color": type_to_color[head_type]})
            # tail node (created on first sight)
            tail_nid = name_to_nid.get(tail_entity, None)
            if tail_nid is None:
                tail_nid = len(node_list)
                name_to_nid[tail_entity] = tail_nid
                node_list.append({"id": tail_nid, "label": tail_entity, "color": type_to_color[tail_type]})
            # CommonName -> Entity edge; edges are oriented away from the
            # CommonName node (pair sorted when both ends are CommonNames)
            if head_type == "CommonName":
                if tail_type == "CommonName":
                    from_nid, to_nid = sorted((head_nid, tail_nid))
                else:
                    from_nid, to_nid = head_nid, tail_nid
            else:
                from_nid, to_nid = tail_nid, head_nid
            if simple == "T":
                # only "simple" relations contribute a text label
                # edge_list.append({"from": head_nid, "to": tail_nid, "label": f"PMID{pmid}: {relation}"})
                # edge_list.append({"from": head_nid, "to": tail_nid, "label": relation})
                pair_to_label[(from_nid, to_nid)].append(relation)
                pair_to_width[(from_nid, to_nid)] += 1
            else:
                # edge_list.append({"from": head_nid, "to": tail_nid, "label": f"PMID{pmid}"})
                # edge_list.append({"from": head_nid, "to": tail_nid})
                pair_to_width[(from_nid, to_nid)] += 1
    # Emit one edge per node pair: width = number of collapsed parallel
    # edges, label = the accumulated relation names joined by newlines.
    for pair, width in pair_to_width.items():
        from_nid, to_nid = pair
        label = "\n".join(pair_to_label[pair])
        edge_list.append({"from": from_nid, "to": to_nid, "width": width, "label": label})
    response = {
        "node_list": node_list,
        "edge_list": edge_list,
    }
    return json.dumps(response)
def main():
    """Parse the host/port CLI options and serve the Flask app."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-host", default="0.0.0.0")
    cli.add_argument("-port", default="12345")
    options = cli.parse_args()
    app.run(host=options.host, port=options.port)
if __name__ == "__main__":
    main()
    # Explicitly terminate the interpreter once the server loop returns.
    sys.exit()
| jacobvsdanniel/plant_ner_spacy | geneid-commonname-relation-visualization/server.py | server.py | py | 7,816 | python | en | code | 0 | github-code | 13 |
33556041886 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import sys
import ctypes
import PIL.ImageGrab
# from . import windows
# from . import util
# from .keyboard_hook import KeyboardHook
if sys.platform != 'win32':
import platform
raise Exception('Invalid platform: %s (%s)' % (sys.platform, platform.platform()))
def screenshot():
    """Grab a screenshot of the whole screen as a PIL image."""
    return PIL.ImageGrab.grab()
class Mouse:
    """Synthetic mouse input via the Win32 SendInput / mouse_event APIs."""
    def __init__(self):
        pass
    @staticmethod
    def move(x, y, width=1280, height=720, x_offset=0, y_offset=0, is_relative=False):
        """Move the cursor to (x, y).

        In absolute mode the coordinates are scaled from the given virtual
        (width, height) space into SendInput's 0..65535 normalized space.
        """
        # ctypes.windll.user32.SetCursorPos(x, y)
        extra = ctypes.c_ulong(0)
        input_type = _InputType()
        dwFlags = Mouse.Event.MOVE
        if not is_relative:
            dwFlags |= Mouse.Event.ABSOLUTE
            # Normalize into SendInput's absolute coordinate space; the +1
            # biases away from the extreme (0, 0) corner.
            x = 1 + int((x+x_offset) * 65536 / width)
            y = 1 + int((y+y_offset) * 65536 / height)
        input_type.mi = _MouseInput(x, y, 0, dwFlags, 0, ctypes.pointer(extra))
        # INPUT type 0 == INPUT_MOUSE
        command = _Input(ctypes.c_ulong(0), input_type)
        ctypes.windll.user32.SendInput(1, ctypes.pointer(command), ctypes.sizeof(command))
    @staticmethod
    def move_on_foreground_window(x, y, width=1920, height=1080, is_relative=False):
        """Move relative to the foreground window's top-left corner.

        NOTE(review): the default virtual size here (1920x1080) differs from
        move()'s default (1280x720) -- confirm this is intentional.
        """
        rect = get_foreground_window_rect()
        Mouse.move(x+rect.left, y+rect.top, width=width, height=height, is_relative=is_relative)
        # Mouse.move(x, y, width, height)
    @staticmethod
    def click(event):
        """Emit a raw mouse_event with the given Event flag(s)."""
        ctypes.windll.user32.mouse_event(event, 0, 0, 0, 0)
    @staticmethod
    def perform_click():
        """Left-button press followed by release (a full left click)."""
        Mouse.click(0x0002)
        Mouse.click(0x0004)
    @staticmethod
    def get_cursor_pos():
        """Return the cursor position as (x, y), or (0, 0) if the call fails."""
        point = _Point()
        if ctypes.windll.user32.GetCursorPos(ctypes.pointer(point)):
            return (point.x, point.y)
        else:
            return (0, 0)
    class Event:
        """
        mouse_event / SendInput flag constants.
        https://docs.microsoft.com/ko-kr/windows/win32/api/winuser/nf-winuser-mouse_event
        """
        ABSOLUTE = 0x8000
        LEFT_DOWN = 0x0002
        LEFT_UP = 0x0004
        MIDDLE_DOWN = 0x0020
        MIDDLE_UP = 0x0040
        MOVE = 0x0001
        RIGHT_DOWN = 0x0008
        RIGHT_UP = 0x0010
        WHEEL = 0x0800
        X_DOWN = 0x0080
        X_UP = 0x0100
class _KeyboardInput(ctypes.Structure):
    """ctypes mirror of the Win32 KEYBDINPUT struct (field order is ABI-critical)."""
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", ctypes.POINTER(ctypes.c_ulong))]
class _HardwardInput(ctypes.Structure):
    """ctypes mirror of the Win32 HARDWAREINPUT struct.

    NOTE(review): the class name is misspelled ("Hardward"); renaming would
    also touch _InputType, so it is only flagged here.
    """
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]
class _MouseInput(ctypes.Structure):
    """ctypes mirror of the Win32 MOUSEINPUT struct (field order is ABI-critical)."""
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", ctypes.POINTER(ctypes.c_ulong))]
class _InputType(ctypes.Union):
    """The union member of the Win32 INPUT struct: only one of ki/mi/hi is live."""
    _fields_ = [("ki", _KeyboardInput),
                ("mi", _MouseInput),
                ("hi", _HardwardInput)]
class _Input(ctypes.Structure):
    """ctypes mirror of the Win32 INPUT struct; `type` selects the union member."""
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", _InputType)]
class _Point(ctypes.Structure):
_fields_ = [("x", ctypes.c_ulong),
("y", ctypes.c_ulong)]
class _MSLLHOOKSTRUCT(ctypes.Structure):
    """ctypes mirror of the Win32 MSLLHOOKSTRUCT (low-level mouse hook payload)."""
    _fields_ = [("pt", _Point),
                ("mouseData", ctypes.c_ulong),  # ctypes.wintypes.DWORD
                ("flags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", ctypes.POINTER(ctypes.c_ulong))]
"""
class RAWINPUTDEVICE(ctypes.Structure):
from ctypes.wintypes import HWND
_fields_ = [("usUsagePage", ctypes.c_ushort),
("usUsage", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong), # == ctypes.wintypes.DWORD
("hwndTarget", HWND)]
"""
class Keyboard:
    """Synthetic keyboard input via SendInput using hardware scan codes."""
    class Key:
        # Hardware scan codes (not virtual-key codes) for the keys used here.
        N1 = 0x02
        Q = 0x10
        E = 0x12
        W = 0x11
        A = 0x1E
        S = 0x1F
        D = 0x20
        R = 0x13
        SHIFT = 0x2A
        SPACE = 0x39
    @staticmethod
    def click(key_code):
        """Press and immediately release the key."""
        Keyboard.press(key_code)
        Keyboard.release(key_code)
    @staticmethod
    def press(key_code):
        """Send a key-down event; 0x0008 == KEYEVENTF_SCANCODE."""
        extra = ctypes.c_ulong(0)
        input_type = _InputType()
        input_type.ki = _KeyboardInput(0, key_code, 0x0008, 0, ctypes.pointer(extra))
        # INPUT type 1 == INPUT_KEYBOARD
        key = _Input(ctypes.c_ulong(1), input_type)
        ctypes.windll.user32.SendInput(1, ctypes.pointer(key), ctypes.sizeof(key))
    @staticmethod
    def release(key_code):
        """Send a key-up event; 0x0002 == KEYEVENTF_KEYUP."""
        extra = ctypes.c_ulong(0)
        input_type = _InputType()
        input_type.ki = _KeyboardInput(0, key_code, 0x0008 | 0x0002, 0, ctypes.pointer(extra))
        key = _Input(ctypes.c_ulong(1), input_type)
        ctypes.windll.user32.SendInput(1, ctypes.pointer(key), ctypes.sizeof(key))
def get_foreground_window_title():
    """Return the title text of the currently focused top-level window."""
    lib = ctypes.windll.user32
    handle = lib.GetForegroundWindow()
    buffer = ctypes.create_unicode_buffer(255)
    # GetWindowTextW takes the capacity in *characters*; ctypes.sizeof() reports
    # bytes (2 per wchar), which over-stated the buffer and risked an overrun
    # for very long window titles.
    lib.GetWindowTextW(handle, buffer, 255)
    return buffer.value
class _Rect(ctypes.Structure):
    """ctypes mirror of the Win32 RECT struct (signed LONG edges)."""
    _fields_ = [("left", ctypes.c_long),
                ("top", ctypes.c_long),
                ("right", ctypes.c_long),
                ("bottom", ctypes.c_long)]
def get_foreground_window_rect():
    """Return the bounding _Rect of the currently focused top-level window."""
    lib = ctypes.windll.user32
    handle = lib.GetForegroundWindow()
    rect = _Rect()
    lib.GetWindowRect(handle, ctypes.pointer(rect))
    return rect
def get_foreground_window_grab():
    """Grab a screenshot cropped to the foreground window's rectangle.

    Set Display Resolution to 1920x1080 and Size to 100%.
    """
    rect = get_foreground_window_rect()
    return PIL.ImageGrab.grab(bbox=(rect.left, rect.top, rect.right, rect.bottom))
| rapsealk/win32py | win32py/__init__.py | __init__.py | py | 5,803 | python | en | code | 0 | github-code | 13 |
27300298935 | from era5grib.nci import *
import pandas
import pytest
@pytest.mark.xfail
def test_19810101T0000():
    """era5land is only available for some fields at this timestamp."""
    when = pandas.to_datetime("19810101T0000")
    wrf_ds = read_wrf(when, when)
    assert numpy.all(numpy.isfinite(wrf_ds.sp_surf))
@pytest.mark.xfail
def test_19810101T0100():
    """era5land has every field available at this timestamp."""
    when = pandas.to_datetime("19810101T0100")
    wrf_ds = read_wrf(when, when)
    assert numpy.all(numpy.isfinite(wrf_ds.sp_surf))
#def test_19790101T0000():
# era5land not available
# date = pandas.to_datetime("19790101T0000")
# ds = read_wrf(date, date)
# assert numpy.all(numpy.isfinite(ds.sp_surf))
@pytest.mark.xfail
def test_19481231T2300():
    """Neither era5 nor era5land data exists this early; read_wrf must raise."""
    when = pandas.to_datetime("19481231T2300")
    with pytest.raises(ValueError):
        read_wrf(when, when)
| coecms/era5grib | test/test_nci.py | test_nci.py | py | 877 | python | en | code | 4 | github-code | 13 |
25566045173 | from collections import deque
# Maps the one-letter operation codes to their binary arithmetic functions.
# Division deliberately leaves the accumulator unchanged on a zero divisor.
operators = {
    "a": lambda acc, num: acc + num,
    "s": lambda acc, num: acc - num,
    "d": lambda acc, num: acc / num if num != 0 else acc,
    "m": lambda acc, num: acc * num,
}


def math_operations(*numbers, **operations):
    """Feed *numbers* one by one into the named accumulators.

    Each keyword in *operations* names an accumulator ("a"/"s"/"d"/"m") whose
    starting value is the keyword's value.  Numbers are consumed round-robin:
    one per accumulator in keyword order, wrapping until the queue is empty.
    Returns one "key: value" line per accumulator, sorted by value descending
    then key ascending, formatted to one decimal place, newline-terminated.
    """
    queue = deque(numbers)
    while queue:
        for key, current in operations.items():
            if not queue:
                break
            operations[key] = operators[key](current, queue.popleft())
    ordering = sorted(operations.items(), key=lambda pair: (-pair[1], pair[0]))
    return "".join(f"{key}: {value:.1f}\n" for key, value in ordering)
print(math_operations(2.1, 12.56, 0.0, -3.899, 6.0, -20.65, a=1, s=7, d=33, m=15))
print(math_operations(-1.0, 0.5, 1.6, 0.5, 6.1, -2.8, 80.0, a=0, s=(-2.3), d=0, m=0))
print(math_operations(6.0, a=0, s=0, d=5, m=0)) | mustanska/SoftUni | Python_Advanced/Functions Advanced/math_operations.py | math_operations.py | py | 848 | python | en | code | 0 | github-code | 13 |
70869983059 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Hamiltonian with PBC condition
"""
import numpy as np
# Model coefficients for the PBC Hamiltonian (see module docstring).
# NOTE(review): the physical meaning of C1..C4, CG*, CA/CB, LA/LB is not
# documented anywhere in this file -- confirm against the model definition.
C1=1
C2=2
C3=1
C4=0.833
CGA=1
CGB=CGA  # B sublattice mirrors A
CA=0
CB=0
LA=1
LB=LA  # B sublattice mirrors A
# NOTE(review): the np.save(...) call below is missing its array argument --
# numpy.save(file, arr) requires the data to store, so as written that line
# raises TypeError at runtime.
np.save('./input/parameters.npy') | lvhz/pyNodalLine | parameters.py | parameters.py | py | 212 | python | en | code | 0 | github-code | 13 |
30243323213 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import datetime
import json
import logging
from django.utils.translation import ugettext as _
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML
import floppyforms.__future__ as forms
from .models import Person
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class PersonImportForm(forms.Form):
    """Form that parses a JSON payload mapping ISO dates to arbitrary values."""
    data = forms.CharField(widget=forms.Textarea())

    def __init__(self, instance, *args, **kwargs):
        # `instance` is accepted for view-level API symmetry with the other
        # forms in this module but is not used here.
        super(PersonImportForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'data',
            FormActions(Submit('submit', _('Importar'),
                               css_class='btn-primary pull-right',
                               data_loading_text=_('Importando...')), )
        )  # yapf: disable

    def clean_data(self):
        """Validate that `data` is a JSON object keyed by YYYY-MM-DD dates.

        Returns a dict mapping datetime.date -> value; raises ValidationError
        for non-JSON input, non-object JSON, or unparsable date keys.
        """
        try:
            data = json.loads(self.cleaned_data['data'])
        except ValueError:
            raise forms.ValidationError('No es un objeto JSON válido.')
        try:
            items = data.items()
        except AttributeError:
            raise forms.ValidationError('No es un diccionario JSON válido.')
        # The try below either returns or raises, so the old trailing
        # `return data` was unreachable dead code and has been removed.
        try:
            return {datetime.datetime.strptime(k, '%Y-%m-%d').date(): v for k, v in items}
        except ValueError:
            raise forms.ValidationError('Una de las fechas no fue valida.')
class PersonUpdateForm(forms.ModelForm):
    """Edit form for a Person's preferences and nutrition limits."""
    class Meta(object):
        model = Person
        fields = (
            'default_meal_data',
            'valid_calories',
            'valid_carbs',
            'valid_proteins',
            'valid_fat',
            'valid_fiber',
            'charts',
            'timezone',
            'owner',
        )  # yapf: disable
    def __init__(self, *args, **kwargs):
        super(PersonUpdateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Crispy-forms layout; NOTE(review): the field order here differs from
        # Meta.fields (owner/timezone rendered first) -- assumed deliberate.
        self.helper.layout = Layout(
            'owner',
            'timezone',
            'valid_calories',
            'valid_carbs',
            'valid_proteins',
            'valid_fat',
            'valid_fiber',
            'charts',
            'default_meal_data',
            FormActions(
                Submit('submit', _('Guardar'),
                       css_class='btn-primary pull-right',
                       data_loading_text=_('Guardando...')), )
        )  # yapf: disable
class PersonCreateValueForm(forms.Form):
    """Form that adds a new named tracked value to a Person."""
    name = forms.CharField()
    def __init__(self, instance, *args, **kwargs):
        super(PersonCreateValueForm, self).__init__(*args, **kwargs)
        # The Person whose `values` mapping is checked for name collisions.
        self.instance = instance
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'name',
            FormActions(Submit('submit', _('Agregar'),
                               css_class='btn-primary pull-right',
                               data_loading_text=_('Agregando...')), )
        )  # yapf: disable
    def clean_name(self):
        """Reject names that already exist in ``instance.values``."""
        name = self.cleaned_data['name']
        if name in self.instance.values:
            raise forms.ValidationError(_('Ya hay un valor con ese nombre.'))
        return name
class PersonValuesSelectDatesForm(forms.Form):
    """Inline form selecting the [date_start, date_end] range to edit."""
    date_start = forms.DateField(label=_('Inicio'))
    date_end = forms.DateField(label=_('Final'))
    def __init__(self, instance, *args, **kwargs):
        # Default both dates to the Person's current date (instance-local "today").
        initial = kwargs.pop('initial', {})
        initial.update({
            'date_start': instance.today_date().strftime("%F"),
            'date_end': instance.today_date().strftime("%F"),
        })
        kwargs['initial'] = initial
        super(PersonValuesSelectDatesForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'form-inline'
        self.helper.layout = Layout(
            'date_start',
            'date_end',
            Submit('submit', _('Siguiente'),
                   css_class='btn-primary', ) ,
        )  # yapf: disable
class PersonAddValuesForm(forms.Form):
    """Grid form for editing a Person's tracked values over a date range.

    One FloatField per (value, date) pair is generated dynamically; the crispy
    layout renders them as an HTML table with one row per tracked value and
    one column per day in [date_start, date_end].
    """
    def __init__(self, instance, date_start, date_end, *args, **kwargs):
        if date_start > date_end:
            raise ValueError('date_start is greater than date_end')
        self.instance = instance
        self.date_start = date_start
        self.date_end = date_end
        fields_by_date = self._generate_fields()
        super(PersonAddValuesForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_show_labels = False
        # Header row: one <th> per day in the range.
        self.helper.layout = Layout(HTML('<table class="table"><thead><tr><th>Valor</th>'))
        date = self.date_start
        while date <= self.date_end:
            self.helper.layout.append(HTML('<th>{}</th>'.format(date.strftime('%F'))))
            date += datetime.timedelta(days=1)
        self.helper.layout.append(HTML("</tr></thead><tbody>"))
        # Body: one row per tracked value, one FloatField cell per day.
        for value, fields in fields_by_date:
            self.helper.layout.append(HTML("<tr><td>{}</td>".format(value)))
            for field in fields:
                self.fields[field['field_name']] = forms.FloatField(initial=field['initial'],
                                                                    required=False)
                self.fields[field['field_name']].widget.attrs['step'] = 'any'
                self.helper.layout.append(HTML("<td>"))
                self.helper.layout.append(field['field_name'])
                self.helper.layout.append(HTML("</td>"))
            self.helper.layout.append(HTML("</tr>"))
        self.helper.layout.append(HTML("</tbody></table>"))
        self.helper.layout.append(FormActions(Submit('submit', _('Guardar'),
                                                     css_class='btn-primary pull-right',
                                                     data_loading_text=_('Guardando...')), ))
    def _generate_fields(self):
        """Build [(value, [field spec, ...]), ...] for every value/date pair.

        Each field spec carries the field name ("<value>_<YYYY-MM-DD>"), the
        stored initial value (or None), and the value/date it belongs to.
        """
        values_by_fields = []
        for value, values in sorted(self.instance.values.items()):
            date = self.date_start
            fields = []
            while date <= self.date_end:
                date_str = date.strftime('%F')
                fields.append({
                    'initial': values.get(date_str, None),
                    'field_name': "{}_{}".format(value, date_str),
                    'value': value,
                    'date': date_str
                })
                date += datetime.timedelta(days=1)
            values_by_fields.append((value, fields))
        return values_by_fields
    @staticmethod
    def _get_initial(fields_by_date):
        """Collect each generated field's initial value into one mapping.

        Takes the [(value, fields), ...] list produced by _generate_fields.
        Fixed: the previous version iterated `.values()` (which the list shape
        produced by _generate_fields does not have) and never returned the
        accumulated dict.
        """
        initial = {}
        for _value, fields in fields_by_date:
            for field in fields:
                initial[field['field_name']] = field['initial']
        return initial
    def get_date_value_field_triplets(self):
        """Yield (date_str, value, field_name) for every generated field."""
        for _value, fields in self._generate_fields():
            for field in fields:
                yield field['date'], field['value'], field['field_name']
| pignacio/vld_django | vld_django/persons/forms.py | forms.py | py | 7,106 | python | en | code | 0 | github-code | 13 |
74605644818 | from typing import List, cast
import aioredis
import discord
import discord.ext.commands
import discord.ext.tasks
from shared import configuration
# Seed the shared configuration store with fall-back values.
# NOTE(review): the 'db' default embeds credentials ("pkmn:passwd") -- fine for
# local development only; confirm it is overridden in deployment.
configuration.DEFAULTS.update({
    "token": "",
    "db": "mysql+pool://pkmn:passwd@localhost/pkmndb?max_connections=20&stale_timeout=300",
    "owners": [154363842451734528]
})
class Config():
    """Thin read-only view over the shared ``configuration`` store."""
    def __init__(self) -> None:
        pass
    @property
    def owners(self) -> List[str]:
        # NOTE(review): DEFAULTS seeds 'owners' with ints (Discord user ids),
        # so the List[str] annotation/cast looks wrong -- confirm intended type.
        return cast(List[str], configuration.get('owners'))
    @property
    def token(self) -> str:
        # Discord bot token; empty string by default (see DEFAULTS above).
        return cast(str, configuration.get('token'))
class Bot(discord.ext.commands.Bot):
    """Discord bot wired with the turnip/pkmn extensions and a Redis pool."""
    def __init__(self) -> None:
        self.config = Config()
        super().__init__(command_prefix=discord.ext.commands.when_mentioned_or('='))
        # Load all command/feature extensions up front, before the event loop starts.
        super().load_extension('turnipbot.commands')
        super().load_extension('pkmnhelper.recommendations')
        super().load_extension('database')
        super().load_extension('discordbot.updater')
        super().load_extension('discordbot.owner')
        super().load_extension('discordbot.errors')
        # Populated in on_ready(); None until the bot has connected.
        self.redis: aioredis.Redis = None
    def init(self) -> None:
        """Start the (blocking) Discord event loop."""
        self.run(self.config.token)
    async def on_ready(self) -> None:
        """Connection callback: set up the Redis pool and log the session."""
        self.redis = await aioredis.create_redis_pool("redis://localhost", minsize=5, maxsize=10)
        print('Logged in as {username} ({id})'.format(username=self.user.name, id=self.user.id))
        print('Connected to {0}'.format(', '.join([server.name for server in self.guilds])))
        print('--------')
def init() -> None:
    """Construct the bot and hand control to its blocking event loop."""
    Bot().init()


if __name__ == "__main__":
    init()
| EightBitEllie/ACNH-Turnip-Bot | turnipbot/main.py | main.py | py | 1,653 | python | en | code | 0 | github-code | 13 |
25564143443 | # -*- coding: utf-8 -*-
from qgis.PyQt.QtGui import QIcon
from ..utils import PLUGIN_FOLDER
from .features import Waterpoint
from .popup_layer_source_mixin import PopupLayerSourceMixin
from .importable_feature_layer import ImportableFeatureLayer
from .waterpoint_buffer_popup_layer import WaterpointBufferPopupLayer
class WaterpointLayer(ImportableFeatureLayer, PopupLayerSourceMixin):
    """Feature layer holding Waterpoint features, with buffer popup support."""

    LAYER_NAME = "Waterpoints"
    STYLE = "waterpoint"

    @classmethod
    def getFeatureType(cls):
        """The feature class stored in this layer."""
        return Waterpoint

    def __init__(self,
                 workspaceFile,
                 *dependentLayers):
        """Create or open a Waterpoint layer."""
        ImportableFeatureLayer.__init__(self, workspaceFile,
                                        layerName=WaterpointLayer.defaultName(),
                                        styleName=WaterpointLayer.defaultStyle())
        PopupLayerSourceMixin.__init__(self)
        self.connectPopups()

    @property
    def hasPopups(self):
        """This layer type always supplies popup layers."""
        return True

    @property
    def popupLayerTypes(self):
        """Popup layer classes spawned for a selected waterpoint."""
        return [WaterpointBufferPopupLayer]

    @property
    def relativeLayerPosition(self):
        """Makes the Paddock Land Types popups appear *over* the Paddock layer."""
        return 1

    @property
    def zoomPopupLayerOnLoad(self):
        """True for this because Waterpoints don't zoom nicely."""
        return True

    @classmethod
    def icon(cls):
        """The icon to paint to represent this layer."""
        return QIcon(f":/plugins/{PLUGIN_FOLDER}/images/waterpoint.png")
| Trailmarker/paddock-power | paddock_power/src/layers/waterpoint_layer.py | waterpoint_layer.py | py | 1,565 | python | en | code | 0 | github-code | 13 |
30766035945 | # -*- coding:UTF-8 -*-
"""
轮询组合内的基金,获取基金的消息
以行为单位,存储基金内容
"""
from IOFile import read_group_fund_json, read_chenxingcode_json
from FundParameterInfo import FundInfo
if __name__ == '__main__':
    fund_list = []
    group_fund_info = read_group_fund_json()  # load the fund-group membership data
    chenxing_code = read_chenxingcode_json()  # load the Morningstar (chenxing) codes
    for index in range(len(group_fund_info)):
        each_fund = FundInfo(group_fund_info[index]["ID"], group_fund_info[index]["name"],
                             chenxing_code[group_fund_info[index]["ID"]])
        # refresh the fund's data from the tiantian fund site
        each_fund.update_fund_info_by_tiantian()
        # refresh the fund's data from the Morningstar (chenxing) site
        each_fund.update_fund_info_by_chenxing()
        fund_list.append(each_fund)
    # write the collected data out as CSV
    result_dir = '../output/'
    # Header columns (Chinese): code, size, age, 3-month drawdown, std dev,
    # risk coefficient, Sharpe ratio, alpha, beta, R-squared, stock position,
    # bond position, top-10 stocks, top-5 bonds.
    output_head = '代码' + ',' + '规模' + ',' + '基龄' + ',' + '3月回撤' + ',' + '标准差' + ',' + '风险系数' + ',' + '夏普比' + ',' + \
                  '阿尔法' + ',' + '贝塔' + ',' + 'R平方' + ',' + '股仓' + ',' + '债仓' + ',' + '十股' + ',' + '五债' + '\n'
    with open(result_dir + 'fund_info.csv', 'w') as csv_file:
        csv_file.write(output_head)
        for fund_index in fund_list:
            # NOTE(review): fund_code and fund_name are concatenated without a
            # separator, so both land in the first CSV cell -- confirm intended.
            output_line = fund_index.fund_code + fund_index.fund_name + ',' + \
                          str(fund_index.fund_size) + ',' + \
                          fund_index.established_date + ',' + \
                          str(fund_index.three_month_retracement) + ',' + \
                          str(fund_index.risk_assessment["standard_deviation"]) + ',' + \
                          str(fund_index.risk_assessment["risk_coefficient"]) + ',' + \
                          str(fund_index.risk_assessment["sharpby"]) + ',' + \
                          str(fund_index.risk_statistics["alpha"]) + ',' +\
                          str(fund_index.risk_statistics["beta"]) + ',' + \
                          str(fund_index.risk_statistics["r_square"]) + ',' + \
                          str(fund_index.stock_total_position["stock_total_position"]) + ',' + \
                          str(fund_index.bond_total_position["bond_total_position"]) + ',' + \
                          str(fund_index.stock_total_position["ten_stock_position"]) + ',' + \
                          str(fund_index.bond_total_position["five_bond_position"]) + '\n'
            csv_file.write(output_line)
| MrDujing/FundCombination | src/export_fund_info.py | export_fund_info.py | py | 2,554 | python | en | code | 48 | github-code | 13 |
195124754 | import csv
import random
from typing import Dict, List
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.core.validators import validate_email
from phishing.management.commands._base import EmailCommand
from phishing.models import Target, TargetPool
from phishstick.settings import PHISHING_TEMPLATES
class Command(EmailCommand):
    """Management command that fans phishing emails out over target groups.

    Targets come from a two-column CSV (address, group); each group is
    shuffled and split round-robin across the configured templates.
    """
    help = 'Send emails to targets.'
    def add_arguments(self, parser):
        # NOTE(review): the argparse `type` lambda opens the CSV file but never
        # closes it; the handle lives until process exit. Acceptable for a
        # short-lived command, but worth confirming.
        parser.add_argument('targets',
                            help='A CSV file with two columns: email address and group.',
                            type=lambda path: csv.reader(open(path, 'r', newline='')))
        parser.add_argument('--ignore-duplicates',
                            help='Ignore duplicate addresses.',
                            action='store_true')
    def handle(self, *args, **options):
        """Entry point: sanity-check the DB, collect input, then send."""
        self.check_database()
        groups = self.get_groups(options['targets'], options['ignore_duplicates'])
        templates = self.get_templates()
        self.send_emails(groups, templates)
    def check_database(self):
        """Warn (and optionally abort) if previous targets already exist."""
        if Target.objects.exists() or TargetPool.objects.exists():
            self.stdout.write(self.style.WARNING(
                'The database is not empty! '
                'Please make sure that you know what you are doing!'))
            self.abort_if_no('Do you still want to continue? [y/n] ')
    def get_groups(self, targets: csv.reader, ignore_duplicates: bool) -> Dict[str, List[str]]:
        """Parse the targets CSV into {group: [shuffled addresses]}.

        Aborts on malformed rows, invalid addresses, or (unless
        ignore_duplicates) repeated addresses.  `abort`/`abort_if_no` are
        inherited from EmailCommand and presumably raise -- TODO confirm.
        """
        groups = {}
        addresses = set()
        for cols in targets:
            try:
                address, group = cols
                validate_email(address)
            except ValueError:
                self.abort(f'Wrong number of columns: {cols}.')
            except ValidationError:
                self.abort(f'Invalid email address: {address!r}.')
            groups.setdefault(group, []).append(address)
            if not ignore_duplicates and address in addresses:
                self.stdout.writelines([
                    self.style.ERROR(f'Email address present twice: {address!r}.'),
                    self.style.WARNING('Use \'--ignore-duplicates\' if this is desired.')])
                self.abort()
            else:
                addresses.add(address)
        # Shuffle within each group so template assignment is randomized.
        for group in groups.values():
            random.shuffle(group)
        self.stdout.write(
            f'Found {len(addresses)} distinct email addresses.\n'
            f'Found {len(groups)} groups:')
        self.stdout.writelines(
            f' * {name!r} ({len(group)} addresses)' for name, group in groups.items())
        self.abort_if_no('Is this correct? [y/n] ')
        return groups
    def get_templates(self) -> List[str]:
        """Show the configured templates and ask for confirmation."""
        templates = list(PHISHING_TEMPLATES)
        self.stdout.write(f'Found {len(templates)} templates:')
        self.stdout.writelines(f' * {template!r}' for template in templates)
        self.abort_if_no('Is this correct? [y/n] ')
        return templates
    def send_emails(self, groups: Dict[str, List[str]], templates: List[str]):
        """Send one email per target, striping each group across templates.

        Failures are collected (not fatal) and handed to save_failures, which
        is assumed to be defined on EmailCommand -- TODO confirm.
        """
        n_targets = sum(len(group) for group in groups.values())
        self.abort_if_no(f'Are you sure you want to send {n_targets} emails? [y/n] ')
        self.stdout.write(self.style.SUCCESS('Let the phishing begin!'))
        i = 0
        failures = []
        for group, addresses in groups.items():
            for template_i, template in enumerate(templates):
                pool, _ = TargetPool.objects.get_or_create(group=group, template=template)
                # every len(templates)-th address, offset by the template index
                for address in addresses[template_i::len(templates)]:
                    target = Target.objects.create()
                    self.stdout.write(f'[{i+1}/{n_targets}]', ending=' ')
                    try:
                        self.send_email(address, target, pool)
                    except Exception as exc:
                        self.stdout.write(self.style.ERROR(f'Failed: {exc!r}'))
                        failures.append((address, target.id, pool.id))
                    i += 1
        self.save_failures(failures)
| tarhses/phishstick | phishing/management/commands/send_emails.py | send_emails.py | py | 4,056 | python | en | code | 1 | github-code | 13 |
3318566956 | import random
import time
import csv
class GA_multi_lines:
"""
this class present a genetic algorithm.
this class resive fitness function and data about the genetic options:
population_size, mutation and number of generations
by the given data, the algorithm try to solve the problem
"""
def __init__(self, generations = 50, population_size=50, mode_mutation=0.04, res_mutation=0.04, operations_pref_len=[]):
self.generations = generations
self.population_size = population_size
self.mode_mutation = mode_mutation
self.res_mutation = res_mutation
self.infeasibles_counter = 0
self.feasibles_counter = 0
self.cross_solutions = 0
self.operations_pref_len = operations_pref_len
    def first_population(self, operations, preferences_function, resources_number, fitness_function):
        """
        Create the initial population of feasible genes.

        operations: dict of Operation, all operation data
        preferences_function: function restricting which operation may come next
        resources_number: number of resources (one sequence is built per resource)
        fitness_function: builds the graph used to score a gene (Bellman-Ford LB)
        return: (population, fitness) -- parallel lists of genes
                ({"modes": [...], "resources": [[...], ...]}) and their scores
        """
        # reset the per-run feasibility statistics
        self.infeasibles_counter = 0
        self.feasibles_counter = 0
        self.cross_solutions = 0
        population = []
        fitness = []
        for _ in range(self.population_size):
            gen, solution = self.create_feasible_gen(operations, preferences_function, resources_number, fitness_function)
            population.append(gen)
            fitness.append(solution)
            print("+1... population =", self.population_size)
        return population, fitness
def check_cross_solution(self, resources_list, modes_list, operations_list):
operations_coef = {str(pos):set() for pos in range(1, len(resources_list[0]) + 1)}
copy_resources_list = [resources[:] for resources in resources_list]
for res_number, operations in enumerate(copy_resources_list, start=1):
index = 0
# remove all not needed resource from the resources list
while index < len(operations):
needed_resource = False
mode_resorces = operations_list[operations[index]].get_mode_by_name(str(modes_list[int(operations[index]) - 1])).resources
needed_resource = any(str(res_number) == resource.number for resource in mode_resorces)
if not needed_resource:
operations.remove(operations[index])
# if the resource is used, check next resource and add 1 to the number of used resources
else:
index += 1
for operations in copy_resources_list:
for pos, op in enumerate(operations[:-1], start=1):
follow_operations = set(operations[pos:])
for follow_op in follow_operations:
if op in operations_coef[follow_op]:
return True
else:
operations_coef[str(op)].update(follow_operations)
return False
def create_feasible_gen(self, operations, preferences_function, resources_number, fitness_function):
while True:
modes = []
resources = [[] for i in range(resources_number)]
# for each operation randomly select mode
for op in operations.values():
modes.append(random.randint(1, len(op.modes)))
# the preferences_function return all operation that can be start after all already done operations
# according to the preferences limits
for resource in range(resources_number):
possible_resources = preferences_function(resources[resource])
while possible_resources:
# randomly choise the next operation from all available operations
res = random.choice(possible_resources)
resources[resource].append(res)
possible_resources = preferences_function(resources[resource])
solution = fitness_function(resources, modes).bellman_ford_LB(0, len(operations) + 1, self.operations_pref_len)
if solution:
# for each gen, save the choisen modes and the operations order
self.feasibles_counter += 1
if self.check_cross_solution(resources, modes, operations):
self.cross_solutions += 1
return {"modes": modes, "resources": resources}, solution
else:
self.infeasibles_counter += 1
if self.infeasibles_counter % 1000000 == 0:
print("infeasibles_counter =", self.infeasibles_counter)
print("feasibles_counter =", self.feasibles_counter, "\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
def __crossover(self, parent_1, parent_2, mode_index, res_index):
"""
crossover between two parents to create 2 new sons.
parent_1: dictionary, gen data
parent_2: dictionary, gen data
mode_index: number, index for the modes crossover
op_index: number, index for the operations crossover
return: dictionary, new son
"""
# mode crossover
# take from 0 to index-1 from the first parent and from index to the end from the second parent
modes = parent_1["modes"][0:mode_index] + parent_2["modes"][mode_index:]
# operation crossover
# take from 0 to index-1 from the first parent all not selected operations from the second parent
resources = [[] for i in range(len(parent_1["resources"]))]
for res_number, res in enumerate(parent_1["resources"]):
resources[res_number] = res[0:res_index]
for p2_res in res:
if p2_res not in resources[res_number]:
resources[res_number].append(p2_res)
# return the new son
return {"modes": modes, "resources": resources}
    def crossover(self, parent_1, parent_2):
        """
        crossover between two parents to create 2 new sons.
        parent_1: dictionary, gen data
        parent_2: dictionary, gen data
        return: 2 dictionary, 2 new sons
        """
        # draw one cut point for the modes and one for the resource orderings;
        # range excludes the list ends so both parents always contribute
        max_index = len(parent_1["modes"]) - 2
        mode_index = random.randint(1, max_index)
        res_index = random.randint(1, max_index)
        # build the two complementary offspring (parents swapped)
        son_1 = self.__crossover(parent_1, parent_2, mode_index, res_index)
        son_2 = self.__crossover(parent_2, parent_1, mode_index, res_index)
        return son_1, son_2
    def mutation(self, son, operations, preferences_function):
        """
        Possibly mutate the son in place: a mode mutation (re-roll one
        operation's mode) and/or a resource-order mutation (move one
        operation earlier in a resource's sequence if preferences allow).

        son: dictionary, son's data ({"modes": [...], "resources": [[...], ...]})
        operations: dict of Operation keyed by 1-based string id
        preferences_function: function returning the operations allowed next
        return: dictionary, the (possibly mutated) son
        """
        # Mode mutation: with probability mode_mutation, re-roll one operation's mode.
        if random.random() <= self.mode_mutation:
            # pick a random operation (operations are keyed "1".."N")
            op = random.randint(0, len(operations) - 1)
            operation = operations[str(op + 1)]
            # re-roll until the mode actually changes (when more than one exists)
            mode = random.randint(1, len(operation.modes))
            while len(operation.modes) > 1 and mode == son["modes"][op]:
                mode = random.randint(1, len(operation.modes))
            son["modes"][op] = mode
        # Resource-order mutation: with probability res_mutation, try to move
        # one operation earlier within a single resource's sequence.
        if random.random() <= self.res_mutation:
            # NOTE(review): randint(1, ...) never selects resource 0, and `index`
            # is drawn from resource 0's length but applied to another resource's
            # list -- if the per-resource lists can differ in length this can
            # raise IndexError. Confirm both are intentional.
            resource_number = random.randint(1, len(son["resources"]) - 1)
            index = random.randint(1, len(son["resources"][0]) - 1)
            # NOTE(review): after a successful insert, `index` goes stale (it
            # then addresses a different element on later iterations) -- confirm
            # the loop should not break after moving the operation.
            for i in range(index):
                if son["resources"][resource_number][index] in preferences_function(son["resources"][resource_number][:i]):
                    son["resources"][resource_number].insert(i, son["resources"][resource_number].pop(index))
        return son
def solve(self, job):
"""
use genetic algorithm on the problem and find the best UB.
job: Job object, all problem data
return: dictionary, {"value": best found ub, "generations": number of generations, "time": run time}
"""
history = []
start = time.time()
# create first population for the algorithm
population, fitness = self.first_population(job.operations, job.next_operations, len(job.resources), job.add_resources_to_bellman_ford_graph)
# calcolate population score by the job fitness function
history.append(sum(fitness) / len(fitness))
for generation in range(self.generations):
print("generation:", generation)
# calcolate the probability of each gen to be selected as parent
probability = [1 / item for item in fitness]
F = sum(probability)
weights = [item / F for item in probability]
# create |population_size| new sons
sons = []
while len(sons) < self.population_size:
parent_1, parent_2 = random.choices(population=population, weights=weights, k=2)
son_1, son_2 = self.crossover(parent_1, parent_2)
son_1 = self.mutation(son_1, job.operations, job.next_operations)
son_2 = self.mutation(son_2, job.operations, job.next_operations)
solution_1 = job.add_resources_to_bellman_ford_graph(son_1["resources"], son_1["modes"]).bellman_ford_LB(0, len(job.operations) + 1, self.operations_pref_len)
if not solution_1:
son_1, solution_1 = self.create_feasible_gen(job.operations, job.next_operations, len(job.resources), job.add_resources_to_bellman_ford_graph)
solution_2 = job.add_resources_to_bellman_ford_graph(son_2["resources"], son_2["modes"]).bellman_ford_LB(0, len(job.operations) + 1, self.operations_pref_len)
if not solution_2:
son_2, solution_2 = self.create_feasible_gen(job.operations, job.next_operations, len(job.resources), job.add_resources_to_bellman_ford_graph)
sons.append(son_1)
sons.append(son_2)
fitness.append(solution_1)
fitness.append(solution_2)
population += sons
new_population = []
new_fitness = []
# take the best |population_size| gens from the population
for item_from_fitness, item_from_population in sorted(zip(fitness, population), key=lambda pair: pair[0]):
new_population.append(item_from_population)
new_fitness.append(item_from_fitness)
population = new_population[:self.population_size]
fitness = new_fitness[:self.population_size]
history.append(sum(fitness) / float(len(fitness)))
# we may stack in local minimom, try to escape by incrise the mutation chance
# if fitness[0] == fitness[-1]:
# break
run_time = time.time() - start
with open("ga.csv", "a+") as f:
writer = csv.writer(f)
writer.writerow(history)
# return the solution value, number of generations, the taken time and the solution draw data
# solution_draw_data = job.find_ub_ga(population[0]["operations"], population[0]["modes"])["to_draw"]
# modify the solution title to the GA run time
# solution_draw_data["title"] = "solution in {:.10f} sec\ncreated nodes = 0, max queue size = 0".format(run_time)
if self.check_cross_solution(population[0]["resources"], population[0]["modes"], job.operations):
print("@@@@@@@@@best_solution_have_cross_resources@@@@@@@@@@@")
feasibles = (self.feasibles_counter / (self.feasibles_counter + self.infeasibles_counter)) * 100
return {"value": fitness[0], "generations": generation, "time": run_time, "to_draw": None, "feasibles": feasibles, "cross_solutions": self.cross_solutions}
| danielifshitz/RSSP | code/genetic_multi_lines.py | genetic_multi_lines.py | py | 12,573 | python | en | code | 1 | github-code | 13 |
35225498920 | # write a function that removes duplicate entries from a list
names = ['larry', 'curly', 'joe', 'adam', 'brian', 'larry', 'joe']
def removeDuplicate(names):
    """Return a copy of *names* with duplicates dropped (first occurrence wins)."""
    seen = []
    for candidate in names:
        if candidate in seen:
            continue
        seen.append(candidate)
    return seen
print(names)
print("after removing duplicates:", removeDuplicate(names))
| Abir-Al-Arafat/Problem-Solving-in-Python | Basic Ones/removeDuplicate.py | removeDuplicate.py | py | 379 | python | en | code | 0 | github-code | 13 |
14742892142 | import os
from dotenv import load_dotenv
import telebot
from brownie import (
Contract,
accounts,
chain,
rpc,
web3,
history,
interface,
Wei,
ZERO_ADDRESS,
)
import time, re, json
load_dotenv()
SSC_BOT_KEY = os.getenv("SSC_BOT_KEY")
USE_DYNAMIC_LOOKUP = os.getenv("USE_DYNAMIC_LOOKUP")
ENV = os.getenv("ENV")
def main():
    """Dry-run a harvest for every SSC strategy on the forked chain and post a
    per-strategy Telegram summary (profit, ratios, basic APR).
    """
    bot = telebot.TeleBot(SSC_BOT_KEY)
    test_group = os.getenv("TEST_GROUP")
    prod_group = os.getenv("PROD_GROUP")
    sscs = lookup_sscs()
    # Address provider -> on-chain oracle used for USD pricing of want tokens.
    addresses_provider = interface.AddressProvider("0x9be19Ee7Bc4099D62737a7255f5c227fBcd6dB93")
    oracle = interface.Oracle(addresses_provider.addressById("ORACLE"))
    strin = ""
    count = 0
    for s in sscs:
        strat = interface.GenericStrategy(s)
        vault = assess_vault_version(strat.vault())
        token = interface.IERC20(vault.token())
        token_price = get_price(oracle, token.address)
        # USD value of want tokens sitting loose on the strategy (tendable).
        usd_tendable = token_price * token.balanceOf(s) / 10**token.decimals()
        if usd_tendable > 100:
            tendable_str = "\nTendable Amount in USD: $"+ "{:,.2f}".format(usd_tendable)
        else:
            tendable_str = ""
        # Impersonate governance so harvest() is authorized on the fork.
        gov = accounts.at(vault.governance(), force=True)
        params = vault.strategies(strat)
        lastTime = params.dict()["lastReport"]
        since_last = int(time.time()) - lastTime
        hours_since_last = since_last/60/60
        desiredRatio = params.dict()["debtRatio"]
        beforeDebt = params.dict()["totalDebt"]
        beforeGain = params.dict()["totalGain"]
        beforeLoss = params.dict()["totalLoss"]
        assets = vault.totalAssets()
        # +1 avoids a division by zero on an empty vault.
        realRatio = beforeDebt/(assets+1)
        # Skip retired strategies: no target ratio and essentially no debt.
        if desiredRatio == 0 and realRatio < 0.01:
            continue
        count = count + 1
        try:
            print("Harvesting strategy: " + s)
            tx = strat.harvest({'from': gov})
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit — consider narrowing to `except Exception`.
            strin = strin + "\n\n" + strat.name() + "\n\U0001F6A8 Failed Harvest!\n" + s + " Last Harvest (h): " + "{:.1f}".format((since_last)/60/60)
            continue
        # Re-read vault accounting to measure what the harvest changed.
        params = vault.strategies(strat)
        profit = params.dict()["totalGain"] - beforeGain
        profit_usd = token_price * profit / 10**token.decimals()
        loss = params.dict()["totalLoss"] - beforeLoss
        debt_delta = params.dict()["totalDebt"] - beforeDebt
        debt_delta_usd = token_price * debt_delta / 10**token.decimals()
        percent = 0
        if beforeDebt > 0:
            if loss > profit:
                percent = -1 * loss / beforeDebt
            else:
                percent = profit / beforeDebt
        # Annualize: ~3.154e7 seconds per year over seconds since last report.
        # NOTE(review): ZeroDivisionError if lastReport did not advance —
        # confirm harvest always bumps the report timestamp.
        over_year = percent * 3.154e+7 / (params.dict()["lastReport"] - lastTime)
        # Set harvest indicator (farmer emoji: man + ZWJ + sheaf of rice)
        # for stale (>200h) or very profitable (> $30k) strategies.
        shouldHarvest = False
        if hours_since_last > 200 or profit_usd > 30_000:
            shouldHarvest = True
        harvestIndicator = ""
        if shouldHarvest:
            harvestIndicator = "\U0001F468" + "\u200D" + "\U0001F33E "
        # Generate display string (Telegram markdown, one section per strategy)
        strin = strin + "\n\n"+harvestIndicator+"[" + strat.name() + "](https://etherscan.io/address/" + s + ")\n"
        strin = strin + s
        strin = strin + " \nLast Harvest (h): " + "{:.1f}".format(hours_since_last)
        strin = strin + "\nProfit on harvest USD: $"+ "{:,.2f}".format(profit_usd)
        strin = strin + '\nRatio (Desired | Real): ' + "{:.2%}".format(desiredRatio/10000) + ' | ' + "{:.2%}".format(realRatio)
        strin = strin + '\nDebt delta: $'+ "{:,.2f}".format(debt_delta_usd)
        strin = strin + "\nBasic APR: " + "{:.1%}".format(over_year)
        strin = strin + tendable_str
    strin = str(count) + " total active strategies found." + strin
    if ENV == "PROD":
        chat_id = prod_group
    else:
        chat_id = test_group
    bot.send_message(chat_id, strin, parse_mode ="markdown", disable_web_page_preview = True)
    #print(strin)
def lookup_sscs():
    """Return the list of single-sided (SSC) strategy addresses.

    With USE_DYNAMIC_LOOKUP == "False" the addresses come from the local
    ssc_list.json cache; otherwise every v2 strategy is fetched on-chain and
    filtered by name ("singlesided" / "ssc").
    """
    if USE_DYNAMIC_LOOKUP == "False":
        # Static mode: read the cached list. Context manager fixes the file
        # handle leak in the original (open without close).
        with open("ssc_list.json", "r", errors="ignore") as f:
            data = json.load(f)
        ssc_strats = data['sscs']
    else:
        # Fetch all v2 strategies and query by name
        addresses_provider = Contract("0x9be19Ee7Bc4099D62737a7255f5c227fBcd6dB93")
        strategies_helper = Contract(addresses_provider.addressById("HELPER_STRATEGIES"))
        v2_strategies = strategies_helper.assetsStrategiesAddresses()
        ssc_strats = []
        for s in v2_strategies:
            strat = interface.GenericStrategy(s)
            name = strat.name().lower()
            # Plain substring membership replaces the two re.search calls.
            if 'singlesided' in name or 'ssc' in name:
                ssc_strats.append(s)
                vault = interface.Vault032(strat.vault())
                print(strat.address, vault.name(), strat.name())
    return ssc_strats
def assess_vault_version(vault):
    """Wrap *vault* in the interface matching its API version.

    Versions newer than 0.3.1 use the Vault032 ABI, older ones Vault031.
    The original constructed interface.Vault032(vault) twice; it is now
    built once and reused.
    """
    # e.g. "0.3.2" -> 32 after stripping dots; > 31 means 0.3.2 or newer.
    # NOTE(review): digit-concatenation assumes single-digit version parts —
    # confirm before relying on it for versions like "0.3.10".
    wrapped = interface.Vault032(vault)
    if int(wrapped.apiVersion().replace(".", "")) > 31:
        return wrapped
    return interface.Vault031(vault)
def get_price(oracle, token):
return oracle.getPriceUsdcRecommended(token) / 10**6 | flashfish0x/telegram_ssc | scripts/test.py | test.py | py | 5,182 | python | en | code | 0 | github-code | 13 |
40105371619 | import subprocess, os, shutil, sys, requests, argparse
from pprint import pprint
# pytube is required for every download path; fail fast with install
# instructions if it is missing or broken.
try:
    from pytube import YouTube# Sure that YouTube and Playlist can be downloaded
except Exception as e:
    print(e)
    print('[Run] pip(/3) install pytube')
    exit()
banner = '''
██╗ ██╗ ██████╗ ██╗ ██╗████████╗██╗ ██╗██████╗ ███████╗██████╗
╚██╗ ██╔╝██╔═══██╗██║ ██║╚══██╔══╝██║ ██║██╔══██╗██╔════╝██╔══██╗
╚████╔╝ ██║ ██║██║ ██║ ██║ ██║ ██║██████╔╝█████╗ ██████╔╝
╚██╔╝ ██║ ██║██║ ██║ ██║ ██║ ██║██╔══██╗██╔══╝ ██╔══██╗
██║ ╚██████╔╝╚██████╔╝ ██║ ╚██████╔╝██████╔╝███████╗██║ ██║
╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝v2.0
'''
print(u'\u001b[34;1m'+banner+'\u001b[0m')
class AdvDownload():
    """Interactive YouTube downloader.

    Prompts the user for quality (HD / SD / audio-only), downloads via
    pytube, and muxes separate HD audio/video streams with ffmpeg.
    """
    def __init__(self,url):
        '''Check the ffmpeg installation, then download a playlist or a single video.'''
        self.checkinstall()
        if 'https://www.youtube.com/playlist' in url:
            from pytube import Playlist
            import re
            self.pl = Playlist(url)
            # Override pytube's playlist URL regex (parsing workaround).
            self.pl._video_regex = re.compile(r"\"url\":\"(/watch\?v=[\w-]*)")
            qual = input('Wish to download (H)D[ 720 & + ] or (S)D[ 720 & - (30FPS mostly)] or just (a)udio (h/s/a): ')[0].lower()
            for url in self.pl.video_urls:
                self.yt = YouTube(url)
                print(f'\nTITLE {self.yt.title}')
                if qual == 'h':
                    self.makeDirs()
                streams = self.getQuality(qual)
                self.downloader(qual)
                if qual == 'a':
                    continue
                self.compile()
        else:
            self.yt = YouTube(url)
            print(f'\nTITLE {self.yt.title}')
            qual = input('Wish to download (H)D[ 720 & + ] or (S)D[ 720 & - (30FPS mostly)] or just (a)udio (h/s/a): ')[0].lower()
            if qual == 'h':
                self.makeDirs()
            streams = self.getQuality(qual)
            self.downloader(qual)
            if qual == 'a':
                sys.exit()
            # NOTE(review): for qual == 's' compile() runs too, but makeDirs()
            # was only called for 'h' — confirm the audio/video dirs exist.
            self.compile()
    def downloader(self, qual):
        '''Download the selected audio and/or video stream(s) for self.yt.'''
        if qual == 'h':
            # Downloading the video
            print('\n {0:<13}{1:>10}{2:>9}'.format('No.','Resolution','fps'))
            for num, stream in enumerate(self.yt.streams.filter(adaptive=True).order_by('resolution').desc()):
                print(f'|-Stream {num:-<{5}} {stream.resolution:->{10}} {stream.fps:->{10}}')
            choice = int(input('Enter the stream number: '))
            print(self.yt.streams.filter(adaptive=True).order_by('resolution').desc()[choice].download('video'))
            # DOWNLOADING VIDEO DONE
            print(self.yt.streams.filter(only_audio=True).order_by('filesize').desc().first().download('audio'))
            # DOWNLOADING AUDIO DONE
        elif qual == 's':
            print('\n {0:<13}{1:>10}{2:>9}'.format('No.','Resolution','fps'))
            for num, stream in enumerate(self.yt.streams.filter(progressive=True).order_by('resolution').desc()):
                # NOTE(review): '\Stream' prints a literal backslash; the HD
                # branch prints '|-Stream' — likely a typo, confirm.
                print(f'\Stream {num:-<{5}} {stream.resolution:->{10}} {stream.fps:->{10}}')
            choice = int(input('Enter the stream number: '))
            print(self.yt.streams.filter(progressive=True).order_by('resolution').desc()[choice].download())
            # DOWNLOAD COMPLETED
            # sys.exit()
        elif qual == 'a':
            ret = self.yt.streams.filter(only_audio=True).order_by('filesize').desc().first().download()
            ext = os.path.splitext(ret)[1]
            # NOTE(review): splitext returns the extension WITH the leading
            # dot ('.mp4'), so ext != 'mp3' is always true — even for .mp3
            # files; probably meant ext != '.mp3'.
            if ext != 'mp3':
                shutil.move(ret,self.yt.title+'.mp3')
            # DOWNLOADED AND EXTENSION CHANGED
            # sys.exit()
    def compile(self):
        '''Mux matching audio/video pairs with ffmpeg, then delete the temp folders.'''
        audiofiles = os.listdir('audio')
        videofiles = os.listdir('video')
        for audiofile in audiofiles:
            audio = audiofile.split('.')[0]
            for videofile in videofiles:
                video = videofile.split('.')[0]
                # Pair files by base name (same title downloaded twice).
                if audio == video:
                    print(f'Compiling {audiofile} with {videofile}')
                    # NOTE(review): a string command without shell=True is
                    # treated as a single executable name on POSIX — this
                    # invocation appears to assume Windows; confirm.
                    subprocess.Popen(f'ffmpeg -i audio/"{audiofile}" -i video/"{videofile}" -c copy "{audiofile}" -y').wait()
                    # COMPILING DONE
        # if input('Delete the uncompiled audio and video files?(y/n)')[0].lower() == 'y':
        shutil.rmtree('video')
        shutil.rmtree('audio')
        #REMOVED THE FOLDERS
    def getQuality(self, qual):
        '''Return the ordered stream query matching the requested quality.'''
        if qual == 's':
            # for stream in self.yt.streams.filter(progressive=True):
            #     print(stream)
            return self.yt.streams.filter(progressive=True).order_by('resolution').desc()
        elif qual == 'h':
            return self.yt.streams.filter(adaptive=True).order_by('resolution').desc()
    def checkinstall(self):
        '''Check the OS and whether ffmpeg is installed; install/download it if not.'''
        platform = sys.platform
        if platform == 'linux':
            print('[+] Linux detected checking for ffmpeg installation')
            # dpkg -s exits non-zero when the package is absent.
            res = subprocess.Popen('dpkg -s ffmpeg',shell=True).wait()
            print('-'*100)
            if res != 0:
                print('ffmpeg not installed')
                subprocess.Popen('sudo apt-get install ffmpeg -y',shell=True).wait()
        elif platform == 'win32':
            print('[+] Windows detected checking for ffmpeg installation')
            if not os.path.exists('C:\FFMPEG\\ffmpeg.exe'):
                print('[+] No installation checking for temp ffmpeg.exe file')
                if 'ffmpeg.exe' not in os.listdir():
                    print('[+] ffmpeg.exe not found')
                    # Fallback: download a bundled ffmpeg.exe into the cwd.
                    files = {'ffmpeg.exe':'https://filebin.net/505d74kxyod1h80c/ffmpeg.exe?t=eh57h9it'}
                    print('Downloading ffmpeg.exe')
                    res = requests.get(files['ffmpeg.exe'])
                    fil = open('ffmpeg.exe','wb')
                    for chunk in res.iter_content(100000):
                        fil.write(chunk)
                    fil.close()# HAVE TO CHECK IF THIS WORKS
                else:
                    print('[+] ffmpeg.exe found in current dir')
    def makeDirs(self):
        '''
        Create the temp dirs holding the not-yet-muxed audio and video files.
        '''
        os.makedirs('audio',exist_ok=True)
        os.makedirs('video',exist_ok=True)
        # print('making temp audio and video dirs for uncompiled data')
def getargs():
    """Build the CLI option parser and return the parsed arguments."""
    cli = argparse.ArgumentParser(description='Download YouTube videos')
    cli.add_argument('-u','--url',help='url of YouTube page')
    return cli.parse_args()
# Entry point: parse the CLI once (the original called getargs() twice) and
# use the idiomatic identity test for None instead of `!= None` (PEP 8).
_args = getargs()
if _args.url is not None:
    y = AdvDownload(_args.url)
else:
    print('[syntax] python '+__file__+' -u https:\\\\YOUTUBE_URL')
| Aryan09005/YouTube-HD-downloader | AdvDownload.py | AdvDownload.py | py | 7,764 | python | en | code | 0 | github-code | 13 |
35241367232 | import functions
import data
import visualizations
# Training and test data
train_df = data.data_open_2("AUDUSD_train.csv")
test_df = data.data_open_2("AUDUSD_test.csv")
# Preprocess the dataframes for strategy evaluation
# Build the indicators
# Exponential Moving Average and Aroon Oscillator
train_df = (
    train_df.pipe(functions.calculate_ema, 20)
    .pipe(functions.calculate_aroon_oscillator)
)
# Parabolic SAR
train_df["Parabolic_SAR"] = functions.calculate_parabolic_sar(train_df)
# Exponential Moving Average and Aroon Oscillator
test_df = (
    test_df.pipe(functions.calculate_ema, 20)
    .pipe(functions.calculate_aroon_oscillator)
)
# Parabolic SAR
test_df["Parabolic_SAR"] = functions.calculate_parabolic_sar(test_df)
# Parameter optimization and backtesting (performed on the training set only)
best_params_train,iter_val_test = functions.best_fit_params(train_df)
print("Best take_profit_ratio:", best_params_train[0])
print("Best stop_loss_ratio:", best_params_train[1])
print("Best position_size:", best_params_train[2])
# Build the portfolio from the strategy with the optimized parameters
# Backtest on the training data
portfolio = functions.run_trading_strategy(train_df,
                                           capital=100000,
                                           take_profit_ratio=best_params_train[0],
                                           stop_loss_ratio=best_params_train[1],
                                           position_size=best_params_train[2])
# And on the test data
portfolio_test = functions.run_trading_strategy(test_df,
                                                capital=100000,
                                                take_profit_ratio=best_params_train[0],
                                                stop_loss_ratio=best_params_train[1],
                                                position_size=best_params_train[2])
# Performance-attribution metrics
MAD_train = functions.portfolio_metrics(portfolio[["timeStamp",
                                        "Long_Entry","Exit","Position",
                                        "Capital","Daily_Profit",
                                        "Take_Profit","Stop_Loss","Returns",
                                        "Cumulative_Returns"]])
MAD_test = functions.portfolio_metrics(portfolio_test[["timeStamp",
                                        "Long_Entry","Exit","Position",
                                        "Capital","Daily_Profit",
                                        "Take_Profit","Stop_Loss","Returns",
                                        "Cumulative_Returns"]])
# Visualizations
# Plot the indicators computed over the time series
# Use the slice from position 1 onward: the Aroon value at index 0 is 0,
# which makes the chart look terrible
train_ind = visualizations.plot_indicators(train_df.iloc[1:,:])
test_ind = visualizations.plot_indicators(test_df.iloc[1:,:])
# Plot the strategy over the time series
trading_strat_train = visualizations.plot_indicators_2(portfolio.iloc[1:,:])
trading_strat_test = visualizations.plot_indicators_2(portfolio_test.iloc[1:,:])
# Plot the convergence of the strategy optimization
conv_graph = visualizations.plot_capital_evolution(iter_val_test) | feramdor/Lab5 | main.py | main.py | py | 3,473 | python | es | code | 0 | github-code | 13 |
43086112012 | # 剑指 Offer II 072. 求平方根
# 给定一个非负整数 x ,计算并返回 x 的平方根,即实现 int sqrt(int x) 函数。
# 正数的平方根有两个,只输出其中的正数平方根。
# 如果平方根不是整数,输出只保留整数的部分,小数部分将被舍去。
# 示例 1:
# 输入: x = 4
# 输出: 2
# 示例 2:
# 输入: x = 8
# 输出: 2
# 解释: 8 的平方根是 2.82842...,由于小数部分将被舍去,所以返回 2
# 提示:
# 0 <= x <= 231 - 1
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return the integer part of sqrt(x) (floor square root).

        Binary-searches for h with h*h <= x < (h+1)*(h+1). Improvements over
        the original: mid**2 is computed once per iteration (was up to three
        times), the search range is tightened to [1, x // 2] for x >= 2, and
        the implicit-None fallthrough is replaced by an explicit return.
        (Comments translated from Chinese.)
        """
        if x <= 1:  # sqrt(0) == 0, sqrt(1) == 1
            return x
        # For x >= 2 the answer lies in [1, x // 2].
        left, right = 1, x // 2
        while left <= right:
            mid = (left + right) // 2
            sq = mid * mid
            if sq <= x < (mid + 1) * (mid + 1):
                return mid
            if sq < x:       # mid too small -> search the upper half
                left = mid + 1
            else:            # mid too large -> search the lower half
                right = mid - 1
        return right  # defensive fallback; the loop always returns for x >= 2
| Guo-xuejian/leetcode-practice | 剑指OfferII072.求平方根.py | 剑指OfferII072.求平方根.py | py | 1,089 | python | zh | code | 1 | github-code | 13 |
18987865477 | import math
import psutil
import os,sys
def findsquares(squares):
winsquarenums = set()
perrow = int(math.sqrt(squares))
for s in range(squares-perrow-1):
if s % perrow != perrow-1:
winsquarenums.add(frozenset({s,s+1,s+perrow,s+perrow+1}))
return winsquarenums
def remove_useless_wsn(winsquarenums):
discardos = set()
for ws1 in winsquarenums:
for ws2 in winsquarenums:
if ws1!=ws2 and ws1.issubset(ws2):
discardos.add(ws2)
for d in discardos:
winsquarenums.discard(d)
def findfivers(squares):
winsquarenums = set()
perrow = int(math.sqrt(squares))
for s in range(squares):
if perrow - (s % perrow) >= 5:
winsquarenums.add(frozenset({s,s+1,s+2,s+3,s+4}))
if perrow - (s // perrow) >= 5:
winsquarenums.add(frozenset({s,s+perrow+1,s+2*(perrow+1),s+3*(perrow+1),s+4*(perrow+1)}))
if perrow - (s // perrow) >= 5:
winsquarenums.add(frozenset({s,s+perrow,s+2*perrow,s+3*perrow,s+4*perrow}))
if (s % perrow) >= 4:
winsquarenums.add(frozenset({s,s+perrow-1,s+2*(perrow-1),s+3*(perrow-1),s+4*(perrow-1)}))
return winsquarenums
def resources_avaliable():
memory = psutil.virtual_memory()
if memory.percent > 97:
return False
return True
room_num = 0
def provide_room_num():
global room_num
room_num+=1
if room_num > 1e7:
room_num = 0
return room_num | yannikkellerde/GABOR | util.py | util.py | py | 1,492 | python | en | code | 3 | github-code | 13 |
32817004919 | from .packages import *
import argparse
import os
__version__ = '0.4.0'
def cli_mode():
    """Interactive menu loop: encrypt/decrypt words or files until the user quits.

    Fixes over the original: the exit message typo ("Existing" -> "Exiting"),
    bare `except:` clauses narrowed to `except Exception` so Ctrl-C still
    interrupts, and the duplicated clear-screen branches folded into one.
    """
    menu = '''\
Num6 - A Powerful Cryptography Tool
1. For word or line encryption
2. For word or line decryption
3. For file encryption enter path
4. For file decryption enter path
0. For stop the programme
00. For clearing the screen
© Copyright collected by Md. Almas Ali\
'''
    print(menu)
    while 1:
        sta = input('\nChoice : ')
        if sta == '1':
            lop = input('Enter your word : ')
            print(f'Output:\n\n{encrypt(lop)}')
        elif sta == '2':
            lop = input('Enter your word : ')
            print(f'Output:\n\n{decrypt(lop)}')
        elif sta == '3':
            # File encryption: fileEn() prompts for the path itself.
            try:
                En = fileEn()
                print(En)
                try:
                    with open('encoded.txt', 'w') as fff:
                        fff.write(En)
                    print('\nFile encoded.txt saved...')
                except Exception:
                    print('Something went wrong !')
            except Exception:
                print('Wrong path, try again !')
        elif sta == '4':
            try:
                De = fileDe()
                print(De)
                try:
                    with open('decoded.txt', 'w') as fff:
                        fff.write(De)
                    print('\nFile decoded.txt saved...')
                except Exception:
                    print('Something went wrong !')
            except Exception:
                print('Wrong path, try again !')
        elif sta == '0':
            exit('Exiting Num6...')
        elif sta == '00':
            # Clear the console (Windows vs POSIX) and reprint the menu.
            os.system('cls' if os.name == 'nt' else 'clear')
            print(menu)
        else:
            print('Wrong selection try again !')
def main_cli():
    """Parse CLI arguments and dispatch to the chosen Num6 mode.

    Fixes over the original: "encrpyt" help-text typo corrected and the bare
    `except:` clauses narrowed to `except Exception`.
    """
    parser = argparse.ArgumentParser(prog='Num6')
    parser.version = __version__  # consumed by action='version' below
    parser.add_argument('-v', '--version',
                        help='show the version information', action='version')
    parser.add_argument('-e', '--encrypt', help='to encrypt data from cli')
    parser.add_argument('-d', '--decrypt', help='to decrypt data from cli')
    parser.add_argument(
        '-p', '--pin', help='set pin for encrypt or decrypt data from cli')
    parser.add_argument(
        '-c', '--cli', help='to use in interactive cli mode', action='store_true')
    parser.add_argument(
        '-g', '--gui', help='to use in interactive GUI mode', action='store_true')
    args = parser.parse_args()
    if args.cli:
        cli_mode()
    elif args.gui:
        # Importing the module launches the GUI.
        from . import num6_gui
    elif args.encrypt:
        # Best-effort: fall back to pin-less encryption when no usable pin
        # was supplied (int() fails on None or a non-numeric value).
        try:
            print(encrypt(args.encrypt, int(args.pin)))
        except Exception:
            print(encrypt(args.encrypt))
    elif args.decrypt:
        try:
            print(decrypt(args.decrypt, int(args.pin)))
        except Exception:
            print(decrypt(args.decrypt))
    elif not any(vars(args).values()):
        # No arguments at all: show usage.
        print('Num6: error: at least expected one argument')
        parser.print_help()
| Almas-Ali/Num6 | num6/cli.py | cli.py | py | 3,139 | python | en | code | 7 | github-code | 13 |
16866469607 | from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# def Pi(num_steps):
# step = 1.0/num_steps
# sum = 0
# for i in range(num_steps):
# x = (i+0.5)*step
# sum += 4.0/(1.0+x**2)
# pi = step*sum
# return pi
# print("The pi estimate from process %d/%d is %s" %(rank, size, Pi(100*(rank+1))))
a = 100;
a = a+100;
data = [(rank+1)**2]
data = comm.gather(data, root = 0)
if rank==0:
print(data)
print(a)
# from time import sleep
# from jug import TaskGenerator
#
# @TaskGenerator
# def is_prime(n):
# sleep(1.)
# for j in range(2, n-1):
# if (n %j) == 0:
# return False
# return True
#
# @TaskGenerator
# def count_primes(ps):
# return sum(ps)
#
# @TaskGenerator
# def write_output(n):
# output = open('output.txt', 'wt')
# output.write("Found {0} primes <= 100.\n".format(n))
# output.close()
#
# primes100 = []
# for n in range(2,101):
# primes100.append(is_prime(n))
#
# n_primes = count_primes(primes100)
# write_output(n_primes) | deepakagrawal/PatientScheduling | prime.py | prime.py | py | 1,087 | python | en | code | 0 | github-code | 13 |
26384329910 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist # 我們 import 自己寫的 mnist.py(要放在同一個資料夾)
FLAGS = None
def placeholder_inputs(batch_size):
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
# 有幾個是預測對的
true_count += sess.run(eval_correct, feed_dict=feed_dict)
# where eval_correct is defined as mnist.evaluation(logits, labels_placeholder)
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
# e.g.
# Num examples: 55000 Num correct: 47973 Precision @ 1: 0.8722
def run_training():
data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images_placeholder,
FLAGS.hidden1,
FLAGS.hidden2) # 可參考 mnist.py 的實作
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder) # 可參考 mnist.py 的實作
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate) # 可參考 mnist.py 的實作
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder) # 可參考 mnist.py 的實作
summary = tf.summary.merge_all() # 用在 tensorboard
init = tf.global_variables_initializer()
# 會把 training 的 checkpoints 記下來
saver = tf.train.Saver()
sess = tf.Session()
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
sess.run(init)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder)
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step) # 寫到 log 內
summary_writer.flush()
# 這邊有教怎麼存 checkpoint, and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt') # 重要!
print( "checkpoint saved in : " + checkpoint_file )
saver.save(sess, checkpoint_file, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train) # 只有換 dataset 而已
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
def main(_): # 看過了
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=2000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', '../'),
'mnist/input_data'),
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', './tmp'),
'mnist/logs/fully_connected_feed'),
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | AdrianHsu/tensorflow-basic-models | mechanics101/fully_connected_feed.py | fully_connected_feed.py | py | 6,488 | python | en | code | 0 | github-code | 13 |
44276254198 | import random
home = input('Кто играет дома?: ')
visitor = input('Кто играет в гостях?: ')
result = []
for i in range(100):
preres = random.randint(0, 2)
result.append(preres)
print(result)
homeWin = result.count(1)
visitorWin = result.count(2)
draw = result.count(0)
total = [homeWin, visitorWin, draw]
print(total)
max = max(total)
print(max)
if homeWin == max:
print('Победит команда ' + home)
elif visitorWin == max:
print('Победит команда ' + visitor)
else:
print('Ничья')
| novikoph/sandbox | super.py | super.py | py | 580 | python | ru | code | 0 | github-code | 13 |
26149420283 | import torch
from torch.utils.data import Dataset
import torch.nn.functional as func
import os
from glob import glob
import h5py
import cv2
from tqdm import tqdm
import numpy as np
import random
import yaml
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
plt.style.use('seaborn-whitegrid')
# local modules
from dataloader.base_dataset import BaseDataset
from dataloader.encodings import *
from myutils.vis_events.visualization import *
class H5Dataset(BaseDataset):
    def __init__(self, h5_file_path, config):
        """Open the HDF5 event file at *h5_file_path* and configure the dataset.

        The three setup calls are order-dependent: set_data_scale() fixes the
        prefixes/resolutions that load_metadata() reads, and load_metadata()
        provides the event counts/duration that set_data_mode() needs.
        """
        super().__init__()
        self.config = config
        self.h5_file_path = h5_file_path
        self.set_data_scale()
        self.load_metadata()
        self.set_data_mode()
def set_data_scale(self):
self.h5_file = h5py.File(self.h5_file_path, 'r')
self.need_gt_events = self.config.get('need_gt_events', False)
self.real_world_test = self.config.get('real_world_test', False)
self.custom_resolution = self.config.get('custom_resolution', None)
self.dataset_length = self.config.get('dataset_length', None)
self.add_noise = self.config.get('add_noise', {'enabled': False})
self.sensor_resolution = self.h5_file.attrs['sensor_resolution'].tolist()
self.scale = self.config['scale']
self.ori_scale = self.config['ori_scale']
self.gt_sensor_resolution = None
self.gt_prex = None
if self.real_world_test:
if self.ori_scale == 'down8' and not self.need_gt_events:
self.inp_sensor_resolution = [round(i / 8) for i in self.sensor_resolution]
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'down8_real'
self.gt_prex = self.inp_prex
if self.scale == 2:
self.gt_sensor_resolution = [round(i / 4) for i in self.sensor_resolution]
elif self.scale == 4:
self.gt_sensor_resolution = [round(i / 2) for i in self.sensor_resolution]
elif self.scale == 8:
self.gt_sensor_resolution = self.sensor_resolution
else:
self.gt_sensor_resolution = self.sensor_resolution
else:
raise Exception(f'Error real world test!')
elif self.ori_scale == 'ori':
self.inp_sensor_resolution = self.sensor_resolution
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'ori'
if not self.need_gt_events:
self.gt_sensor_resolution = [round(i * self.scale) for i in self.inp_sensor_resolution]
self.gt_prex = self.inp_prex
elif self.scale == 1:
self.gt_sensor_resolution = self.sensor_resolution
self.gt_prex = 'ori'
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
elif self.ori_scale == 'down2':
self.inp_sensor_resolution = [round(i / 2) for i in self.sensor_resolution]
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'down2'
if not self.need_gt_events:
self.gt_sensor_resolution = [round(i * self.scale) for i in self.inp_sensor_resolution]
self.gt_prex = self.inp_prex
elif self.scale == 2:
self.gt_sensor_resolution = self.sensor_resolution
self.gt_prex = 'ori'
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
elif self.ori_scale == 'down4':
self.inp_sensor_resolution = [round(i / 4) for i in self.sensor_resolution]
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'down4'
if not self.need_gt_events:
self.gt_sensor_resolution = [round(i * self.scale) for i in self.inp_sensor_resolution]
self.gt_prex = self.inp_prex
elif self.scale == 2:
self.gt_sensor_resolution = [round(i / 2) for i in self.sensor_resolution]
self.gt_prex = 'down2'
elif self.scale == 4:
self.gt_sensor_resolution = self.sensor_resolution
self.gt_prex ='ori'
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
elif self.ori_scale == 'down8':
self.inp_sensor_resolution = [round(i / 8) for i in self.sensor_resolution]
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'down8'
if not self.need_gt_events:
self.gt_sensor_resolution = [round(i * self.scale) for i in self.inp_sensor_resolution]
self.gt_prex = self.inp_prex
elif self.scale == 2:
self.gt_sensor_resolution = [round(i / 4) for i in self.sensor_resolution]
self.gt_prex = 'down4'
elif self.scale == 4:
self.gt_sensor_resolution = [round(i / 2) for i in self.sensor_resolution]
self.gt_prex ='down2'
elif self.scale == 8:
self.gt_sensor_resolution = self.sensor_resolution
self.gt_prex ='ori'
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
elif self.ori_scale == 'down16':
self.inp_sensor_resolution = [round(i / 16) for i in self.sensor_resolution]
self.inp_down_sensor_resolution = [round(i / self.scale) for i in self.inp_sensor_resolution]
self.inp_prex = 'down16'
if not self.need_gt_events:
self.gt_sensor_resolution = [round(i * self.scale) for i in self.inp_sensor_resolution]
self.gt_prex = self.inp_prex
elif self.scale == 2:
self.gt_sensor_resolution = [round(i / 8) for i in self.sensor_resolution]
self.gt_prex = 'down8'
elif self.scale == 4:
self.gt_sensor_resolution = [round(i / 4) for i in self.sensor_resolution]
self.gt_prex ='down4'
elif self.scale == 8:
self.gt_sensor_resolution = [round(i / 2) for i in self.sensor_resolution]
self.gt_prex ='down2'
elif self.scale == 16:
self.gt_sensor_resolution = self.sensor_resolution
self.gt_prex ='ori'
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
else:
raise Exception(f'Error scale setting: scale {self.scale}, ori_scale {self.ori_scale}')
def load_metadata(self):
self.time_bins = self.config['time_bins']
self.num_events = len(self.h5_file[f'{self.inp_prex}_events']['ts'][:])
self.num_gt_events = len(self.h5_file[f'{self.gt_prex}_events']['ts'][:]) if self.need_gt_events else None
self.t0 = self.h5_file[f'{self.inp_prex}_events']['ts'][0]
self.tk = self.h5_file[f'{self.inp_prex}_events']['ts'][-1]
self.duration = self.tk - self.t0
self.hot_events = torch.zeros(self.inp_sensor_resolution)
self.hot_idx = 0
self.need_gt_frame = self.config.get('need_gt_frame', False)
if self.need_gt_frame:
self.gt_frame_ts = []
for img_name in self.h5_file['ori_images']:
self.gt_frame_ts.append(self.h5_file['ori_images/{}'.format(img_name)].attrs['timestamp'])
def set_data_mode(self):
self.data_mode = self.config['mode']
self.window = self.config['window']
self.sliding_window = self.config['sliding_window']
if self.data_mode == 'events':
max_length = max(int(self.num_events / (self.window - self.sliding_window)), 0)
if self.dataset_length is not None:
self.length = self.dataset_length if self.dataset_length <= max_length else max_length
else:
self.length = max_length
self.event_indices, self.gt_event_indices = self.compute_k_indices()
elif self.data_mode == 'time':
max_length = max(int(self.duration / (self.window - self.sliding_window)), 0)
if self.dataset_length is not None:
self.length = self.dataset_length if self.dataset_length <= max_length else max_length
else:
self.length = max_length
self.event_indices, self.gt_event_indices = self.compute_timeblock_indices()
elif self.data_mode == 'frame':
max_length = len(self.h5_file['ori_images']) - 1
self.num_frames = len(self.h5_file['ori_images'])
if self.dataset_length is not None:
self.length = self.dataset_length if self.dataset_length <= max_length else max_length
else:
self.length = max_length
self.event_indices, self.gt_event_indices = self.compute_frame_indices()
else:
raise Exception("Invalid data mode chosen ({})".format(self.data_mode))
if self.length == 0:
raise Exception("Current voxel generation parameters lead to sequence length of zero")
def compute_k_indices(self):
    """For each block of k events, record [start, end] input-event indices
    (and, when GT events are needed, the matching GT index ranges)."""
    k_indices, gt_k_indices = [], []
    stride = self.window - self.sliding_window
    for i in range(self.__len__()):
        start = stride * i
        # Clamp the window end to the last valid event index.
        end = min(start + self.window, self.num_events - 1)
        if self.need_gt_events:
            gt_k_indices.append(list(self.get_gt_event_indices_num(start, end)))
        k_indices.append([start, end])
    return k_indices, gt_k_indices
def compute_timeblock_indices(self):
    """For each block of time, record [start, end] input-event indices
    (and, when GT events are needed, the matching GT index ranges)."""
    timeblock_indices, gt_timeblock_indices = [], []
    stride = self.window - self.sliding_window
    start_idx = 0
    for i in range(self.__len__()):
        # Absolute timestamp where this window ends.
        window_end = self.t0 + stride * i + self.window
        end_idx = self.find_ts_index(window_end)
        if self.need_gt_events:
            gt_timeblock_indices.append(list(self.get_gt_event_indices_num(start_idx, end_idx)))
        timeblock_indices.append([start_idx, end_idx])
        start_idx = end_idx
    return timeblock_indices, gt_timeblock_indices
def compute_frame_indices(self):
    """For each GT frame timestamp (up to self.length), record [start, end]
    input-event indices (and, when needed, the matching GT index ranges)."""
    frame_indices, gt_frame_indices = [], []
    start_idx = 0
    for ts in self.gt_frame_ts[:self.length]:
        end_idx = self.find_ts_index(ts)
        if self.need_gt_events:
            gt_frame_indices.append(list(self.get_gt_event_indices_num(start_idx, end_idx)))
        frame_indices.append([start_idx, end_idx])
        start_idx = end_idx
    return frame_indices, gt_frame_indices
# for img_name in self.h5_file['ori_images']:
# end_idx = self.h5_file['ori_images/{}'.format(img_name)].attrs['event_idx']
# gt_idx0, gt_idx1 = self.get_gt_event_indices_num(start_idx, end_idx)
# # gt_idx0, gt_idx1 = self.get_gt_event_indices_time(start_idx, end_idx)
# frame_indices.append([start_idx, end_idx])
# gt_frame_indices.append([gt_idx0, gt_idx1])
# start_idx = end_idx
# return frame_indices, gt_frame_indices
def find_ts_index(self, timestamp):
    """Binary-search the input event timestamps for `timestamp` and return
    the index, clamped to the last valid event index."""
    ts_dset = self.h5_file[f'{self.inp_prex}_events/ts'][:]
    idx = self.binary_search_h5_dset(ts_dset, timestamp)
    return min(idx, self.num_events - 1)
def __getitem__(self, index, Pause=False, seed=None):
    """Assemble one training sample: raw events plus several encodings
    (stack / count images at input, scaled, downsampled and optional custom
    resolutions), optional noise, GT events and GT/raw frames.

    index: sample index into the precomputed event index ranges.
    Pause: when True, replace the input events with an empty 4x1 tensor
           (used by SequenceDataset to simulate a paused camera).
    seed:  shared RNG seed so paired input/GT augmentations stay consistent.
    Returns a dict of torch tensors (see the `item` keys below).
    """
    if seed is None:
        seed = random.randint(0, 2**32)
    idx0, idx1 = self.get_event_indices(index)
    if self.need_gt_events:
        gt_idx0, gt_idx1 = self.get_gt_event_indices(index)
        # gt_idx0, gt_idx1 = self.get_gt_event_indices_num(idx0, idx1)
        # gt_idx0, gt_idx1 = self.get_gt_event_indices_time(idx0, idx1)
    # events
    inp_events = self.get_events(idx0, idx1)
    if self.config['data_augment']['enabled']:
        inp_events = self.augment_event(inp_events, self.inp_sensor_resolution, seed)
    inp_events_torch = self.event_formatting(inp_events)
    if self.need_gt_events:
        # Same seed as the input events so both sides flip identically.
        gt_events = self.get_gt_events(gt_idx0, gt_idx1)
        if self.config['data_augment']['enabled']:
            gt_events = self.augment_event(gt_events, self.gt_sensor_resolution, seed)
        gt_events_torch = self.event_formatting(gt_events)
    else:
        gt_events_torch = torch.zeros([4, 1])
    # add noise
    if self.add_noise['enabled']:
        noise = self.add_noise_event(self.window, self.inp_sensor_resolution, seed,
            noise_level=self.add_noise['noise_level'])
        inp_events_torch = torch.cat([inp_events_torch, noise], dim=1)
    # gt frame
    gt_img = None
    gt_img_torch = torch.zeros([1] + self.gt_sensor_resolution)
    gt_img_inp_size_torch = torch.zeros([1] + self.inp_sensor_resolution)
    if self.need_gt_frame:
        gt_img = self.get_gt_frame(idx0, idx1)
        if self.config['data_augment']['enabled']:
            gt_img = self.augment_frame(gt_img, seed)
        # cv2.resize takes (width, height), hence the [::-1].
        gt_img_torch = self.frame_formatting(cv2.resize(gt_img, dsize=self.gt_sensor_resolution[::-1], interpolation=cv2.INTER_CUBIC))
        gt_img_inp_size_torch = self.frame_formatting(cv2.resize(gt_img, dsize=self.inp_sensor_resolution[::-1], interpolation=cv2.INTER_CUBIC))
    frame = None
    frame_torch = torch.zeros([1] + self.gt_sensor_resolution)
    if self.data_mode == 'frame':
        frame = self.get_frame(index)
        if self.config['data_augment']['enabled']:
            frame = self.augment_frame(frame, seed)
        frame_torch = self.frame_formatting(cv2.resize(frame, dsize=self.gt_sensor_resolution[::-1], interpolation=cv2.INTER_CUBIC))
    # Pause
    if Pause:
        inp_events_torch = torch.zeros([4, 1])
    # add noise
    # if self.add_noise['enabled']:
    #     inp_cnt_noise = self.add_noise_cnt(size=[2]+self.inp_sensor_resolution, seed=seed,
    #                         noise_std=self.config['add_noise']['noise_std'], noise_fraction=self.config['add_noise']['noise_fraction'])
    #     gt_cnt_noise = self.add_noise_cnt(size=[2]+self.gt_sensor_resolution, seed=seed,
    #                         noise_std=self.config['add_noise']['noise_std'], noise_fraction=self.config['add_noise']['noise_fraction'])
    #     inp_stack_noise = self.add_noise_stack(size=[int(self.time_bins)]+self.inp_sensor_resolution, seed=seed,
    #                         noise_std=self.config['add_noise']['noise_std'], noise_fraction=self.config['add_noise']['noise_fraction'])
    #     gt_stack_noise = self.add_noise_stack(size=[int(self.time_bins)]+self.gt_sensor_resolution, seed=seed,
    #                         noise_std=self.config['add_noise']['noise_std'], noise_fraction=self.config['add_noise']['noise_fraction'])
    # else:
    #     inp_cnt_noise = torch.zeros([2]+self.inp_sensor_resolution)
    #     gt_cnt_noise = torch.zeros([2]+self.gt_sensor_resolution)
    #     inp_stack_noise = torch.zeros([int(self.time_bins)]+self.inp_sensor_resolution)
    #     gt_stack_noise = torch.zeros([int(self.time_bins)]+self.gt_sensor_resolution)
    # convert events
    # inp_event_voxel = self.create_voxel_encoding(inp_events_torch, self.inp_sensor_resolution)
    inp_event_stack = self.create_stack_encoding(inp_events_torch, self.inp_sensor_resolution)# + inp_stack_noise
    inp_event_cnt = self.create_cnt_encoding(inp_events_torch, self.inp_sensor_resolution)# + inp_cnt_noise
    # Naive upsampling baselines of the input encodings.
    inp_bicubic_cnt = func.interpolate(inp_event_cnt.unsqueeze(0), size=self.gt_sensor_resolution, mode='bicubic', align_corners=False).squeeze(0)
    inp_bicubic_stack = func.interpolate(inp_event_stack.unsqueeze(0), size=self.gt_sensor_resolution, mode='bicubic', align_corners=False).squeeze(0)
    inp_near_cnt = func.interpolate(inp_event_cnt.unsqueeze(0), size=self.gt_sensor_resolution, mode='nearest').squeeze(0)
    inp_near_stack = func.interpolate(inp_event_stack.unsqueeze(0), size=self.gt_sensor_resolution, mode='nearest').squeeze(0)
    inp_normalized_events = self.create_normalized_events(inp_events_torch, self.inp_sensor_resolution)
    # num_point = inp_normalized_events.size()[1]
    # inp_scaled_normalized_events = inp_normalized_events.unsqueeze(2).repeat(1, 1, self.scale**2).view(-1, self.scale**2*num_point)
    # inp_scaled_events = self.create_scaled_encoding(inp_normalized_events, self.gt_sensor_resolution, 'events')
    inp_scaled_cnt = self.create_scaled_encoding(inp_normalized_events, self.gt_sensor_resolution, 'cnt')# + gt_cnt_noise
    inp_scaled_stack = self.create_scaled_encoding(inp_normalized_events, self.gt_sensor_resolution, 'stack')# + gt_stack_noise
    # inp_event_pol_mask = self.create_polarity_mask(inp_events_torch[-1])
    inp_down_cnt, inp_down_scaled_cnt = self.create_unsupervised_data(inp_normalized_events)
    gt_event_stack = self.create_stack_encoding(gt_events_torch, self.gt_sensor_resolution)
    gt_event_cnt = self.create_cnt_encoding(gt_events_torch, self.gt_sensor_resolution)
    # gt_normalized_events = self.create_normalized_events(gt_events_torch, self.gt_sensor_resolution)
    if self.custom_resolution is not None:
        inp_custom_cnt, inp_custom_scaled_cnt, inp_custom_down_cnt, inp_custom_down_scaled_cnt, gt_custom_cnt \
            = self.create_custom_data(inp_event_cnt, inp_scaled_cnt, inp_down_cnt, inp_down_scaled_cnt, gt_event_cnt)
        # = self.create_custom_data(inp_events_torch, gt_events_torch)
    else:
        inp_custom_cnt, inp_custom_scaled_cnt, inp_custom_down_cnt, inp_custom_down_scaled_cnt, gt_custom_cnt \
            = [torch.zeros_like(inp_event_cnt) for _ in range(5)]
    # hot pixel removal
    # if self.config["hot_filter"]["enabled"]:
    #     hot_mask = self.create_hot_mask(inp_events_torch, self.inp_sensor_resolution)
    #     hot_mask_voxel = torch.stack([hot_mask] * self.time_bins, axis=0)
    #     hot_mask_cnt = torch.stack([hot_mask] * 2, axis=0)
    #     inp_event_voxel = inp_event_voxel * hot_mask_voxel
    #     inp_event_cnt = inp_event_cnt * hot_mask_cnt
    item = {
        # 'inp_voxel': inp_event_voxel, # TBxHxW
        'inp_stack': inp_event_stack, # TBxHxW
        'inp_cnt': inp_event_cnt, # 2xHxW, 0 for positive, 1 for negtive
        'inp_bicubic_cnt': inp_bicubic_cnt,
        'inp_bicubic_stack': inp_bicubic_stack,
        'inp_near_cnt': inp_near_cnt,
        'inp_near_stack': inp_near_stack,
        # 'inp_events': inp_events_torch.transpose(0, 1), # Nx4: x, y, t, p
        # 'inp_normalized_events': inp_normalized_events.transpose(0, 1), # Nx4: x, y, t, p
        # 'inp_scaled_normalized_events': inp_scaled_normalized_events.transpose(0, 1), # rNx4: x, y, t, p
        # 'inp_scaled_events': inp_scaled_events.transpose(0, 1), # rNx4: x, y, t, p
        'inp_scaled_cnt': inp_scaled_cnt, # 2xkHxkW, 0 for positive, 1 for negtive
        'inp_scaled_stack': inp_scaled_stack, # TBxkHxkW
        'inp_down_cnt': inp_down_cnt, # 2xH/kxW/k, 0 for positive, 1 for negtive
        'inp_down_scaled_cnt': inp_down_scaled_cnt, # 2xHxW, 0 for positive, 1 for negtive
        # 'inp_pol_mask': inp_event_pol_mask, # Nx2, 0 for positve mask, 1 for negative mask
        'inp_custom_cnt': inp_custom_cnt,
        'inp_custom_scaled_cnt': inp_custom_scaled_cnt,
        'inp_custom_down_cnt': inp_custom_down_cnt,
        'inp_custom_down_scaled_cnt': inp_custom_down_scaled_cnt,
        'gt_custom_cnt': gt_custom_cnt,
        'gt_stack': gt_event_stack, # TBxkHxkW
        # 'gt_events': gt_events_torch.transpose(0, 1), # rNx4: x, y, t, p
        'gt_cnt': gt_event_cnt, # 2xkHxkW, 0 for positive, 1 for negtive
        # 'gt_normalized_events': gt_normalized_events.transpose(0, 1), # rNx4: x, y, t, p
        'gt_img': gt_img_torch, # 1xkHxkW, 0~1
        'gt_inp_size_img': gt_img_inp_size_torch, # 1xHxW, 0~1
        'frame': frame_torch # 1xkHxkW, 0~1
    }
    return item
def __len__(self):
    """Number of samples, as computed by set_data_mode()."""
    return self.length
def get_event_indices(self, index):
    """Return the (start, end) input-event indices for sample `index`,
    raising if the precomputed range falls outside the event array."""
    idx0, idx1 = self.event_indices[index]
    if idx0 < 0 or idx1 >= self.num_events:
        raise Exception("WARNING: Event indices {},{} out of bounds 0,{}".format(idx0, idx1, self.num_events))
    return idx0, idx1
def get_gt_event_indices(self, index):
    """Return the (start, end) GT-event indices for sample `index`,
    raising if the precomputed range falls outside the GT event array."""
    gt_idx0, gt_idx1 = self.gt_event_indices[index]
    if gt_idx0 < 0 or gt_idx1 >= self.num_gt_events:
        raise Exception("WARNING: Gt event indices {},{} out of bounds 0,{}".format(gt_idx0, gt_idx1, self.num_gt_events))
    return gt_idx0, gt_idx1
def get_gt_event_indices_time(self, idx0, idx1):
    """Map an input-event index range to the GT-event range covering the
    same time span, by binary-searching the GT timestamps."""
    inp_ts = self.h5_file[f'{self.inp_prex}_events/ts']
    gt_ts = self.h5_file[f'{self.gt_prex}_events/ts'][:]
    gt_idx0 = max(self.binary_search_h5_dset(gt_ts, inp_ts[idx0]), 0)
    gt_idx1 = min(self.binary_search_h5_dset(gt_ts, inp_ts[idx1]), self.num_gt_events - 1)
    if gt_idx0 < 0 or gt_idx1 >= self.num_gt_events:
        raise Exception("WARNING: GT event indices {},{} out of bounds 0,{}".format(gt_idx0, gt_idx1, self.num_gt_events))
    return gt_idx0, gt_idx1
def get_gt_event_indices_num(self, idx0, idx1):
    """Map an input-event index range to a GT-event range that starts at the
    same timestamp and holds scale^2 times as many events (clamped to the
    valid GT index range)."""
    num_gt_events = self.scale ** 2 * (idx1 - idx0)
    t0 = self.h5_file[f'{self.inp_prex}_events/ts'][idx0]
    gt_idx0 = self.binary_search_h5_dset(self.h5_file[f'{self.gt_prex}_events/ts'][:], t0)
    if gt_idx0 < 0:
        gt_idx0 = 0
    gt_idx1 = gt_idx0 + num_gt_events
    if gt_idx1 > self.num_gt_events - 1:
        # Slide the whole window back so it still contains num_gt_events events.
        gt_idx1 = self.num_gt_events - 1
        gt_idx0 = gt_idx1 - num_gt_events
    if gt_idx0 < 0 or gt_idx1 >= self.num_gt_events:
        raise Exception("WARNING: GT event indices {},{} out of bounds 0,{}".format(gt_idx0, gt_idx1, self.num_gt_events))
    return gt_idx0, gt_idx1
def get_gt_frame(self, event_idx0, event_idx1):
    """Return the GT frame whose timestamp is closest to the middle of the
    given input-event window (index clamped to the stored frame range)."""
    mid_idx = int((event_idx0 + event_idx1) // 2)
    mid_ts = self.h5_file[f'{self.inp_prex}_events/ts'][mid_idx]
    img_idx = self.binary_search_h5_dset(self.gt_frame_ts, mid_ts)
    img_idx = min(max(img_idx, 0), len(self.gt_frame_ts) - 1)
    return self.h5_file['ori_images/image{:09d}'.format(img_idx)][:]
def get_frame(self, index):
    """Return frame `index` from the 'ori_images' group, fully read via [:]."""
    return self.h5_file['ori_images']['image{:09d}'.format(index)][:]
def get_events(self, idx0, idx1):
    """Read input events [idx0:idx1] and return a 4xN array with rows x, y, t, p."""
    group = f'{self.inp_prex}_events'
    channels = [self.h5_file[f'{group}/{name}'][idx0:idx1] for name in ('xs', 'ys', 'ts', 'ps')]
    return np.concatenate([c[np.newaxis, ...] for c in channels], axis=0)
def get_gt_events(self, idx0, idx1):
    """Read GT events [idx0:idx1] and return a 4xN array with rows x, y, t, p."""
    group = f'{self.gt_prex}_events'
    channels = [self.h5_file[f'{group}/{name}'][idx0:idx1] for name in ('xs', 'ys', 'ts', 'ps')]
    return np.concatenate([c[np.newaxis, ...] for c in channels], axis=0)
def create_normalized_events(self, events, sensor_resolution):
    """Normalize event coordinates to [0, 1): x by sensor width, y by height.

    events: torch.tensor, 4xN [x, y, t, p]; t and p pass through unchanged.
    return: float tensor, 4xN [x, y, t, p]
    """
    height, width = sensor_resolution[0], sensor_resolution[1]
    xs = events[0] / width
    ys = events[1] / height
    return torch.stack([xs, ys, events[2], events[3]]).float()
def create_scaled_encoding(self, normalized_events, sensor_resolution, mode):
    """Rescale normalized events to `sensor_resolution` and encode them.

    normalized_events: torch.tensor, 4xN [x, y, t, p] with x, y in [0, 1).
    mode: 'cnt' -> polarity count image, 'stack' -> temporal stack,
          'events' -> raw 4xN tensor with integer pixel coordinates.
    """
    xs, ys, ts, ps = normalized_events
    px = xs * sensor_resolution[1]
    py = ys * sensor_resolution[0]
    if mode == 'cnt':
        return events_to_channels(px, py, ps, sensor_size=sensor_resolution)
    if mode == 'stack':
        return events_to_stack_no_polarity(px, py, ts, ps, B=self.time_bins, sensor_size=sensor_resolution)
    if mode == 'events':
        return torch.stack([px.long(), py.long(), ts, ps], dim=0)
    raise Exception(f'mode: {mode} is NOT supported!')
def create_unsupervised_data(self, normalized_events):
    """Build downsampled count images for unsupervised training.

    normalized_events: torch.tensor, 4xN [x, y, t, p] with x, y in [0, 1).
    return: (count image at the downsampled resolution,
             the same events re-encoded back at the input resolution),
            both divided by scale^2 to keep counts comparable.
    """
    xs, ys, ts, ps = normalized_events
    down_res = self.inp_down_sensor_resolution
    down_events = torch.stack(
        [(xs * down_res[1]).long(), (ys * down_res[0]).long(), ts, ps], dim=0)
    down_norm = self.create_normalized_events(down_events, down_res)
    scale_sq = self.scale ** 2
    inp_down_cnt = self.create_scaled_encoding(down_norm, down_res, mode='cnt') // scale_sq
    inp_down_scaled_cnt = self.create_scaled_encoding(down_norm, self.inp_sensor_resolution, mode='cnt') // scale_sq
    return inp_down_cnt, inp_down_scaled_cnt
# def create_custom_data(self, inp_events_torch, gt_events_torch):
# """
# inp_events_torch: torch.tensor, 4xN [x, y, t, p]
# gt_events_torch: torch.tensor, 4xN [x, y, t, p]
# return: inp_custom_cnt, gt_custom_cnt
# """
# custom_factor = (float(self.inp_sensor_resolution[0]) / self.custom_resolution[0]) * (float(self.inp_sensor_resolution[1]) / self.custom_resolution[1])
# # inp
# inp_normalized_events = self.create_normalized_events(inp_events_torch, self.inp_sensor_resolution)
# inp_custom_events = self.create_scaled_encoding(inp_normalized_events, self.custom_resolution, 'events')
# inp_custom_normalized_events = self.create_normalized_events(inp_custom_events, self.custom_resolution)
# inp_custom_cnt = self.create_scaled_encoding(inp_custom_normalized_events, self.custom_resolution, 'cnt') // custom_factor
# inp_custom_scaled_cnt = self.create_scaled_encoding(inp_custom_normalized_events, [i * self.scale for i in self.custom_resolution], 'cnt') // custom_factor
# # inp down
# down_size = [round(i / self.scale) for i in self.custom_resolution]
# inp_custom_down_events = self.create_scaled_encoding(inp_custom_normalized_events, down_size, 'events')
# inp_custom_down_normalized_events = self.create_normalized_events(inp_custom_down_events, down_size)
# inp_custom_down_cnt = self.create_scaled_encoding(inp_custom_down_normalized_events, down_size, 'cnt') // custom_factor // self.scale**2
# inp_custom_down_scaled_cnt = self.create_scaled_encoding(inp_custom_down_normalized_events, self.custom_resolution, 'cnt') // custom_factor // self.scale**2
# # gt
# gt_normalized_events = self.create_normalized_events(gt_events_torch, self.gt_sensor_resolution)
# gt_custom_cnt = self.create_scaled_encoding(gt_normalized_events, [i * self.scale for i in self.custom_resolution], 'cnt') // custom_factor
# return inp_custom_cnt, inp_custom_scaled_cnt, inp_custom_down_cnt, inp_custom_down_scaled_cnt, gt_custom_cnt
def create_custom_data(self, inp_cnt, inp_scaled_cnt, inp_down_cnt, inp_down_scaled_cnt, gt_cnt):
    """Bicubic-resample the five count images to the custom resolution family
    (custom, custom*scale, custom/scale) and round back to integer counts."""
    def _resize(tensor, size):
        resized = func.interpolate(tensor.unsqueeze(0), size=size, mode='bicubic', align_corners=False)
        return resized.squeeze(0).round()
    up_size = [i * self.scale for i in self.custom_resolution]
    down_size = [round(i / self.scale) for i in self.custom_resolution]
    return (_resize(inp_cnt, self.custom_resolution),
            _resize(inp_scaled_cnt, up_size),
            _resize(inp_down_cnt, down_size),
            _resize(inp_down_scaled_cnt, self.custom_resolution),
            _resize(gt_cnt, up_size))
def create_voxel_encoding(self, events, sensor_resolution):
"""
events: torch.tensor, 4xN [x, y, t, p]
return: voxel: torch.tensor, B x H x W
"""
xs, ys, ts, ps = events[0], events[1], events[2], events[3]
# return events_to_voxel_torch(xs, ys, ts, ps, B=self.time_bins, sensor_size=sensor_resolution)
return events_to_voxel(xs, ys, ts, ps, num_bins=self.time_bins, sensor_size=sensor_resolution)
def create_stack_encoding(self, events, sensor_resolution):
    """Encode events (4xN tensor: x, y, t, p) as a B x H x W temporal stack."""
    xs, ys, ts, ps = events
    return events_to_stack_no_polarity(xs, ys, ts, ps, B=self.time_bins, sensor_size=sensor_resolution)
def create_cnt_encoding(self, events, sensor_resolution):
    """Encode events (4xN tensor: x, y, t, p) as a 2 x H x W count image
    (channel 0: positive events, channel 1: negative events)."""
    xs, ys, ps = events[0], events[1], events[3]
    return events_to_channels(xs, ys, ps, sensor_size=sensor_resolution)
def create_hot_mask(self, events, sensor_resolution):
    """
    Creates a one channel tensor that can act as mask to remove pixel with high event rate.
    events: torch.tensor, 4xN [x, y, t, p]
    return: [H x W] binary mask

    NOTE: stateful — each call updates the running per-pixel activity
    accumulators self.hot_events / self.hot_idx.
    """
    xs, ys, ts, ps = events[0], events[1], events[2], events[3]
    hot_update = events_to_mask(xs, ys, ps, sensor_size=sensor_resolution)
    # Accumulate activity and derive the mean event rate per pixel.
    self.hot_events += hot_update
    self.hot_idx += 1
    event_rate = self.hot_events / self.hot_idx
    return get_hot_event_mask(
        event_rate,
        self.hot_idx,
        max_px=self.config["hot_filter"]["max_px"],
        min_obvs=self.config["hot_filter"]["min_obvs"],
        max_rate=self.config["hot_filter"]["max_rate"],
    )
@staticmethod
def create_polarity_mask(ps):
    """
    Creates a two channel tensor that acts as a mask for the input event list.
    Thin wrapper around events_polarity_mask.
    :param ps: [N] tensor with event polarity ([-1, 1])
    :return [N x 2] event representation
    """
    return events_polarity_mask(ps)
def augment_event(self, events, sensor_resolution, seed):
    """Randomly flip events horizontally/vertically and/or invert polarity.

    Each mechanism reseeds the RNG from `seed`, so paired input/GT samples
    augmented with the same seed make identical flip decisions.
    """
    xs, ys, ts, ps = events[0], events[1], events[2], events[3]
    mechanism_seeds = {'Horizontal': seed, 'Vertical': seed + 1, 'Polarity': seed + 2}
    for i, mechanism in enumerate(self.config['data_augment']['augment']):
        if mechanism not in mechanism_seeds:
            continue
        random.seed(mechanism_seeds[mechanism])
        if random.random() >= self.config['data_augment']['augment_prob'][i]:
            continue
        if mechanism == 'Horizontal':
            xs = sensor_resolution[1] - 1 - xs
        elif mechanism == 'Vertical':
            ys = sensor_resolution[0] - 1 - ys
        else:
            ps = ps * -1
    return np.concatenate((xs[np.newaxis, ...], ys[np.newaxis, ...], ts[np.newaxis, ...], ps[np.newaxis, ...]), axis=0)
def augment_frame(self, img, seed):
    """Flip a frame to match the event augmentation driven by the same seed
    ('Polarity' has no effect on frames and is ignored)."""
    flip_axis = {'Horizontal': 1, 'Vertical': 0}
    mechanism_seeds = {'Horizontal': seed, 'Vertical': seed + 1}
    for i, mechanism in enumerate(self.config['data_augment']['augment']):
        if mechanism not in flip_axis:
            continue
        random.seed(mechanism_seeds[mechanism])
        if random.random() < self.config['data_augment']['augment_prob'][i]:
            img = np.flip(img, flip_axis[mechanism])
    return img
@staticmethod
def add_hot_pixels_to_voxel(voxel, hot_pixel_std=1.0, hot_pixel_fraction=0.001):
num_hot_pixels = int(hot_pixel_fraction * voxel.shape[-1] * voxel.shape[-2])
x = torch.randint(0, voxel.shape[-1], (num_hot_pixels,))
y = torch.randint(0, voxel.shape[-2], (num_hot_pixels,))
for i in range(num_hot_pixels):
voxel[..., :, y[i], x[i]] += random.gauss(0, hot_pixel_std)
@staticmethod
def add_noise_cnt(size, seed, noise_std=1.0, noise_fraction=0.1):
torch.manual_seed(seed)
noise = torch.abs(noise_std * torch.randn(size)) # mean = 0, std = noise_std
if noise_fraction < 1.0:
mask = torch.rand(size) >= noise_fraction
noise.masked_fill_(mask, 0)
return noise
@staticmethod
def add_noise_stack(size, seed, noise_std=1.0, noise_fraction=0.1):
torch.manual_seed(seed)
noise = noise_std * torch.randn(size) # mean = 0, std = noise_std
if noise_fraction < 1.0:
mask = torch.rand(size) >= noise_fraction
noise.masked_fill_(mask, 0)
return noise
@staticmethod
def add_noise_event(window, sensor_size, seed, noise_level=0.01):
torch.manual_seed(seed)
noise_num = int(window * noise_level) #* (1 + noise_std * torch.randn(1).abs().max().item())
noise_tmp = torch.rand([4, noise_num])
x = (noise_tmp[[0], :] * sensor_size[1]).int()
y = (noise_tmp[[1], :] * sensor_size[0]).int()
t = torch.ones_like(y)
p = (noise_tmp[[3], :] * 2).int() * 2 - 1
noise = torch.cat([x, y, t, p], dim=0)
return noise
class SequenceDataset(Dataset):
    """Wraps H5Dataset to yield sequences of L consecutive samples, with
    optional random 'pauses' that repeat a zero-event sample (to simulate a
    camera that temporarily sees no motion)."""

    def __init__(self, h5_file_path, config):
        super().__init__()
        self.config = config
        # L = samples per sequence; step_size = stride between sequence starts.
        self.L = config['sequence']['sequence_length']
        step_size = config['sequence']['step_size']
        self.step_size = step_size if step_size is not None else self.L
        self.proba_pause_when_running = config['sequence']['pause']['proba_pause_when_running']
        self.proba_pause_when_paused = config['sequence']['pause']['proba_pause_when_paused']
        assert(self.L > 0)
        assert(self.step_size > 0)
        self.dataset = H5Dataset(h5_file_path, config)
        if self.L >= self.dataset.length:
            # Requested sequence longer than the file: fall back to a single,
            # shorter sequence covering the whole dataset.
            print(f'Set sequence: {h5_file_path} length {self.L} is bigger than the max length of dataset {self.dataset.length}')
            self.length = 1
            self.L = self.dataset.length
        else:
            self.length = (self.dataset.length - self.L) // self.step_size + 1
        self.gt_sensor_resolution = self.dataset.gt_sensor_resolution
        self.inp_sensor_resolution = self.dataset.inp_sensor_resolution

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        """Return a list of L sample dicts starting at dataset index i * step_size.

        All samples share one augmentation seed so flips are consistent
        across the sequence. Pauses (if enabled) emit a zeroed-event sample
        without advancing the underlying dataset index.
        """
        assert(i >= 0)
        assert(i < self.length)
        seed = random.randint(0, 2**32)
        sequence = []
        k = 0
        j = i * self.step_size
        item = self.dataset.__getitem__(j, seed=seed)
        sequence.append(item)
        paused = False
        for n in range(self.L - 1):
            if self.config['sequence']['pause']['enabled']:
                u = random.random()
                # Two-state Markov chain: different pause probability
                # depending on whether we are already paused.
                if paused:
                    probability_pause = self.proba_pause_when_paused
                else:
                    probability_pause = self.proba_pause_when_running
                paused = (u < probability_pause)
            if paused:
                # add a tensor filled with zeros, paired with the last item
                # do not increase the counter
                item = self.dataset.__getitem__(j + k, Pause=True, seed=seed)
                sequence.append(item)
            else:
                # normal case: append the next item to the list
                k += 1
                item = self.dataset.__getitem__(j + k, seed=seed)
                sequence.append(item)
        return sequence
| WarranWeng/ESR | dataloader/h5dataset.py | h5dataset.py | py | 39,452 | python | en | code | 0 | github-code | 13 |
25033584074 | # Turimas "users" masyvas.
# Parašykite funkcijas, kurios atlikas nurodytas užduotis:
# 1. funkcija "filter_dog_owners" - kaip argumentą priims masyvą ir duoto masyvo
# atveju grąžins "users", kurie turi augintinį.
# 2. funkcija "filter_adults" - kaip argumentą priims masyvą ir duoto masyvo
# atveju grąžins masyvą su "users", kurie yra pilnamečiai.
# Test data: each record describes one user with an id, a display name,
# an age, and a flag telling whether they own a dog.
users = [
    {"id": '1', "name": 'John Smith', "age": 20, "hasDog": True},
    {"id": '2', "name": 'Ann Smith', "age": 24, "hasDog": False},
    {"id": '3', "name": 'Tom Jones', "age": 31, "hasDog": True},
    {"id": '4', "name": 'Rose Peterson', "age": 17, "hasDog": False},
    {"id": '5', "name": 'Alex John', "age": 25, "hasDog": True},
    {"id": '6', "name": 'Ronald Jones', "age": 63, "hasDog": True},
    {"id": '7', "name": 'Elton Smith', "age": 16, "hasDog": True},
    {"id": '8', "name": 'Simon Peterson', "age": 30, "hasDog": False},
    {"id": '9', "name": 'Daniel Cane', "age": 51, "hasDog": True},
]
def filter_dog_owners(x):
    """Print a header, then the name of every user in `x` that owns a dog,
    and finally the collected names as a list."""
    print(f"'users', kurie turi augintinį, sąrašas:")
    users_has_dog_list = [person['name'] for person in x if person['hasDog'] is True]
    for name in users_has_dog_list:
        print(name)
    print(f"'users', kurie turi augintinį, sąrašas masyve: {users_has_dog_list}")
def filter_adults(x):
    """Print a header, then the name of every user in `x` aged 18 or over,
    and finally the collected names as a list."""
    print(f"'Suaugusiųjų 'users' sąrašas:")
    users_adult = [person['name'] for person in x if person['age'] >= 18]
    for name in users_adult:
        print(name)
    print(f"Suaugę 'users', pateikiami masyve: {users_adult}")
# Run both reports against the shared test data.
filter_dog_owners(users)
filter_adults(users)
| TomasSm1978/Python-first-test_2022.06.16 | test1.py | test1.py | py | 1,726 | python | lt | code | 0 | github-code | 13 |
33164103691 | import numpy as np
def read_input(in_file):
    """Parse a file of single-digit rows into a 2-D numpy integer array."""
    with open(in_file, 'r') as f:
        rows = [[int(char) for char in line.rstrip()] for line in f]
    return np.array(rows)
def update_array(arr):
    # Advance the "octopus grid" one step (AoC 2021 day 11): +1 energy
    # everywhere, then cascade flashes (cells > 9) until the grid settles.
    # Returns (arr, number_of_flashes_this_step); also mutates arr in place.
    # NOTE(review): the source indentation was lost; the flash-reset loop is
    # placed inside the while-loop here, since placing it after the loop would
    # never terminate once any cell flashes — confirm against the original.
    arr += 1
    bloom_tuple = []
    while np.max(arr) > 9:
        bloom = np.where(arr > 9)
        for coord in list(zip(bloom[0], bloom[1])):
            if coord not in bloom_tuple:
                bloom_tuple.append(coord)
                x_dim, y_dim = coord[0], coord[1]
                # 3x3 neighborhood bounds; slicing clamps the upper edge itself.
                left = max(0,x_dim-1)
                right = max(0,x_dim+1+1)
                bottom = max(0,y_dim-1)
                top = max(0, y_dim+1+1)
                arr[left:right,bottom:top] += 1
        # Cells that flashed end the step at energy 0.
        for coord in bloom_tuple:
            arr[coord[0], coord[1]] = 0
    triggers = len(bloom_tuple)
    print(triggers)
    return arr, triggers
# NOTE(review): `arr` is never initialized in this file as shown — presumably
# a line like `arr = read_input(<input file>)` is missing above; confirm.
counter = 0  # part 1: total number of flashes over the first 100 steps
for i in range(100):
    arr, tot = update_array(arr)
    counter += tot
step, tot = 0, 0
# part 2: first step at which every cell flashes simultaneously
while tot < arr.shape[0]*arr.shape[1]:
    step += 1
    arr, tot = update_array(arr)
| cbalusekslalom/advent_of_code | 2021/Day11/2021_day11.py | 2021_day11.py | py | 1,060 | python | en | code | 0 | github-code | 13 |
14292497275 | import inspect
import functools
import py
import sys
from _pytest.compat import NOTSET, getlocation, exc_clear
from _pytest.fixtures import FixtureDef, FixtureRequest, scopes, SubRequest
from pytest import fail
class YieldFixtureDef(FixtureDef):
    """FixtureDef variant that can track finalizers per collection item.

    The methods are @staticmethod but take `self` explicitly — presumably so
    they can be grafted onto existing FixtureDef instances as plain functions;
    TODO confirm against the plugin's patching code.
    """

    @staticmethod
    def finish(self, request):
        """Run (and pop) all finalizers for this fixturedef/request, re-raising
        the first exception after all of them have executed."""
        exceptions = []
        try:
            # Prefer the per-item finalizer list when one was registered.
            _finalizers = getattr(self, '_finalizers_per_item', {}).get(
                request.node, self._finalizers)
            while _finalizers:
                try:
                    func = _finalizers.pop()
                    func()
                except: # noqa
                    exceptions.append(sys.exc_info())
            if exceptions:
                e = exceptions[0]
                del exceptions # ensure we don't keep all frames alive because of the traceback
                py.builtin._reraise(*e)
        finally:
            hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
            hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
            # even if finalization fails, we invalidate
            # the cached fixture value and remove
            # all finalizers because they may be bound methods which will
            # keep instances alive
            if hasattr(self, "cached_result"):
                del self.cached_result
            del _finalizers[:]

    @staticmethod
    def addfinalizer(self, finalizer, colitem=None):
        """Register a finalizer; when `colitem` is given, track it per item."""
        if colitem:
            if not hasattr(self, '_finalizers_per_item'):
                self._finalizers_per_item = {}
            self._finalizers_per_item.setdefault(colitem, []).append(finalizer)
        else:
            self._finalizers.append(finalizer)

    @staticmethod
    def execute(self, request):
        """Return the fixture value, reusing the cached result when the
        parametrization index matches, otherwise tearing down and recomputing."""
        # get required arguments and register our own finish()
        # with their finalization
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            if argname != "request":
                fixturedef.addfinalizer(
                    functools.partial(self.finish, request=request), colitem=request.node)
        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                if err is not None:
                    py.builtin._reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish(request)
            assert not hasattr(self, "cached_result")
        hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
        return hook.pytest_fixture_setup(fixturedef=self, request=request)
class CachedResultStore(object):
    """Mixin that resolves, per fixture scope, the object on which cached
    fixture results should be stored (so caches live and die with that scope)."""

    def cached_store_for_function(self):
        return self

    def cached_store_for_class(self):
        return self.node.cls

    def cached_store_for_module(self):
        return self.node.module

    def cached_store_for_session(self):
        return self.node.session

    def _compute_fixture_value(self, fixturedef):
        """
        Creates a SubRequest based on "self" and calls the execute method of the given
        fixturedef object. This will force the FixtureDef object to throw away any previous results
        and compute a new fixture value, which will be stored into the FixtureDef object itself.
        :param FixtureDef fixturedef:
        """
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            param = NOTSET
            param_index = 0
            if fixturedef.params is not None:
                # Parametrized fixture requested without a matching param:
                # report where the bad request came from, then fail.
                frame = inspect.stack()[3]
                frameinfo = inspect.getframeinfo(frame[0])
                source_path = frameinfo.filename
                source_lineno = frameinfo.lineno
                source_path = py.path.local(source_path)
                if source_path.relto(funcitem.config.rootdir):
                    source_path = source_path.relto(funcitem.config.rootdir)
                msg = (
                    "The requested fixture has no parameter defined for the "
                    "current test.\n\nRequested fixture '{0}' defined in:\n{1}"
                    "\n\nRequested here:\n{2}:{3}".format(
                        fixturedef.argname,
                        getlocation(fixturedef.func, funcitem.config.rootdir),
                        source_path,
                        source_lineno,
                    )
                )
                fail(msg)
        else:
            # indices might not be set if old-style metafunc.addcall() was used
            param_index = funcitem.callspec.indices.get(argname, 0)
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]
        subrequest = YieldSubRequest(self, scope, param, param_index, fixturedef)
        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if its not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            # NOTE(review): cache_store may be None (e.g. class scope outside a
            # class); the cached_result branch below would then raise — confirm.
            cache_store = getattr(
                self, 'cached_store_for_%s' % scope, lambda: None)()
            if cache_store and not hasattr(cache_store, '_fixturedef_cached_results'):
                cache_store._fixturedef_cached_results = dict()
            if hasattr(fixturedef, 'cached_result'):
                # Restore the scope-local cached result, or drop a stale one.
                fixturedef_cached_result = cache_store._fixturedef_cached_results.get(argname)
                if fixturedef_cached_result:
                    fixturedef.cached_result = fixturedef_cached_result
                else:
                    del fixturedef.cached_result
            fixturedef.execute(request=subrequest)
        finally:
            # if fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(
                functools.partial(
                    fixturedef.finish, request=subrequest),
                subrequest.node)
        cached_result = getattr(fixturedef, 'cached_result', None)
        if cache_store and cached_result:
            cache_store._fixturedef_cached_results[argname] = cached_result
class YieldSubRequest(CachedResultStore, SubRequest):
    """SubRequest variant that records fixture finalizers on itself.

    Finalizers passed to :meth:`addfinalizer` are accumulated in
    ``_fixturedef_finalizers`` instead of being forwarded anywhere, so the
    caller controls when they run.
    """

    def __init__(self, *args, **kwargs):
        super(YieldSubRequest, self).__init__(*args, **kwargs)
        # Locally collected finalizers (not handed to the fixturedef).
        self._fixturedef_finalizers = list()

    def addfinalizer(self, finalizer):
        """Store *finalizer* locally for later, caller-driven execution."""
        self._fixturedef_finalizers.append(finalizer)
class YieldFixtureRequest(CachedResultStore, FixtureRequest):
    # Plain FixtureRequest combined with the CachedResultStore mixin;
    # adds no behavior of its own.
    pass
| devova/pytest-yield | pytest_yield/fixtures.py | fixtures.py | py | 7,313 | python | en | code | 15 | github-code | 13 |
72555705938 | #Tomb Raider: Definitive Edition [Orbis] - ".trdemesh" Loader
#By Gh0stblade
#v1.3
#Special thanks: Chrrox
#Options: These are bools that enable/disable certain features! They are global and affect ALL platforms!
#Var Effect
#Misc
#Mesh Global
# --- User-tweakable globals (read by the loaders below) ---
fDefaultMeshScale = 1.0 #Override mesh scale (default is 1.0)
bOptimizeMesh = 0 #Enable optimization (remove duplicate vertices, optimize lists for drawing) (1 = on, 0 = off)
bMaterialsEnabled = 1 #Materials (1 = on, 0 = off)
bRenderAsPoints = 0 #Render mesh as points without triangles drawn (1 = on, 0 = off)

#Vertex Components toggles (1 = on, 0 = off)
bNORMsEnabled = 1 #Normals (1 = on, 0 = off)
bUVsEnabled = 1 #UVs (1 = on, 0 = off)
bCOLsEnabled = 0 #Vertex colours (1 = on, 0 = off)
bSkinningEnabled = 1 #Enable skin weights (1 = on, 0 = off)

#Gh0stBlade ONLY
debug = 0 #Prints debug info (1 = on, 0 = off)
from inc_noesis import *
import math
def registerNoesisTypes():
    """Register the TRDE mesh (.trdemesh) and texture (.pcd) handlers.

    Called once by Noesis at plugin load; returns 1 on success.
    """
    mesh_handle = noesis.register("Tomb Raider: Definitive Edition [PS4]", ".trdemesh")
    noesis.setHandlerTypeCheck(mesh_handle, meshCheckType)
    noesis.setHandlerLoadModel(mesh_handle, meshLoadModel)

    tex_handle = noesis.register("Tomb Raider: Definitive Edition [PS4]", ".pcd")
    noesis.setHandlerTypeCheck(tex_handle, ps4tCheckType)
    noesis.setHandlerLoadRGBA(tex_handle, ps4tLoadDDS)

    noesis.logPopup()
    return 1
def meshCheckType(data):
    """Return 1 when *data* begins with the little-endian 'Mesh' magic, else 0."""
    stream = NoeBitStream(data)
    magic = stream.readUInt()
    if magic != 0x6873654D:
        print("Fatal Error: Unknown file magic: " + str(hex(magic) + " expected 'hsem'!"))
        return 0
    return 1
def ps4tCheckType(data):
    """Return 1 when *data* begins with the 'PS4T' magic, else 0."""
    stream = NoeBitStream(data)
    magic = stream.readUInt()
    if magic != 0x54345350:
        print("Fatal Error: Unknown file magic: " + str(hex(magic) + " expected PS4T!"))
        return 0
    return 1
def ps4tLoadDDS(data, texList):
    """Decode a PS4 '.pcd' texture into an RGBA NoeTexture.

    Reads the PS4T header, de-tiles the morton-ordered blocks when needed,
    DXT-decompresses the payload and appends the result to *texList*.
    Returns 1 on success, 0 on an unsupported texture type.
    """
    bs = NoeBitStream(data)
    isBlockCompressed = False
    isReOrdered = False
    isTiled = False
    dataOfs = 0
    bitsPerPixel = 8
    texFmt = noesis.NOESISTEX_RGBA32

    # --- header ---
    magic = bs.readUInt()
    textureDataSize = bs.readUInt()
    uiPcdUnk00 = bs.readUInt()
    textureType = bs.readUByte()
    bs.seek(3, NOESEEK_REL)
    uiPcdWidth = bs.readUInt()
    uiPcdHeight = bs.readUInt()
    uiPcdFlags = bs.readUInt()
    uiPcdUnk01 = bs.readUInt()
    bPcdData = bs.readBytes(textureDataSize)
    print(str(bs.getOffset()))

    if textureType == 0x23:
        isBlockCompressed = True
        isTiled = True
        bitsPerPixel = 4
        dataOfs = 32
        decode = noesis.FOURCC_DXT1
    elif textureType == 0x25:
        isBlockCompressed = True
        isTiled = True
        bitsPerPixel = 8
        dataOfs = 32
        # NOTE(review): this is NOESISTEX_DXT5 while the 0x23 branch uses a
        # FOURCC_* constant; confirm against the Noesis API whether
        # FOURCC_DXT5 is intended for rapi.imageDecodeDXT.
        decode = noesis.NOESISTEX_DXT5
    else:
        print("Fatal Error: Unsupported texture type: " + str(textureType))
        # BUG FIX: previously execution fell through and crashed with a
        # NameError on the undefined 'decode'; bail out cleanly instead.
        return 0

    if isTiled is True:
        w, h = uiPcdWidth, uiPcdHeight
        tileW = 32 if isBlockCompressed is True else 8
        tileH = 32 if isBlockCompressed is True else 8
        # round dimensions up to whole tiles
        w = ((w+(tileW-1)) & ~(tileW-1))
        h = ((h+(tileH-1)) & ~(tileH-1))
        #organized into tiled rows of morton-ordered blocks
        rowSize = (w*tileH*bitsPerPixel) // 8
        reorderedImageData = bytearray()
        for y in range(0, h//tileH):
            if isBlockCompressed is True:
                decodedRow = rapi.imageFromMortonOrder(data[dataOfs:dataOfs+rowSize], w>>2, tileH>>2, bitsPerPixel*2)
            else:
                decodedRow = rapi.imageFromMortonOrder(data[dataOfs:dataOfs+rowSize], w, tileH, bitsPerPixel//8)
            dataOfs += rowSize
            reorderedImageData += decodedRow
        bPcdData = reorderedImageData

    bPcdData = rapi.imageDecodeDXT(bPcdData, uiPcdWidth, uiPcdHeight, decode)

    if isReOrdered is True:
        bPcdData = rapi.imageDecodeRaw(bPcdData, uiPcdWidth, uiPcdHeight, "p8r8g8b8")

    tex1 = NoeTexture(str(1), uiPcdWidth, uiPcdHeight, bPcdData, texFmt)
    texList.append(tex1)
    #if gPcdFmt != None:
    #	texList.append(NoeTexture("Texture", int(uiPcdWidth), int(uiPcdHeight), bPcdData, gPcdFmt))
    return 1
class meshFile(object):
    """Parser for '.trdemesh' files.

    Wraps the raw bytes in a NoeBitStream and feeds the Noesis
    rich-procedural-geometry (rpg) state: materials, skin data and committed
    triangle lists.  All mesh-relative offsets are taken from
    self.offsetStart + self.offsetMeshStart.
    """
    def __init__(self, data):
        # Raw stream plus accumulators filled in by the load*/build* methods.
        self.inFile = NoeBitStream(data)
        self.boneList = []
        self.matNames = []
        self.matList = []
        self.texList = []
        self.numMats = 0
        self.offsetBoneInfo = -1
        self.offsetBoneInfo2 = -1
        self.offsetMeshStart = 0
        self.offsetMatInfo = -1
        self.offsetStart = 0
        self.meshGroupIdx = 0

    def loadHeader(self):
        """Parse the outer file header and record section offsets.

        Currently unused by meshLoadModel (the call is commented out there).
        """
        bs = self.inFile
        numOffsets = bs.readInt()
        bs.seek(0x10, NOESEEK_ABS)
        numOffsets2 = bs.readInt()
        bs.seek(0x18, NOESEEK_ABS)
        self.offsetMeshStart = bs.readInt()
        bs.seek(0x28, NOESEEK_ABS)
        self.offsetMatInfo = bs.readInt()
        bs.seek(((numOffsets * 0x8) + 0x4), NOESEEK_ABS)
        self.offsetBoneInfo = bs.readInt()
        self.offsetBoneInfo2 = bs.readInt()
        bs.seek(((0x14 + numOffsets * 0x8) + numOffsets2 * 0x4), NOESEEK_ABS)
        self.offsetStart = bs.getOffset()

    def loadMeshFile(self):
        """Parse the mesh section header and build every mesh via buildMesh()."""
        bs = self.inFile
        bs.seek(self.offsetStart + self.offsetMeshStart, NOESEEK_ABS)
        uiMagic = bs.readUInt()
        uiUnk00 = bs.readUInt()
        uiMeshFileSize = bs.readUInt()
        uiUnk01 = bs.readUInt()
        bs.seek(0x60, NOESEEK_REL)#AABB MIN/MAX?
        uiUnk02 = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        uiOffsetMeshGroupInfo = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        uiOffsetMeshInfo = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        uiOffsetBoneMap = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        # NOTE(review): uiOffsetBoneMap is read twice and the first value is
        # discarded; the second field may actually be a different offset
        # (e.g. a second bone map) — confirm against the format spec.
        uiOffsetBoneMap = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        uiOffsetFaceData = bs.readUInt()
        bs.seek(4, NOESEEK_REL)#64bit
        usNumMeshGroups = bs.readUShort()
        usNumMesh = bs.readUShort()
        usNumBones = bs.readUShort()

        # one 0x50-byte mesh-info record per mesh
        for i in range(usNumMesh):
            bs.seek(self.offsetStart + self.offsetMeshStart + uiOffsetMeshInfo + i * 0x50, NOESEEK_ABS)
            if debug:
                print("Mesh Info Start: " + str(bs.tell()))
            meshFile.buildMesh(self, bs.read("20I"), i, uiOffsetMeshGroupInfo, uiOffsetBoneMap, uiOffsetFaceData, usNumBones)
            if debug:
                print("Mesh Info End: " + str(bs.tell()))

    def buildSkeleton(self):
        """Read bones from a sibling 'skeleton.trdemesh' file, if present.

        The hard-coded seek offsets target a specific skeleton layout
        (see the v1/v2 comments below); bones are appended to self.boneList
        and converted to world space via rapi.multiplyBones.
        """
        skelFileName = rapi.getDirForFilePath(rapi.getInputName()) + "skeleton.trdemesh"
        if (rapi.checkFileExists(skelFileName)):
            print("Skeleton file detected!")
            print("Building Skeleton....")
            sd = rapi.loadIntoByteArray(skelFileName)
            sd = NoeBitStream(sd)
            sd.seek(0x3630, NOESEEK_ABS)#v2-lara
            #sd.seek(0x35E8, NOESEEK_ABS)#v1-lara
            #sd.seek(0x1B8, NOESEEK_ABS)
            uiNumBones = sd.readUInt()
            sd.seek(0x14, NOESEEK_REL)#v2-lara
            #sd.seek(0x14, NOESEEK_REL)
            #sd.seek(0xC, NOESEEK_REL)
            if uiNumBones > 0:
                for i in range(uiNumBones):
                    #print("Bone: " + str(i) + " at: " + str(sd.getOffset()))
                    sd.seek(0x10, NOESEEK_REL)
                    sd.seek(0x10, NOESEEK_REL)
                    fBoneXPos = sd.readFloat()
                    fBoneYPos = sd.readFloat()
                    fBoneZPos = sd.readFloat()
                    boneUnk00 = sd.readFloat()
                    boneUnk01 = sd.readInt()
                    boneUnk03 = sd.readShort()
                    boneUnk04 = sd.readShort()
                    iBonePID = sd.readInt()
                    sd.seek(0x14, NOESEEK_REL)
                    # identity rotation; Y/Z swapped to match the viewer's up axis
                    quat = NoeQuat([0, 0, 0, 1])
                    mat = quat.toMat43()
                    mat[3] = [fBoneXPos, fBoneZPos, -fBoneYPos]
                    #print("X: " + str(fBoneXPos) + " Y: " + str(fBoneZPos) + " Z: " + str(fBoneYPos))
                    if iBonePID == -1:
                        iBonePID = 0
                    self.boneList.append(NoeBone(i, "b_" + str(iBonePID) + "_" + str(i), mat, None, iBonePID))
                self.boneList = rapi.multiplyBones(self.boneList)

    def buildMesh(self, meshInfo, meshIndex, uiOffsetMeshGroupInfo, uiOffsetBoneMap, uiOffsetFaceData, usNumBones):
        """Bind one mesh's vertex/face buffers and commit its primitive groups.

        *meshInfo* is the 20-uint record read by loadMeshFile; the vertex
        layout is discovered from hashed component descriptors, then each
        mesh group is committed as an indexed triangle list.
        """
        bs = self.inFile
        bs.seek(self.offsetStart + self.offsetMeshStart + meshInfo[12] + 0x8, NOESEEK_ABS)
        usNumVertexComponents = bs.readUShort()
        ucMeshVertStride = bs.readUByte()
        bs.seek(0x5, NOESEEK_REL)
        # Per-component byte offsets within a vertex; -1 means "not present".
        iMeshVertPos = -1
        iMeshNrmPos = -1
        iMeshTessNrmPos = -1
        iMeshTangPos = -1
        iMeshBiNrmPos = -1
        iMeshPckNTBPos = -1
        iMeshBwPos = -1
        iMeshBiPos = -1
        iMeshCol1Pos = -1
        iMeshCol2Pos = -1
        iMeshUV1Pos = -1
        iMeshUV2Pos = -1
        iMeshUV3Pos = -1
        iMeshUV4Pos = -1
        iMeshIIDPos = -1

        # Each descriptor: component hash, offset in vertex, type byte, pad.
        for i in range(usNumVertexComponents):
            uiEntryHash = bs.readUInt()
            usEntryValue = bs.readUShort()
            ucEntryType = bs.readUByte()
            ucEntryNull = bs.readUByte()

            if uiEntryHash == 0xD2F7D823:#Position
                iMeshVertPos = usEntryValue
            elif uiEntryHash == 0x36F5E414:#Normal
                if iMeshNrmPos == -1:
                    iMeshNrmPos = usEntryValue
            elif uiEntryHash == 0x3E7F6149:#TessellationNormal
                if debug:
                    print("Unsupported Vertex Component: TessellationNormal! " + "Pos: " + str(usEntryValue))
            #	iMeshTessNrmPos = usEntryValue
            elif uiEntryHash == 0xF1ED11C3:#Tangent
                if iMeshTangPos == -1:
                    iMeshTangPos = usEntryValue
            elif uiEntryHash == 0x64A86F01:#Binormal
                if debug:
                    print("Unsupported Vertex Component: BiNormal! " + "Pos: " + str(usEntryValue))
                if iMeshBiNrmPos == -1:
                    iMeshBiNrmPos = usEntryValue
            elif uiEntryHash == 0x9B1D4EA:#PackedNTB
                if debug:
                    print("Unsupported Vertex Component: PackedNTB! " + "Pos: " + str(usEntryValue))
            #	iMeshPckNTBPos = usEntryValue
            elif uiEntryHash == 0x48E691C0:#SkinWeights
                iMeshBwPos = usEntryValue
            elif uiEntryHash == 0x5156D8D3:#SkinIndices
                iMeshBiPos = usEntryValue
            elif uiEntryHash == 0x7E7DD623:#Color1
                iMeshCol1Pos = usEntryValue
                if debug:
                    print("Unsupported Vertex Component: Color1! " + "Pos: " + str(usEntryValue))
            elif uiEntryHash == 0x733EF0FA:#Color2
                if debug:
                    print("Unsupported Vertex Component: Color2! " + "Pos: " + str(usEntryValue))
            #	iMeshCol2Pos = usEntryValue
            elif uiEntryHash == 0x8317902A:#Texcoord1
                if iMeshUV1Pos == -1:
                    iMeshUV1Pos = usEntryValue
            elif uiEntryHash == 0x8E54B6F3:#Texcoord2
                iMeshUV2Pos = usEntryValue
            elif uiEntryHash == 0x8A95AB44:#Texcoord3
                if debug:
                    print("Unsupported Vertex Component: Texcoord3! " + "Pos: " + str(usEntryValue))
            #	iMeshUV3Pos = usEntryValue
            elif uiEntryHash == 0x94D2FB41:#Texcoord4
                if debug:
                    print("Unsupported Vertex Component: Texcoord4! " + "Pos: " + str(usEntryValue))
            #	iMeshUV4Pos = usEntryValue
            elif uiEntryHash == 0xE7623ECF:#InstanceID
                if debug:
                    print("Unsupported Vertex Component: InstanceID! " + "Pos: " + str(usEntryValue))
                iMeshUV2Pos = usEntryValue
            else:
                if debug:
                    print("Unknown Vertex Component! Hash: " + str(hex((uiEntryHash))) + " value: " + str(usEntryValue))

        # meshInfo[2] = bone-map entry count, meshInfo[3] = bone-map offset
        if meshInfo[2] != 0 and bSkinningEnabled != 0:
            bs.seek(self.offsetStart + self.offsetMeshStart + meshInfo[3], NOESEEK_ABS)
            boneMap = []
            for i in range(meshInfo[2]):
                boneMap.append(bs.readInt())
            rapi.rpgSetBoneMap(boneMap)

        # meshInfo[0] = number of primitive groups in this mesh
        for i in range(meshInfo[0]):
            bs.seek(self.offsetStart + self.offsetMeshStart + uiOffsetMeshGroupInfo + self.meshGroupIdx * 0x70, NOESEEK_ABS)
            self.meshGroupIdx += 1
            meshGroupInfo = bs.read("28I")
            print("Mesh_" + "_" + str(self.meshGroupIdx))
            print(meshGroupInfo)
            #rapi.rpgSetName(str(meshGroupInfo[14]))
            #rapi.rpgSetName("Mesh_" + str(self.meshGroupIdx))
            rapi.rpgSetName("Mesh_" + str(self.meshGroupIdx-1) + "_" + str(i) + "_Mat_" + str(meshGroupInfo[14]))
            rapi.rpgSetPosScaleBias((fDefaultMeshScale, fDefaultMeshScale, fDefaultMeshScale), (0, 0, 0))

            if bMaterialsEnabled != 0:
                #Create material
                material = NoeMaterial("MAT_" + str(meshIndex) + "_" + str(i), "")
                material.setTexture("Mesh_" + str(meshIndex) + "_" + str(i) + ".dds")
                self.matList.append(material)
                rapi.rpgSetMaterial("MAT_" + str(meshIndex) + "_" + str(i))

            # meshGroupInfo[4] = first index, meshGroupInfo[5] = triangle count
            bs.seek(self.offsetStart + self.offsetMeshStart + uiOffsetFaceData + meshGroupInfo[4] * 0x2, NOESEEK_ABS)
            faceBuff = bs.readBytes(meshGroupInfo[5] * 0x6)
            # meshInfo[4] = vertex buffer offset, meshInfo[14] = vertex count
            bs.seek(self.offsetStart + self.offsetMeshStart + meshInfo[4], NOESEEK_ABS)
            vertBuff = bs.readBytes(meshInfo[14] * ucMeshVertStride)
            rapi.rpgSetUVScaleBias(NoeVec3 ((16.0, 16.0, 16.0)), NoeVec3 ((16.0, 16.0, 16.0)))
            rapi.rpgSetTransform(NoeMat43((NoeVec3((1, 0, 0)), NoeVec3((0, 0, 1)), NoeVec3((0, -1, 0)), NoeVec3((0, 0, 0)))))

            if iMeshVertPos != -1:
                rapi.rpgBindPositionBufferOfs(vertBuff, noesis.RPGEODATA_FLOAT, ucMeshVertStride, iMeshVertPos)
            if iMeshNrmPos != -1 and bNORMsEnabled != 0: #Orbis normals are encoded the same as TR8,TRAS Xenon normals, just little endian.
                decodedNormals = rapi.decodeNormals32(vertBuff[iMeshNrmPos:], ucMeshVertStride, -10, -10, -10, NOE_LITTLEENDIAN)
                rapi.rpgBindNormalBufferOfs(decodedNormals, noesis.RPGEODATA_FLOAT, 0xC, 0x0)
                #normList = []
                #for n in range(meshInfo[14]):
                #	idx = n * 3
                #	tx = decodedNormals[idx]
                #	ty = decodedNormals[idx + 1]
                #	tz = decodedNormals[idx + 2]
                #	#normList.append(tx/255.0))
                #	#normList.append(ty/.0))
                #	#normList.append(tz))
                #	#normList.append(1.0)
                #print(str(decodedNormals[0]))
                #print(str(decodedNormals[1]))
                #print(str(decodedNormals[2]))
                #print(str(normList[0]))
                #print(str(normList[1]))
                #print(str(normList[2]))
                #normBuff = struct.pack("<" + 'f'*len(normList), *normList)
                #rapi.rpgBindColorBufferOfs(normBuff, noesis.RPGEODATA_BYTE, 4, 0x0, 4)
            #if iMeshTessNrmPos != -1:
            #	print("Unsupported")
            if iMeshTangPos != -1:
                # NOTE(review): decodes from iMeshNrmPos, not iMeshTangPos;
                # result is currently unused (binds below are commented out).
                decodedTangents = rapi.decodeNormals32(vertBuff[iMeshNrmPos:], ucMeshVertStride, -10, -10, -10, NOE_LITTLEENDIAN)
                #rapi.rpgBindNormalBufferOfs(decodedTangents, noesis.RPGEODATA_FLOAT, 0xC, 0x0)
                #rapi.rpgBindColorBufferOfs(decodedNormals, noesis.RPGEODATA_FLOAT, 0xC, 0x0, 3)
            #if iMeshBiNrmPos != -1:
            #	print("Unsupported")
            #if iMeshPckNTBPos != -1:
            #	print("Unsupported")
            if iMeshBwPos != -1 and bSkinningEnabled != 0:
                #weightList = []
                #for w in range(meshInfo[14]):
                #	idx = ucMeshVertStride * w + iMeshBwPos
                #	weightList.append(float((vertBuff[idx]) / 255.0))
                #	weightList.append(float((vertBuff[idx + 1]) / 255.0))
                #	weightList.append(float((vertBuff[idx + 2]) / 255.0))
                #	weightList.append(float((vertBuff[idx + 3]) / 255.0))
                #weightBuff = struct.pack("<" + 'f'*len(weightList), *weightList)
                #rapi.rpgBindBoneWeightBufferOfs(weightBuff, noesis.RPGEODATA_FLOAT, 0x10, 0x0, 0x4)
                rapi.rpgBindBoneWeightBufferOfs(vertBuff, noesis.RPGEODATA_UBYTE, ucMeshVertStride, iMeshBwPos, 0x4)
            if iMeshBiPos != -1 and bSkinningEnabled != 0:
                rapi.rpgBindBoneIndexBufferOfs(vertBuff, noesis.RPGEODATA_UBYTE, ucMeshVertStride, iMeshBiPos, 0x4)
            #if iMeshCol1Pos != -1 and bCOLsEnabled != 0:
            #	rapi.rpgBindColorBufferOfs(vertBuff, noesis.RPGEODATA_BYTE, ucMeshVertStride, iMeshCol1Pos, 0x4)
            #if iMeshCol2Pos != -1:
            #	print("Unsupported")
            if iMeshUV1Pos != -1 and bUVsEnabled != 0:
                #uvList = []
                #for w in range(meshInfo[14]):
                #	idx = ucMeshVertStride * w + iMeshUV1Pos
                #	uvList.append((struct.unpack('<h',vertBuff[idx:(idx+2)])[0]/2048.0))
                #	uvList.append(((struct.unpack('<h',vertBuff[(idx+2):(idx+4)])[0]/2048.0)))
                #	uvList.append(0.0)
                #print(uvList)
                #uvBuff = struct.pack("<" + 'f'*len(uvList), *uvList)
                #rapi.rpgBindUV1BufferOfs(uvBuff, noesis.RPGEODATA_FLOAT, 12, 0)
                rapi.rpgBindUV1BufferOfs(vertBuff, noesis.RPGEODATA_SHORT, ucMeshVertStride, iMeshUV1Pos)
            #if iMeshUV2Pos != -1 and bUVsEnabled != 0:
            #	rapi.rpgBindUV2BufferOfs(vertBuff, noesis.RPGEODATA_SHORT, ucMeshVertStride, iMeshUV2Pos)
            #if iMeshUV3Pos != -1:
            #	print("Unsupported")
            #if iMeshUV4Pos != -1:
            #	print("Unsupported")
            #if iMeshIIDPos != -1:
            #	print("Unsupported")

            if bRenderAsPoints:
                rapi.rpgCommitTriangles(None, noesis.RPGEODATA_USHORT, meshInfo[14], noesis.RPGEO_POINTS, 0x1)
            else:
                rapi.rpgSetStripEnder(0x10000)
                rapi.rpgCommitTriangles(faceBuff, noesis.RPGEODATA_USHORT, int(meshGroupInfo[5] * 0x3), noesis.RPGEO_TRIANGLE, 0x1)
            if bOptimizeMesh:
                rapi.rpgOptimize()
            rapi.rpgClearBufferBinds()
def meshLoadModel(data, mdlList):
    """Noesis entry point: build a NoeModel from .trdemesh bytes.

    Parses the mesh section, loads the optional sibling skeleton, and
    appends the constructed model to *mdlList*.  Returns 1 (Noesis success).
    """
    ctx = rapi.rpgCreateContext()
    mesh = meshFile(data)
    #mesh.loadHeader()
    mesh.loadMeshFile()
    mesh.buildSkeleton()
    try:
        mdl = rapi.rpgConstructModel()
    except Exception:
        # Fall back to an empty model.  A bare `except:` here previously
        # also swallowed KeyboardInterrupt/SystemExit.
        mdl = NoeModel()
    mdl.setBones(mesh.boneList)
    mdl.setModelMaterials(NoeModelMaterials(mesh.texList, mesh.matList))
    mdlList.append(mdl)
    return 1
| DickBlackshack/NoesisPlugins | Python/Gh0stBlade/fmt_TRDE_mesh_1_3_1.py | fmt_TRDE_mesh_1_3_1.py | py | 15,983 | python | en | code | 17 | github-code | 13 |
38776635169 | import copy
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from landscapegen.tileset import Tileset_wfc
from landscapegen.wavefunction import Wavefunction
# from typing import deprecated
def flatten_list_of_lists(list_of_lists):
    """Concatenate the sub-lists of *list_of_lists* into one flat list."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
# @deprecated("Use plotting_thing")
def plot_landscape(landscape, tileset_info):
    """Render *landscape* (2-D array of tile characters) as a colored grid.

    Each distinct tile character is mapped to an integer and colored with
    its RGBA value from *tileset_info*; a colorbar labels the tiles.
    Deprecated in favor of plotting_thing.  Returns (fig, ax).
    """
    size0 = len(landscape)
    # NOTE(review): column count is taken from row index 1, not 0 — only
    # correct when all rows have equal length; confirm intent.
    size1 = len(landscape[1])
    char_list = list(
        tileset_info.keys()
    )  # Position in this is value, We do this once so the value is locked for each tile
    char_dict = {c: i for i, c in enumerate(char_list)}  # tile: value
    # integer value per cell, then a discrete colormap over the tile colors
    values = np.vectorize(char_dict.get)(landscape)
    colors = np.array([tileset_info[char_list[i]] for i, c in enumerate(char_list)])
    cmap = ListedColormap(colors)
    fig, ax = plt.subplots()
    cax = ax.imshow(values, cmap, rasterized=True, vmin=0, vmax=len(tileset_info))
    # colorbar ticks centered on each color band, labeled with the tile chars
    cbar = fig.colorbar(cax, cmap=cmap, ticks=np.arange(0, len(tileset_info)) + 0.5)
    cbar.ax.set_yticklabels(char_list)

    # ax.set_xticks(np.arange(-.5, 10, 1), minor=True)
    # ax.set_yticks(np.arange(-.5, 10, 1), minor=True)
    # white minor gridlines between cells
    ax.set_xticks(np.arange(-0.5, size1, 1), minor=True)
    ax.set_yticks(np.arange(-0.5, size0, 1), minor=True)
    ax.grid(which="minor", color="w", linestyle="-", linewidth=2)
    # ax.grid()
    return fig, ax
# @deprecated("Use plotting_thing")
def plot_incomplete(wavefunction, tileset):
    """Plot a not-yet-collapsed wavefunction via plot_landscape.

    Works on a deep copy: cells with zero candidates are shown as "Void",
    cells with more than one candidate as "impossible"; two extra colors are
    added to the tileset info for these markers.
    Deprecated in favor of plotting_thing.
    """
    info = copy.deepcopy(tileset.info)
    info["Void"] = [1, 1, 1, 1]
    info["impossible"] = [1, 0, 1, 1]
    wavefunc2 = copy.deepcopy(wavefunction)
    size0 = len(wavefunc2)
    size1 = len(wavefunc2[0])
    for jj in range(size0):
        for ii in range(size1):
            cell = wavefunc2[jj][ii]
            # if len(cell) == 1:
            # print(f"{jj}, {ii} is {cell}")
            if len(cell) == 0:
                # contradiction: no candidate tiles remain
                wavefunc2[jj][ii] = ["Void"]
                # print(f"{jj}, {ii} is void")
            if len(cell) > 1:
                # undecided: more than one candidate tile
                wavefunc2[jj][ii] = ["impossible"]
                # print(f"{jj}, {ii} is impossible")
    landscape = np.array(wavefunc2)
    plot_landscape(landscape=landscape, tileset_info=info)
def get_mini_grid_size(tileset_info):
    """Return the side length of the smallest square grid that fits one cell
    per tile type in *tileset_info*.

    E.g. 2-4 tile types -> 2, 5-9 -> 3, 10-16 -> 4.  Returns 0 for an
    empty tileset.  (The old comment called this the "square number"; it is
    actually the square's side.)
    """
    n_tiles = len(tileset_info.keys())
    if n_tiles == 0:
        return 0
    # math.isqrt is exact integer arithmetic; math.ceil(math.sqrt(n)) can
    # mis-round for very large n due to float precision.
    return math.isqrt(n_tiles - 1) + 1
def plotting_thing_landscape(wavefunction, tileset, minor_grid_size=1):
    """Plot a wavefunction whose cells each hold exactly one tile character.

    If the wavefunction contains the "__BLANK__" marker (produced by
    subdivide_grid), a magenta entry is prepended to the color table for it.
    *minor_grid_size* controls the spacing of the white gridlines (set to
    the mini-grid size when plotting a subdivided wavefunction).
    Returns (fig, ax).
    """
    char_dict = {c: i for i, c in enumerate(tileset.characters)}  # tile: value
    # Issue is that now the wavefunction might also contain "__BLANK__" contains 1 more color than tileset.characters,
    # so the -1 gets interpreted as the closest value(0)
    contains_blank = wavefunction.contains_blank
    if contains_blank:
        # char_dict.update({"__BLANK__": -1})
        # prepend so "__BLANK__" maps to -1 and gets the first (magenta) color
        char_dict = {"__BLANK__": -1, **char_dict}
        tileset_characters = copy.deepcopy(tileset.characters)
        tileset_characters.insert(0, "__BLANK__")
        tileset_info = copy.deepcopy(tileset.info)
        # tileset_info.update({"__BLANK__": [1,0,1,1]})
        tileset_info = {"__BLANK__": [1, 0, 1, 1], **tileset_info}
        # Uh maybe add to the end instead of top?
    else:
        tileset_characters = tileset.characters
        tileset_info = tileset.info

    colors = np.array(
        [tileset_info[tileset_characters[i]] for i, c in enumerate(tileset_characters)]
    )  # must change
    # map every cell's single tile character to its integer value
    vectorfunc = np.vectorize(char_dict.get)
    values = vectorfunc(wavefunction.wf)
    cmap = ListedColormap(colors)
    fig, ax = plt.subplots()
    minval = min(char_dict.values())
    maxval = max(char_dict.values())
    cax = ax.imshow(values, cmap, rasterized=True, vmin=minval, vmax=maxval)
    # colorbar ticks centered on each color band, labeled with the tile chars
    cbar = fig.colorbar(cax, cmap=cmap, ticks=np.arange(minval, maxval + 1) + 0.5)
    cbar.ax.set_yticklabels(tileset_characters)
    # dontuse ax.set_xticks(np.arange(-.5, 10, 1), minor=True)
    # dontuse ax.set_yticks(np.arange(-.5, 10, 1), minor=True)
    ax.set_xticks(np.arange(-0.5, wavefunction.size1, minor_grid_size), minor=True)
    ax.set_yticks(np.arange(-0.5, wavefunction.size0, minor_grid_size), minor=True)
    ax.grid(which="minor", color="w", linestyle="-", linewidth=2)
    # ax.grid()
    return fig, ax
def subdivide_grid(wavefunction: Wavefunction, tileset: Tileset_wfc):
    """Expand every cell of *wavefunction* into a mini-grid of single tiles.

    Each cell becomes a mini_grid_size x mini_grid_size block: a collapsed
    cell fills its whole block with its tile; an undecided cell gets one
    mini-cell per tile type, holding that tile if it is still a candidate
    and "__BLANK__" otherwise (extra mini-cells are also "__BLANK__").

    Returns:
        (Wavefunction, int): the expanded wavefunction and the mini-grid
        side length used.
    """
    mini_grid_size = get_mini_grid_size(tileset_info=tileset.info)
    new_size0 = wavefunction.size0 * mini_grid_size
    new_size1 = wavefunction.size1 * mini_grid_size
    mylist = [[[] for i in range(new_size1)] for j in range(new_size0)]
    tileset_characters = list(tileset.info.keys())
    for j in range(wavefunction.size0):
        for i in range(wavefunction.size1):
            # top-left corner of this cell's mini-grid in the expanded array
            jj = j * mini_grid_size
            ii = i * mini_grid_size
            if len(wavefunction.wf[j][i]) == 1:
                # Just replace everything in there with the same as the cell
                for k0 in range(mini_grid_size):
                    for k1 in range(mini_grid_size):
                        mgj = jj + k0  # mini_grid_jj
                        mgi = ii + k1  # mini_grid_ii
                        mylist[mgj][mgi] = wavefunction.wf[j][i]
            else:
                # Go through each square in the mini-grid and assign the
                # corresponding character if present, or blank if not present:
                square_ind = 0
                for k0 in range(mini_grid_size):
                    for k1 in range(mini_grid_size):
                        mgj = jj + k0  # mini_grid_jj
                        mgi = ii + k1  # mini_grid_ii
                        if square_ind >= len(tileset_characters):
                            mylist[mgj][mgi] = ["__BLANK__"]
                        elif tileset_characters[square_ind] in wavefunction.wf[j][i]:
                            mylist[mgj][mgi] = [tileset_characters[square_ind]]
                        else:
                            mylist[mgj][mgi] = ["__BLANK__"]
                        square_ind = square_ind + 1
    return Wavefunction(mylist), mini_grid_size
def plotting_thing(wavefunction: Wavefunction, tileset: Tileset_wfc):
    """Plot *wavefunction*: directly when fully collapsed, otherwise as
    subdivided mini-grids showing the remaining candidate tiles.

    Returns (fig, ax) from plotting_thing_landscape.
    """
    if wavefunction.collapsed:
        # Fully determined: every cell holds exactly one tile.
        return plotting_thing_landscape(wavefunction=wavefunction, tileset=tileset)
    split_wf, grid_size = subdivide_grid(wavefunction=wavefunction, tileset=tileset)
    return plotting_thing_landscape(
        wavefunction=split_wf, tileset=tileset, minor_grid_size=grid_size
    )
# dims:
# Split the incoming 3-D arrays into a 2-D array of 1-D arrays.
# If an array has length 1, the plotting matrix should plot only that one tile.
# If there are more elements in the array, get the "mini_grid_size" of the wavefunction:
# 2, 3, 4    -> 4
# 5, ..., 9  -> 9
# etc.
# For each array: if it has length 1, the cell has been collapsed,
# so plot the whole mini-grid in that tile's color.
# If the array is longer, color the first square in the first tile's color,
# the next square in the next tile's color, and so on. Color any leftover
# squares with a "void" color.
70977207058 | # -*- coding = utf-8 -*-
# @File Name : extract_features.
# @Date : 2023/6/8 12:17
# @Author : zhiweideng
# @E-mail : zhiweide@usc.edu
import os
import torch
import dataset
import network
import argparse
from tqdm import tqdm
from datetime import date
from train import read_json
from torch.utils.data import DataLoader
def extract_features(config_file, output_folder, model_path, split='train', cuda=True):
    """Run the trained model over a dataset split and dump per-patch
    intermediate features of the three prediction heads to disk.

    Args:
        config_file: path to the JSON experiment config.
        output_folder: root folder; files go to <root>/<today>/<config name>/.
        model_path: checkpoint file holding the model state dict.
        split: config section to load ('train', ...).
        cuda: move model and batches to GPU when True.
    """
    config = read_json(config_file)
    # define the dataset and model
    data_loader = DataLoader(getattr(dataset, config[split]['type'])(**config[split]['args']), batch_size=2)
    model = getattr(network, config['model']['type'])(**config['model']['args'])
    # send to gpu devices
    model = model.cuda() if cuda else model
    checkpoint = torch.load(model_path, map_location=next(model.parameters()).device)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    # feature hooks and output folder: <output_folder>/<date>/<config name>
    features = {}
    output_folder = os.path.join(output_folder, str(date.today()), config['name'])
    os.makedirs(output_folder, exist_ok=True)

    def feature_hook(name):
        # Capture a layer's (detached) output under the given key on each forward.
        def hook(mod, inp, out):
            features[name] = out.detach()
        return hook

    # register hooks on the first conv of each prediction head
    model.recon_conv.conv1.register_forward_hook(feature_hook('sem_feat'))
    model.direction_conv.conv1.register_forward_hook(feature_hook('dir_feat'))
    model.radius_conv.conv1.register_forward_hook(feature_hook('rad_feat'))

    # forward process to extract features
    print('Start Feature Extraction Process')
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for idx, batch in enumerate(tqdm(data_loader, desc='0', unit='b')):
            images = batch['image']
            images = images.cuda() if cuda else images
            image_indices = batch['image_id']
            patch_indices = batch['patch_id']
            _ = model(images)
            for i in range(images.size(0)):
                patch_loc = '{}-{}'.format(image_indices[i], patch_indices[i])
                sem_output_file = os.path.join(output_folder, '{}-sem-{}.pt'.format(split, patch_loc))
                dir_output_file = os.path.join(output_folder, '{}-dir-{}.pt'.format(split, patch_loc))
                rad_output_file = os.path.join(output_folder, '{}-rad-{}.pt'.format(split, patch_loc))
                torch.save(features['sem_feat'][i], sem_output_file)
                torch.save(features['dir_feat'][i], dir_output_file)
                # BUG FIX: 'sem_feat' was saved here before, so the radius
                # features were never actually written.
                torch.save(features['rad_feat'][i], rad_output_file)
# Command-line interface: config path, output root, split, GPU flag and
# checkpoint location for feature extraction.
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file', type=str, default='./configs/drive/adaptive_lc.json')
parser.add_argument('-o', '--output_folder', type=str, default='../features')
parser.add_argument('-s', '--split', type=str, default='train')
parser.add_argument('-g', '--gpu', type=bool, default=True)
parser.add_argument('-p', '--model_path', type=str, default='/ifs/loni/faculty/shi/spectrum/zdeng/MSA_Data/' +
                                                            'SpectralVessel/trained_models/ADAPTIVE_LC/2023-06-09/' +
                                                            'ADAPTIVE_LC-1000-epoch-2023-06-09.pt')


if __name__ == '__main__':
    args = parser.parse_args()
    extract_features(args.config_file, args.output_folder, args.model_path, args.split, args.gpu)
| dengchihwei/SpectralVessel | extract_features.py | extract_features.py | py | 3,256 | python | en | code | 0 | github-code | 13 |
6397772484 | from random import randint
from time import sleep
from dic import dic_accents
from sys import stderr, executable, exit
from subprocess import check_call, CalledProcessError
# Enable ANSI color support on Windows (an empty system() call activates
# VT processing in the console).
from os import system
system("")

# ANSI escape sequences used throughout the game.
COLOR = {
    "RED": "\x1b[91m",
    "GREEN": "\x1b[92m",
    "BLUE": "\x1b[94m",
    "BOLD": "\x1b[1m",
    "BOLDR": "\x1b[1;91m",
    "BOLDG": "\x1b[1;92m",
    "ENDC": "\x1b[0m"
}
def clear():
    """Clear the terminal via ANSI codes (erase screen, move cursor home)."""
    print("\x1b[2J\x1b[H")
def wprint(text, *args, **kwargs) -> None:
    """Print a bold 'Warning :' line to stderr; extra args go to print()."""
    prefix = COLOR["BOLD"] + "Warning :"
    print(prefix, text + COLOR["ENDC"], *args, file=stderr, **kwargs)
def binput(prompt) -> bool:
    """Prompt until the user types a recognized yes/no token, case-insensitively.

    Returns True for 'true', '1', 't', 'y', 'yes', 'i'; False for 'false',
    '0', 'f', 'n', 'no', 'p'.
    """
    str_input = input(COLOR['BOLD'] + prompt + COLOR['ENDC'])
    bool_input = ['true', '1', 't', 'y', 'yes', 'i',
                  'false', '0', 'f', 'n', 'no', 'p']
    # BUG FIX: compare lower-cased here too — previously the loop tested the
    # raw input, so 'Y' or 'YES' kept re-prompting even though the final
    # check below lower-cases the answer.
    while str_input.lower() not in bool_input:
        str_input = input("\x1b[1F\x1b[K" +
                          COLOR["BOLD"] + prompt + COLOR["ENDC"])
    if str_input.lower() in bool_input[:6]:
        return True
    return False
clear()

# Bootstrap: unidecode is required to strip accents; offer to pip-install
# it on the fly, otherwise quit.
try:
    from unidecode import unidecode
except ImportError:
    wprint("Unidecode module necessary but not found.")
    b_install = binput(
        "Do you want to install it with pip or quit this game ?\n(y : install / n : quit))")
    if b_install is True:
        try:
            # install into the same interpreter that is running the game
            check_call(
                [executable, "-m", "pip", "install", "unidecode"])
        except CalledProcessError:
            wprint("Unable to install unidecode.")
            print("Quitting...")
            exit(1)
        from unidecode import unidecode
        print("Unidecode installed.")
        sleep(1)
        clear()
    else:
        wprint("Unidecode is necessary to run this game.")
        print("Quitting...")
        exit(1)
def input_valide(secret: str, tour: int) -> tuple:
    """Read guesses from stdin until one is valid, or the player types "stop".

    A guess is valid when it is in the dictionary and has the same length
    as *secret*.  (Prompts shown to the player are in French.)

    Args:
        secret (str): accent-free word to guess
        tour (int): current attempt number (displayed as tour/6)

    Returns:
        tuple: (letters of the accent-free guess, letters of the accented
        dictionary entry), or the string "stop" if the player aborts.
    """
    prop = unidecode(input(f"Proposition {tour}/6 :\n")).lower()
    prop_in_dic = prop in dic_sans_accents
    while (not prop_in_dic or len(prop) != len(secret)) and prop != "stop":
        if not prop_in_dic:
            prop = unidecode(input(
                "Votre mot n'est pas dans notre dictionnaire, réessayez :\n")).lower()
        elif len(prop) < len(secret):
            prop = unidecode(input("Mot trop court :\n")).lower()
        elif len(prop) > len(secret):
            prop = unidecode(input("Mot trop long :\n")).lower()
        prop_in_dic = prop in dic_sans_accents
    if prop == "stop":
        return "stop"
    else:
        # move the cursor up to overwrite the prompt area
        print("\x1b[2F")
        return (list(prop), list(dic_accents[dic_sans_accents.index(prop)]))
def output(secret: list, prop: list) -> list:
    """Build the colored hint markers for a guess.

    Green dot: correct letter in the correct spot; blue dot: letter present
    elsewhere in the word; red dot: letter absent.  Matched positions are
    starred out so each secret letter is consumed at most once (the blue
    case stars position i, as in the original rules).

    Args:
        secret (list): letters of the secret word
        prop (list): letters of the player's guess

    Returns:
        list: one ANSI-colored dot per letter
    """
    hints = []
    remaining = list(secret)
    for i in range(len(secret)):
        guess_letter = prop[i]
        if guess_letter == remaining[i]:
            hints.append(COLOR["GREEN"] + "◉" + COLOR["ENDC"])
            remaining[i] = "*"
        elif guess_letter in remaining:
            hints.append(COLOR["BLUE"] + "◉" + COLOR["ENDC"])
            remaining[i] = "*"
        else:
            hints.append(COLOR["RED"] + "◉" + COLOR["ENDC"])
    return hints
def main():
    """Play one full game: up to 6 guesses of a random dictionary word.

    Returns:
        str: the player's replay answer — 'oui' to play again, anything
        else to quit ("non" when the player typed "stop").
    """
    mot_secret_accents = dic_accents[randint(0, len(dic_accents)-1)]
    mot_secret = unidecode(mot_secret_accents)
    mot_secret_accents = list(mot_secret_accents)
    mot_secret = list(mot_secret)
    print("Votre mot est composé de ", len(mot_secret), " lettres.\n")
    for chance in range(1, 7):
        prop_mot = input_valide(mot_secret, chance)
        if prop_mot == "stop":
            return "non"
        if prop_mot[0] == mot_secret:
            # win: show the accented secret word in bold green
            print(COLOR["GREEN"] + "\nBravo !" + COLOR["ENDC"], "Vous avez deviné le mot " +
                  COLOR["BOLDG"] + ''.join(map(str, mot_secret_accents)) + COLOR["ENDC"], ".")
            return input("\nPour rejouer entrez 'oui'.\n")
        # show the accented guess with its colored hint dots
        indices = output(mot_secret, prop_mot[0])
        print(COLOR["BOLD"] + ' '.join(map(str, prop_mot[1])))
        print(*indices, sep=' ')
    print(COLOR["RED"] + "\nDomage, " + COLOR["ENDC"] + "vous avez épuisé votre nombre de chances.\nLe mot était ",
          COLOR["BOLDR"] + ' '.join(map(str, mot_secret_accents)) + COLOR["ENDC"])
    return input("Pour rejouer entrez 'oui'.\n")
# Accent-free copy of the dictionary, index-aligned with dic_accents.
dic_sans_accents = []
for e in dic_accents:
    dic_sans_accents.append(unidecode(e))

# Rules banner, then replay games until the player declines or interrupts.
print(COLOR["BOLD"] + "Trouvez le mot secret en proposant des mots de même taille !" + COLOR["ENDC"] +
      "\nUne lettre est absente du mot secret si marquée" + COLOR["RED"] + " rouge" + COLOR["ENDC"] +
      ", présente mais au mauvais emplacement avec" + COLOR["BLUE"] + " bleu" + COLOR["ENDC"] +
      ", et au bon emplacement avec" + COLOR["GREEN"] + " vert" + COLOR["ENDC"] +
      ".\nEntrez 'stop' à tout moment pour quitter le jeu.",
      "\nBonne chance !")
jeu = 'oui'
while jeu == 'oui':
    try:
        jeu = main()
    except KeyboardInterrupt:
        print("\rA bientôt !")
        exit(1)
print("A bientôt !")
| comejv/utils-and-games | wordle/wordle.py | wordle.py | py | 5,632 | python | fr | code | 3 | github-code | 13 |
class Solution:
    def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:
        """A matrix is Toeplitz when every top-left-to-bottom-right diagonal
        holds a single value — equivalently, each cell equals its upper-left
        neighbour."""
        rows = len(matrix)
        cols = len(matrix[0])
        for r in range(1, rows):
            for c in range(1, cols):
                if matrix[r][c] != matrix[r - 1][c - 1]:
                    return False
        return True
39148479248 | #!/bin/python3
#https://www.hackerrank.com/challenges/hackerrank-in-a-string/problem
import sys
answer_list = []
string = "hackerrank"
q = int(input().strip())
# For each query string, greedily scan for the letters of "hackerrank" in
# order; if every letter is found (as a subsequence) the answer is YES.
for i in range(q):
    flag = 0
    s = input().strip()
    list_element = []
    for element in s:
        list_element.append(element)
    index_list = []
    for element in string:
        if element in list_element:
            index = list_element.index(element)
            index_list.append(index)
            # drop everything up to and including the matched letter so the
            # next search continues strictly to its right
            for i in range(index + 1):
                list_element.pop(0)
        else:
            # a required letter is missing from the remaining suffix
            flag = 1
            break
    if flag == 0:
        answer_list.append("YES")
    elif flag == 1:
        answer_list.append("NO")
for element in answer_list:
    print(element)
74679521296 | import itertools
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from owlready2 import get_ontology
from sklearn.metrics import f1_score
def read_ontology(path):
    """Load the ontology at *path* and return its deduplicated classes and
    properties as two lists."""
    onto = get_ontology(path)
    onto.load()
    classes = list(set(onto.classes()))
    properties = list(set(onto.properties()))
    return classes, properties
def get_mappings(filename):
    """Parse an alignment XML file and return (entity1, entity2) name pairs.

    Each pair comes from a <Cell> element; the names are the fragment part
    (after '#') of the rdf:resource attributes.
    """
    with open(filename) as handle:
        soup = BeautifulSoup(handle, 'xml')
    pairs = []
    for cell in soup.find_all('Cell'):
        first = cell.find('entity1').attrs['rdf:resource'].split('#')[1]
        second = cell.find('entity2').attrs['rdf:resource'].split('#')[1]
        pairs.append((first, second))
    return pairs
def get_dataset(ont1_path, ont2_path, alignment_path):
    """Build a labeled pair dataset for ontology matching.

    Every (class, class) and (property, property) pair across the two
    ontologies becomes one row; Match is 1 when the pair appears in the
    reference alignment, 0 otherwise.

    Args:
        ont1_path: path/IRI of the first ontology (loadable by owlready2).
        ont2_path: path/IRI of the second ontology.
        alignment_path: XML alignment file with the reference mappings.

    Returns:
        pandas.DataFrame with columns Ontology1, Ontology2, Entity1,
        Entity2, Parent1, Parent2, Path1, Path2, Match, Type.
    """
    data = []
    mappings = [tuple(x) for x in get_mappings(alignment_path)]
    all_mappings = []

    # Parse both ontologies once.
    classes1, properties1 = read_ontology(ont1_path)
    classes2, properties2 = read_ontology(ont2_path)

    # Generate pairs of classes.
    for class_pair in itertools.product(classes1, classes2):
        pair = (class_pair[0].name, class_pair[1].name)
        if pair in mappings:
            match = 1
            all_mappings.append(pair)
            # Consume the mapping so duplicate names cannot match it twice.
            mappings.remove(pair)
        else:
            match = 0
        data.append((ont1_path, ont2_path, pair[0], pair[1],
                     class_pair[0].is_a[0].name, class_pair[1].is_a[0].name,
                     get_path(class_pair[0]), get_path(class_pair[1]), match,
                     'Class'))

    # Generate pairs of properties.
    for prop_pair in itertools.product(properties1, properties2):
        pair = (prop_pair[0].name, prop_pair[1].name)
        if pair in mappings:
            match = 1
            all_mappings.append(pair)
            mappings.remove(pair)
        else:
            match = 0
        # BUG FIX: the original read parents/paths from the stale `class_pair`
        # loop variable (the last class pair) for every property row; they
        # must come from the current property pair.
        data.append((ont1_path, ont2_path, pair[0], pair[1],
                     prop_pair[0].is_a[0].name, prop_pair[1].is_a[0].name,
                     get_path(prop_pair[0]), get_path(prop_pair[1]), match,
                     'Property'))

    dataset = pd.DataFrame(data, columns=['Ontology1', 'Ontology2', 'Entity1',
                                          'Entity2', 'Parent1', 'Parent2',
                                          'Path1', 'Path2', 'Match', 'Type'])
    return dataset
def get_path(cl):
    """Return the root-to-entity path of *cl* as names joined by '/'.

    Repeatedly climbs to the first parent (cl.is_a[0]), accumulating names
    leaf-first, then reverses the chain so the root comes first.
    """
    path = cl.name
    while True:
        try:
            # Append the primary parent's name; raises IndexError when the
            # current entity has no parents (walked past the hierarchy root).
            path = path + '/' + cl.is_a[0].name
        except IndexError:
            break
        cl = cl.is_a[0]
        # NOTE(review): `cl` is a class object, not a string, so comparing it
        # to the literal 'owl.Thing' presumably never succeeds and termination
        # relies on the IndexError above — confirm whether this was meant to
        # be str(cl) == 'owl.Thing'.
        if cl == 'owl.Thing':
            break
    # Chain was built leaf-first; reverse it before joining.
    return '/'.join(path.split('/')[::-1])
def f1_eval(y_pred, dtrain):
    """Custom eval metric for XGBoost: error = 1 - F1 score.

    Predictions are rounded to hard 0/1 labels before scoring; returns the
    ('f1_err', value) pair expected by xgboost's feval hook.
    """
    labels = dtrain.get_label()
    hard_preds = np.round(y_pred)
    return 'f1_err', 1 - f1_score(labels, hard_preds)
| lbulygin/machine-learning-ontology-matching | utils_datasets.py | utils_datasets.py | py | 3,229 | python | en | code | 11 | github-code | 13 |
12345058325 | # program to return first and last occurence of element x in sorted array
# Logic: exploit the fact that the array is sorted and use a modified binary search.
# The idea is that whenever we find the required element, we should not stop,
# but keep searching to the left for the first occurrence, or to the right
# for the last occurrence.
# We could write two separate functions for the two positions by changing only
# the behaviour when arr[mid] == x, continuing into the left or right half
# depending on which occurrence is needed.
# We can also combine both conditions into one routine: find the first position
# with a value greater than or equal to x, and then the first position with a
# value greater than or equal to x + 1 (the start of the next distinct value);
# the position just before that is the last occurrence of x.
# TIME: O(lg(n)), SPACE: O(1)
# the function for returning the required index
def first_and_last(arr, x):
    """Lower-bound binary search on the sorted list *arr*.

    Returns the smallest index i with arr[i] >= x, or len(arr) when every
    element is smaller than x (e.g. when all elements equal x, 0 comes back
    for x itself and len(arr) for x + 1).
    """
    lo = 0
    hi = len(arr) - 1
    result = len(arr)  # default: no element reaches x
    while lo <= hi:
        middle = (lo + hi) // 2
        if arr[middle] < x:
            # Everything up to `middle` is too small; search the right half.
            lo = middle + 1
        else:
            # Candidate found; keep looking left for an earlier one.
            result = middle
            hi = middle - 1
    return result
# main driver function
# Demo driver: locate the first and last occurrence of x via two
# lower-bound searches. (Removed the unused variable `n`.)
if __name__ == '__main__':
    arr = [1, 1, 1, 1]  # all-equal case: motivates defaulting index to len(arr)
    x = 1
    first_pos = first_and_last(arr, x)
    print(first_pos)
    # The lower bound of x + 1 sits one past the last occurrence of x.
    last_pos = first_and_last(arr, x + 1) - 1
    print(last_pos)
    # If x is absent, the "last" bound falls before the "first" bound,
    # e.g. searching 9 above yields 4 and 3, so we report [-1, -1].
    if first_pos <= last_pos:
        print([first_pos, last_pos])
    else:
        print([-1, -1])
| souravs17031999/100dayscodingchallenge | strings/find_first_and_last_occurence.py | find_first_and_last_occurence.py | py | 2,021 | python | en | code | 43 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.