text stringlengths 38 1.54M |
|---|
# !/usr/bin/python
# Finalized group 1 code
# COMPILED BY HANG and ANDRES
#
#
# STEP 1: YUN TAO
# This step is independent from STEP 1 and STEP 2
# Blasts the ebola genome against the human genome
# Return hits for STEP 4
import os
def ebola_conservative_regions (infile, outfile):
    """Split a multi-FASTA file into one file per record.

    Each record (a '>' header line plus its sequence lines) is written to
    "<n>.<outfile>", where n is the 1-based record index.

    Fixes vs. original: sequence lines are now written too (the original
    wrote only the '>' header lines, leaving the per-region files without
    any sequence), and every file handle is closed.
    """
    current_region = 0
    current_file = None
    try:
        with open(infile) as handle:
            for line in handle:
                if line.startswith('>'):
                    current_region += 1
                    if current_file is not None:
                        current_file.close()
                    current_file = open("%d.%s" % (current_region, outfile), "w")
                if current_file is not None:
                    current_file.write(line)
    finally:
        if current_file is not None:
            current_file.close()
def runBlast(infile, outfile):
    """Blast `infile` against the local human genome database.

    Runs blastn (word size 11, pairwise text format) and writes the
    report to "<outfile>.output".
    """
    command = ("blastn -word_size 11 -query %s -db blastdata/human_genomic"
               " -out %s.output -outfmt 0" % (infile, outfile))
    os.system(command)
#ebola_conservative_regions ('teflon_segs.txt', 'teflon_segment')
#no_segments =11
#for segment in range(no_segments):
# Module-level side effect: blasts the user's genome against the human
# genome database as soon as this file is run/imported.
runBlast ('username.fas', 'ebola')
# parser of alignment from blastn output
# based on the currently deprecated biopython/NCBIStandalone.py
# Biopython's old parser of blastall output
# Small parts of the original codes are modified to fit our needs
"""
blastn_scanner: input a blastn_output file, looking for info we want to parse.
"""
class blastn_scanner (object):
    """Container for the header information and the parsed HSP records
    of one blastn (pairwise-format) report."""

    def __init__(self, version, database_name, database_sequences,
                 database_letters, counts_hits, results):
        # version            : blastn version banner, e.g. "BLASTN 2.2.30+"
        # database_name      : name of the database that was searched
        # database_sequences : number of sequences in the target database
        # database_letters   : total database size in basepairs
        # counts_hits        : number of HSP entries parsed
        # results            : list of blastn_results instances for the next step
        self.version = version
        self.database_name = database_name
        self.database_sequences = database_sequences
        self.database_letters = database_letters
        self.counts_hits = counts_hits
        self.results = results
"""
blastn_results: holder of all info from HSP
"""
class blastn_results (object):
    """One parsed HSP (high-scoring segment pair) entry from a blastn report."""

    def __init__(self, identities, strand, query_sequence, query_start, query_end):
        # identities     : percent identity of the HSP (int; 100 expected for siRNA)
        # strand         : strand polarity, "Plus" or "Minus"
        # query_sequence : query-side aligned sequence (may contain '-' gaps)
        # query_start    : start coordinate of the HSP on the query
        # query_end      : end coordinate of the HSP on the query
        self.identities = identities
        self.strand = strand
        self.query_sequence = query_sequence
        self.query_start = query_start
        self.query_end = query_end
def scan_blastn (blastn_output_file, identity_cutoff = 90):
    """Parse an open pairwise-format (-outfmt 0) blastn report.

    Returns a blastn_scanner holding the header info (version, database
    name and size) plus one blastn_results object per HSP entry.

    NOTE(review): identity_cutoff is accepted but never used here --
    confirm whether hits were meant to be filtered by % identity.
    """
    line = blastn_output_file.readline()
    if not line.startswith("BLASTN"):
        raise TypeError ('Not a BLASTN output file')
    version = line.rstrip() #store "BLATN 2.2.30+"
    # Scan forward to the "Database:" header line.
    while True:
        line = blastn_output_file.readline()
        if line.startswith("Database:"):
            database_name = line.rstrip().split()[1] #store the name of HG
            break
    # Next comes the "<N> sequences; <M> total letters" line.
    while True:
        line = blastn_output_file.readline()
        if "sequences" in line or "total letters" in line:
            words = line.rstrip().split()
            database_sequences = int (words[0].replace (",","") ) #how many sequences in db?
            database_letters = int (words[2].replace (",","") ) #how big is the db?
            break
    while True: #locate the benchmark of hits or no hits
        line = blastn_output_file.readline()
        if "No hits found" in line:
            break #exit()? to terminate
        elif line.startswith ("Sequences producing significant alignments:"):
            break
        elif not line:
            raise TypeError ("No description section in blastn output")
    """
    Move to each entry of HSP
    """
    results = []
    counts_hits =0
    parse_flag = False
    while True:
        hsp_line = blastn_output_file.readline()
        if not hsp_line:
            break
        if "Identities" in hsp_line: #extract %identities
            parse_flag = False
            counts_hits += 1
            # e.g. "Identities = 30/30 (100%), ..." -> field "(100%)," -> 100
            temp = hsp_line.rstrip().split()[3].replace("(","")
            identities = int (temp.replace("%),", ""))
        if "Strand" in hsp_line: #extract strand polarity
            temp = hsp_line.rstrip().replace("/", " ")
            strand = temp.split()[-1]
        if hsp_line.startswith ("Query"): #reach Query line
            temp = hsp_line.rstrip().split()
            query_start = int(temp[1])
            query_sequence = temp[2]
            query_end = int(temp[3])
            # Continuation lines of the same HSP: stitch the aligned
            # query sequence together and extend its coordinates.
            while True:
                hsp_line = blastn_output_file.readline()
                if not hsp_line:
                    break
                if hsp_line.startswith("Query"):
                    temp = hsp_line.rstrip().split()
                    start = int (temp[1])
                    #if abs (start - query_end) !=1:
                    #raise TypeError ("something wrong with the Query sequence positions")
                    # A continuation starting before the current start
                    # means decreasing coordinates (minus-strand query):
                    # treated as an error here.
                    if start < query_start:
                        query_start = start
                        raise TypeError ("start: minus Query or something wrong")
                    query_sequence = query_sequence + temp[2]
                    end = int (temp[3])
                    if end > query_end:
                        query_end = end
                    else:
                        raise TypeError ("end: minus Query or something wrong")
                elif "Score" in hsp_line:
                    # Start of the next HSP: this one parsed completely.
                    parse_flag = True
                    break
                elif "Lambda" in hsp_line:
                    # Statistics footer: last HSP of the report.
                    parse_flag = True
                    break
            """
            record the info extracted from each hsp entry to results
            """
            if parse_flag:
                result = blastn_results (identities, strand, query_sequence, query_start, query_end)
                results.append (result)
    return blastn_scanner (version, database_name, database_sequences, database_letters, counts_hits, results)
def fasta_output (records, fasta_output_file):
    """Write the first `records` parsed query sequences to
    `fasta_output_file` in FASTA format, stripping '-' gap characters.

    NOTE(review): the sequences come from the module-level global `test`
    (the scanner built in the main program below), not from an argument;
    kept for caller compatibility but worth refactoring.
    Fix vs. original: the output file is now closed (the original leaked
    the handle, risking unflushed data).
    """
    with open(fasta_output_file, 'w') as target:
        for i in range(records):
            target.write(">hits%d \n" % i)
            target.write(test.results[i].query_sequence.replace("-", ""))
            target.write("\n")
"""
main program
"""
handle = open ("ebola.output")
#handle = open ("blastp.txt")
test= scan_blastn(handle, identity_cutoff = 90)
fasta_output (test.counts_hits, 'ebola_blastn.fas')
"""
check results:
test.counts_hits
test.results[0].query_sequence
"""
# Part 1 Step 1 : Hang Huynh
# pillomavirus_type_63_uid15486 This assume that the "virus genome files" is saved manually file by file
# grabbing files with common ".fas" files in the same directories
# input "common name" by user
def blastin():
    """Read 'ebola_blastn.fas' and return one cleaned string per
    sequence (non-header) line.

    Only the characters G, A, C and T are kept; whitespace, gaps and
    ambiguity codes are dropped. Blank lines yield empty strings.
    """
    kept = set('GACT')
    cleaned = []
    with open('ebola_blastn.fas', 'r') as handle:
        for line in handle:
            if not line.startswith('>'):
                cleaned.append(''.join(ch for ch in line if ch in kept))
    return cleaned
def editin():
    """Read the user's genome from 'username.fas' and return it as a
    single string containing only G, A, C and T characters (headers,
    whitespace and any other symbols are discarded)."""
    kept = set('GACT')
    parts = []
    with open('username.fas', 'r') as handle:
        for line in handle:
            if not line.startswith('>'):
                parts.append(''.join(ch for ch in line if ch in kept))
    return ''.join(parts)
def reducir():
    """Return the user's genome with every blast-hit segment replaced by
    a newline, cutting human-matching regions out of the genome.

    Fixes vs. original: empty segments are skipped (an empty pattern
    made re.sub insert a newline between every character), and
    str.replace is used so segments are treated as literal text rather
    than regular expressions.
    """
    segment_list = blastin()
    viral_genome = editin()
    for segment in segment_list:
        if segment and segment in viral_genome:
            viral_genome = viral_genome.replace(segment, '\n')
    return viral_genome
def fasta():
    """Write the reduced genome from reducir() to 'ebola_edited.fas'
    under a single '>user_input' FASTA header."""
    edited = reducir()
    with open('ebola_edited.fas', 'w') as merged:
        merged.write('>user_input\n')
        merged.write(edited)
# Finding the common name of the virus genome in the file to be scan against
# the first letter should be capitalized (based on how i saved the file)
def fileDirectory():
    """Merge every FASTA file matching '*<name>*.fna' in the current
    directory into a single file 'mergeall.fas'.

    The virus common name is currently hard coded to 'ebola'; the
    original interactive prompt is kept below, commented out.
    Fix vs. original: files are opened with mode 'r' -- the 'rU'
    universal-newline mode was removed in Python 3.11.
    """
    # file2 = raw_input('input name of virus in lowercase:') #ANDREW LOOK HERE PLEASE
    name = 'ebola'
    pattern = '*' + name + '*.fna'
    filelist = glob.glob(pattern)
    with open ("mergeall.fas", "w") as outfile:
        for f in filelist:
            with open(f, "r") as infile:
                seq_records = SeqIO.parse(infile, "fasta")
                SeqIO.write(seq_records, outfile, "fasta")
# This part create the file called "mergeall.fas" which merge all the file...
# with the common name "user input common name"
# This part merge the "Mergeall.fas" file with "userinput.fas" file to...
# create the merging.fas file
def mergeDirectory():
    """Concatenate 'ebola_edited.fas' (the user's reduced genome) and
    'mergeall.fas' (database genomes) into 'virus.fas', the input for
    the ClustalW alignment step.

    Fix vs. original: files are opened with mode 'r' -- the 'rU'
    universal-newline mode was removed in Python 3.11.
    """
    file5 = ["ebola_edited.fas", "mergeall.fas"]
    with open ("virus.fas", "w") as outfile:
        for f in file5:
            with open(f, "r") as infile:
                seq_records = SeqIO.parse(infile, "fasta")
                SeqIO.write(seq_records, outfile, "fasta")
# the final file ouput called "virus.fas" as the input for the ClustalW alignment
# Part 1 Step 2 : Andres Wong
# This will take the user input merged with deposited database genomes
# As a fasta file
# It will perform a multiple sequencfe alignment with ClustalW
# Then look at conserved regions
# It will scan a window moving over 1 base every time
# And provide conserved regions accounting for half coverage
# Or accounting for regions that have over n% identity
# As a FASTA format
# Then will be fed by yun's step
# And sequences will be removed as needed
# Then the last file will be sent to Group 2
# Written by Andres W
def reformat():
    """Rewrite 'virus.fas' to 'ebola.fas', renaming every FASTA header
    to a uniform '>virus_<n>' (n counts headers from 1) so ClustalW gets
    short unique identifiers; sequence lines are copied unchanged.

    Fix vs. original: both file handles are closed via context managers
    (the original never closed the input file).
    """
    header = 0
    with open('virus.fas', 'r') as shoe, open('ebola.fas', 'w') as zapato:
        for line in shoe:
            if line.startswith('>'):
                header += 1
                zapato.write('>virus_%s\n' % (header))
            else:
                zapato.write(line)
def alineacion():
    """Run a ClustalW multiple sequence alignment on 'ebola.fas'.

    ClustalW derives the output name from the input, producing
    'ebola.aln'.  NOTE(review): ALIGNED is assigned but never passed to
    the command -- it only documents the expected output file name.
    """
    MERGED = "ebola.fas"
    ALIGNED = "ebola.aln"
    INPUT = ClustalwCommandline("clustalw", infile = MERGED)
    INPUT()
def asterix():
    """Return the positions of the '*' conservation marks in the
    ClustalW alignment 'ebola.aln'.

    Consensus lines are recognized by starting with three blanks.  For
    every consensus line an extra 17-column offset is accumulated in
    `blank` -- presumably the fixed ClustalW identifier margin plus the
    newline, so that character offsets map back to alignment positions;
    TODO(review): confirm 17 matches this alignment's margin width.
    """
    zapato = open('ebola.aln', 'r')
    blank = 1        # cumulative margin offset to subtract
    howmuch = 0      # running count of characters seen in consensus lines
    WHERE_IS = []    # collected '*' positions (alignment coordinates)
    location = 0
    for line in zapato:
        # Only consensus lines (leading blanks) carry '*' marks.
        if line[0] == ' ' and line[1] == ' ' and line[2] == ' ':
            blank += 17
            for item in line:
                howmuch += 1
                if item == '*':
                    location = howmuch - blank
                    WHERE_IS.append(location)
    zapato.close()
    return WHERE_IS
def stringed_genome():
    """Return the aligned sequence of the first record ('virus_1') from
    'ebola.aln' as one string of A/C/G/T/'-' characters.

    NOTE(review): the substring test also matches 'virus_10',
    'virus_11', ... -- confirm the record count stays below 10, or
    tighten the match.
    """
    alphabet = {'C', 'T', 'G', 'A', '-'}
    pieces = []
    with open('ebola.aln', 'r') as aln:
        for line in aln:
            if 'virus_1' in line:
                pieces.append(''.join(c for c in line if c in alphabet))
    return ''.join(pieces)
def slip_n_slide():
    """Slide a window over the aligned user genome, scoring each window
    by its fraction of '*' conservation marks, and return the windows
    whose conservation is at least 0.6.

    Windows are cut at gap characters ('-') or at `alianza` (100)
    characters; fragments shorter than `min_length` (19) are dropped.
    After each full pass the genome is advanced by one character
    (`conde` tracks the accumulated shift so '*' positions stay
    aligned), giving every possible window start.
    NOTE(review): indentation reconstructed from a flattened dump --
    verify against the original before relying on exact behavior.
    """
    WHERE_IS = asterix()         # '*' positions in alignment coordinates
    lower = WHERE_IS             # remaining '*' positions for this pass
    WALDO = stringed_genome()    # aligned user sequence (with gaps)
    dresifus = ''                # current window fragment
    conde = 0.0                  # characters trimmed off the front so far
    cuenta = 0.0                 # characters consumed in current window
    GANAMOS = []                 # winning segments (returned)
    campeones = []               # all candidate segments
    grones = []                  # conservation score per candidate
    lima = 0.0                   # '*' count inside current window
    sunny = 1                    # window start (1-based)
    window = 0                   # window offset
    alianza = 100.0 #$$$         # maximum window length
    hold_em = alianza #$$$       # remaining genome length
    min_length = 19.0 #$$$       # minimum usable fragment length (siRNA)
    while hold_em >= min_length:
        for nucleotide in WALDO:
            cuenta += 1
            dresifus += nucleotide
            if nucleotide == '-' and cuenta <= alianza:
                # Gap hit: close the fragment (without the '-').
                dresifus = dresifus[:-1]
                recuenta = cuenta - 1.0
                if len(dresifus) >= min_length:
                    campeones.append(dresifus)
                    # Count '*' marks falling inside this window.
                    for star in lower:
                        asterisco = star + conde
                        if asterisco >= sunny and asterisco < (window + cuenta):
                            lima += 1
                            abajo = lower.index(star)
                        if asterisco >= (window + cuenta):
                            break
                    homology = lima/recuenta
                    grones.append(homology)
                    lower = lower[abajo:]
                sunny += cuenta
                window += cuenta
                lima = 0
                dresifus = ''
                cuenta = 0
            elif cuenta == alianza:
                # Window filled to the maximum length without a gap.
                campeones.append(dresifus)
                for star in lower:
                    asterisco = star + conde
                    if asterisco >= sunny and asterisco <= (window + alianza):
                        lima += 1
                        abajo = lower.index(star)
                    if asterisco > (window + alianza):
                        break
                homology = lima/alianza
                grones.append(homology)
                lower = lower[abajo:]
                sunny += alianza
                window += alianza
                lima = 0
                dresifus = ''
                cuenta = 0
        # Shift the genome by one and start the next pass.
        lower = WHERE_IS
        cuenta = 0.0
        sunny = 1.0
        window = 0.0
        dresifus = ''
        WALDO = WALDO[1:]
        hold_em = len(WALDO)
        conde += 1.0
    if hold_em < min_length:
        # criteria (the median score) is computed but unused below --
        # the fixed 0.6 cutoff is what actually selects winners.
        criteria = numpy.median(grones)
        win = -1
        for candidate in grones:
            win += 1
            if candidate >= .6: #$$$
                i_did_it = campeones[win]
                GANAMOS.append(i_did_it)
        return GANAMOS
def find_unix():
    """Return slip_n_slide()'s winning segments with duplicates removed,
    keeping first-seen order (dict preserves insertion order)."""
    return list(dict.fromkeys(slip_n_slide()))
def have_a_blast():
    """Driver for step 2: rename headers, run the ClustalW alignment,
    then write the unique winning segments to 'andres.fas' under
    '>weapon_<n>' headers (n starting at 0)."""
    reformat()
    alineacion()
    winners = find_unix()
    with open('andres.fas', 'w') as inca:
        for fasta_id, segment in enumerate(winners):
            inca.write('>weapon_%s\n' % (fasta_id))
            inca.write('%s\n' % (segment))
def group_one():
    """Run the whole group-1 pipeline: cut blast hits out of the user
    genome, merge it with the database genomes, align with ClustalW,
    and write candidate segments to 'andres.fas'."""
    fasta()
    fileDirectory()
    mergeDirectory()
    have_a_blast()
# DONT FORGET TO RUN THE PROGRAM!
import re
import os, glob
from Bio import SeqIO
import fileinput
from Bio import SeqIO
from Bio.Align.Applications import ClustalwCommandline
from Bio import AlignIO
import numpy
# Entry point: run the pipeline, then pause so the console stays open.
# NOTE(review): raw_input is Python 2 -- use input() under Python 3.
group_one()
raw_input('Your file has been saved as andres.fas\nPress ENTER if you are done')
# END GROUP 1 CODE
|
from ..advans import *
from ..core.shader import calc_viewdir
@ti.data_oriented
class SSR:
    """Screen-space reflection pass.

    Importance-samples reflection directions from the material BRDF and
    ray-marches them in view space against the depth buffer; hits are
    composited onto the rendered image in `apply`.  With TAA enabled,
    fewer samples are taken per frame and no spatial blur is applied
    (temporal accumulation is presumably expected to denoise -- see
    init_params); without TAA a small box blur denoises the jittered
    samples.
    """
    def __init__(self, res, norm, coor, mtlid, mtltab, taa=False):
        # res    : framebuffer resolution (2-vector)
        # norm   : per-pixel normal field (zero normal marks background)
        # coor   : per-pixel texture-coordinate field
        # mtlid  : per-pixel material-id field
        # mtltab : material table, looked up by material id
        # taa    : whether temporal antialiasing is active
        self.res = tovector(res)
        # Reflection buffer: rgb = reflected color, w = hit coverage.
        self.img = ti.Vector.field(4, float, self.res)
        self.nsamples = ti.field(int, ())
        self.nsteps = ti.field(int, ())
        self.stepsize = ti.field(float, ())
        self.tolerance = ti.field(float, ())
        self.blurring = ti.field(int, ())
        self.norm = norm
        self.coor = coor
        self.mtlid = mtlid
        self.mtltab = mtltab
        self.taa = taa

        @ti.materialize_callback
        def init_params():
            # Fewer samples but more march steps under TAA.
            self.nsamples[None] = 32 if not taa else 12
            self.nsteps[None] = 32 if not taa else 64
            self.stepsize[None] = 2
            self.tolerance[None] = 15
            self.blurring[None] = 4

    @ti.kernel
    def apply(self, image: ti.template()):
        """Composite the reflection buffer onto `image`."""
        for i, j in self.img:
            res = V(0., 0., 0., 0.)
            if ti.static(self.taa):
                res = self.img[i, j]
            else:
                # Box blur of size blurring x blurring to denoise.
                rad = self.blurring[None]
                offs = rad // 2
                for k, l in ti.ndrange(rad, rad):
                    res += self.img[i + k - offs, j + l - offs]
                res /= rad**2
            # res.w is the reflection coverage: dim the base color by it,
            # then add the reflected color.
            image[i, j] *= 1 - res.w
            image[i, j] += res.xyz

    @ti.kernel
    def render(self, engine: ti.template(), image: ti.template()):
        """Fill the reflection buffer for every foreground pixel."""
        for P in ti.grouped(image):
            # Zero-length normal marks background: no reflection.
            if self.norm[P].norm_sqr() < eps:
                self.img[P] = 0
            else:
                self.render_at(engine, image, P)

    @ti.func
    def render_at(self, engine, image: ti.template(), P):
        """Trace SSR rays for pixel P and store the averaged result."""
        normal = self.norm[P]
        texcoord = self.coor[P]
        mtlid = self.mtlid[P]
        p = P + engine.bias[None]
        # Reconstruct the world-space position from viewport + depth.
        vpos = V23(engine.from_viewport(p), engine.depth[P] / engine.maxdepth)
        pos = mapply_pos(engine.V2W[None], vpos)
        viewdir = calc_viewdir(engine, p)
        material = self.mtltab.get(mtlid)
        res = V(0., 0., 0., 0.)
        tina.Input.spec_g_pars({
            'pos': pos,
            'color': V(1., 1., 1.),
            'normal': normal,
            'texcoord': texcoord,
        })
        rng = tina.TaichiRNG()
        if ti.static(not self.taa):
            # Hash RNG seeded per blur-tile position so the box blur in
            # apply() averages differently-jittered neighbors.
            pid = P % self.blurring[None]
            rng = ti.static(tina.WangHashRNG(pid))
        nsamples = self.nsamples[None]
        nsteps = self.nsteps[None]
        for i in range(nsamples):
            # Sample a reflection direction from the material BRDF.
            odir, wei, rough = material.sample(viewdir, normal, 1, rng)
            # Shorter steps for rays grazing the view direction.
            step = self.stepsize[None] / (
                    ti.sqrt(1 - odir.dot(viewdir)**2) * nsteps)
            # Depth tolerance scaled to one view-space step along -viewdir.
            vtol = self.tolerance[None] * (
                    mapply_pos(engine.W2V[None], pos - viewdir / nsteps
                        ).z - mapply_pos(engine.W2V[None], pos).z)
            # Random offset along the ray to decorrelate banding.
            ro = pos + odir * rng.random() * step
            for j in range(nsteps):
                ro += odir * step
                vro = mapply_pos(engine.W2V[None], ro)
                # Stop once the ray leaves clip space.
                if not all(-1 <= vro <= 1):
                    break
                D = engine.to_viewport(vro)
                depth = engine.depth[int(D)] / engine.maxdepth
                # Hit: the ray passed just behind the depth buffer.
                if vro.z - vtol < depth < vro.z:
                    clr = bilerp(image, D) * wei
                    res += V34(clr, 1.0)
                    break
        tina.Input.clear_g_pars()
        self.img[P] = res / nsamples
|
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
from zerver.lib.test_classes import ZulipTestCase
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
    """Sanity checks for generate_time_series_data with deterministic
    settings (spikiness=0, autocorrelation=1)."""

    def test_deterministic_settings(self) -> None:
        # test basic business_hour / non_business_hour calculation
        # test we get an array of the right length with frequency=CountStat.DAY
        data = generate_time_series_data(
            days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0
        )
        # Presumably 8 business + 16 non-business hours per weekday
        # (8*20 + 16*15 = 400) and 24 non-business on weekends
        # (24*15 = 360) -- confirm against the fixture implementation.
        self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
        data = generate_time_series_data(
            days=1,
            business_hours_base=2000,
            non_business_hours_base=1500,
            growth=2,
            spikiness=0,
            frequency=CountStat.HOUR,
        )
        # test we get an array of the right length with frequency=CountStat.HOUR
        self.assert_length(data, 24)
        # test that growth doesn't affect the first data point
        self.assertEqual(data[0], 2000)
        # test that the last data point is growth times what it otherwise would be
        self.assertEqual(data[-1], 1500 * 2)
        # test autocorrelation == 1, since that's the easiest value to test
        data = generate_time_series_data(
            days=1,
            business_hours_base=2000,
            non_business_hours_base=2000,
            autocorrelation=1,
            frequency=CountStat.HOUR,
        )
        self.assertEqual(data[0], data[1])
        self.assertEqual(data[0], data[-1])
|
import random
import sys
def check_if_player_is_dead():
    """End the game if the player's hit points (module global
    Current_hit_point) are gone.

    Fixes vs. original: compare with <= 0 -- damage can drop hit points
    below zero, which '== 0' missed -- and coerce to int, because the
    character sheet initially stores the value as a string (on which
    '== 0' was always False).
    """
    if int(Current_hit_point) <= 0:
        print("You have died.")
        sys.exit()
def skeleton_damage(skeleton_hitpoints):
    """Roll the player's damage (1..potential_attack_damage, a module
    global read from the character sheet), report the result, and return
    the skeleton's remaining hit points."""
    dealt = random.randint(1, int(potential_attack_damage))
    print("You deal " + str(dealt) + " damage towards the skeleton")
    remaining = skeleton_hitpoints - dealt
    if remaining <= 0:
        print("you killed the skeleton! he returns to the under.")
    else:
        print("The skeleton staggers, but is 'alive'.")
        print("")
    return remaining
def sword_bow_attack(enemy):
    """Resolve one player attack against `enemy`, narrating bow or sword
    flavor text depending on the module global player_attack, then
    update and return the enemy's remaining hit points."""
    if "bow" in player_attack:
        print("\nyou take out an arrow with a silent confidence that speaks to your skill \n"
              "and pull it against the string. You let it fly")
    else:
        print("You hold your sword as a fire burns hot inside you and you strike!")
    enemy.skel_current_hitpoints = skeleton_damage(enemy.skel_current_hitpoints)
    return enemy.skel_current_hitpoints
def skel_attack_to_player(skeleton, current_hit_points):
    """Apply one skeleton attack (1..skeleton.skel_attack damage) to the
    player and return the player's new hit-point total.

    Fix vs. original: the status line now reports the *updated* local
    total instead of the stale module global Current_hit_point.
    """
    enemy_damage = random.randint(1, skeleton.skel_attack)
    print("The skeleton hits you with his sword, you take " + str(enemy_damage) + " damage")
    current_hit_points = int(current_hit_points) - enemy_damage
    print("You have " + str(current_hit_points) + " hitpoints\n")
    return current_hit_points
# main
print("Welcome to my text-based DnD adventure" + "\n")
# Load the player's character sheet; each line must be "label:value".
with open("Eldar_Dracarys.txt", "r") as character_sheet:
    data = character_sheet.readlines()
# assignment unpacking
# for getting the player sheet into the game, character sheet must follow this format
# NOTE(review): '/n' below is presumably a typo for '\n'; it still works
# because split("/n") matches nothing, so each value keeps its trailing
# newline (later int() calls tolerate that) -- confirm intended format.
name, Strength, S_modifier,\
Dexterity, D_modifier, Constitution, C_modifier, Intelligence,\
I_modifier, Wisdom, W_modifier, Charisma, CH_modifier, Hit_points,\
Current_hit_point, potential_attack_damage\
= [d.split(":")[1].split("/n")[0] for d in data]
# for getting a skeleton (game enemy) into the game
with open("Skeleton.txt", "r") as skeleton_sheet:
    skel_data = skeleton_sheet.readlines()
skel_attack, skel_hitpoints, skel_current_hitpoints = [d.split(":")[1].split("/n")[0] for d in skel_data]
class Skeletons:
    """A skeleton enemy: attack die size, max and current hit points,
    and distance (in player turns) from the player.  Values arrive as
    strings from the sheet files, hence the int() coercions."""
    def __init__(self, attack, hitpoints, current_hitpoints, distance):
        self.skel_attack = int(attack)
        self.skel_hitpoints = int(hitpoints)
        self.skel_current_hitpoints = int(current_hitpoints)
        self.skel_distance = int(distance)
print("Your character that you have loaded in" + "\n")
print(name)
game_action = input("you approach a cave with a wide mouth. It is very mysterious and you feel danger emanating "
"from it. do you enter? (y) or (n) : ")
print("")
if "n" in game_action:
print("You go home to your mom and she makes you cookies, the end")
sys.exit()
print("you enter the cave and it's dark with few torches lighting your way. you're not sure how the torches stay \n"\
"lit as the cave looks looks like it hasn't been entered in centuries. you also notice that the cave seems.. \n"\
"artificial. The cave is still rock and has random bumps to it, but the ground is relatively level and \n"\
"easy to walk across. It is also follows an slight incline down into the ground." + "\n")
print("you hear a shuffling slowly, but steadily getting louder. after walking towards it, you see a skeleton with a "
"sword! It's starts walking towards you and it looks like it's going to attack!")
first_skel = Skeletons(skel_attack, skel_hitpoints, skel_current_hitpoints, 1)
# first encounter with a skeleton
# Fight loop: the player attacks each turn; once the skeleton has closed
# the distance (skel_distance <= 0) and is still alive, it strikes back.
while first_skel.skel_current_hitpoints > 0:
    player_attack = input("do you use your bow or sword? : ")
    sword_bow_attack(first_skel)
    # Fix vs. original: 'and' instead of bitwise '&'.  Operator
    # precedence made the original parse as a chained comparison against
    # (0 & hitpoints), which is always 0, so the condition was always
    # False and the skeleton could never attack.
    if first_skel.skel_distance <= 0 and first_skel.skel_current_hitpoints > 0:
        Current_hit_point = skel_attack_to_player(first_skel, Current_hit_point)
    first_skel.skel_distance -= 1
    check_if_player_is_dead()
print("After your victory, you continue deeper into the cave. You realize you haven't made any\n"
"kind of turn or twist, it just goes further. Eventually you come across a door on your right.\n"
"It's an ancient door and you're not sure it will even open due to its age.\n")
first_door = input("Do you try to open and enter the room? (y) or (n) : ")
has_read_note = False
# second encounter with enemies
if "y" in first_door:
if int(Strength) > 10:
print("You open the door even though its hinges were rusted and the door was heavy.\n"
"You walk in and see a chest at the back of the room. As you approach it, two skeletons\n"
"appear from the ground. they both have swords with one being 15 ft away. and the other\n"
"being 30 ft away. You must fight.\n")
room_skel1 = Skeletons(skel_attack, skel_hitpoints, skel_current_hitpoints, 1)
room_skel2 = Skeletons(skel_attack, skel_hitpoints, skel_current_hitpoints, 2)
while room_skel1.skel_current_hitpoints > 0 or room_skel2.skel_current_hitpoints > 0:
player_attack = input("do you use your bow or sword? : ")
if room_skel1.skel_current_hitpoints > 0 and room_skel2.skel_current_hitpoints > 0:
attack_which = int(input("Which skeleton? (1) for first one (he is closest), (2) for second one : "))
elif room_skel2.skel_current_hitpoints > 0 and room_skel1.skel_current_hitpoints <= 0:
attack_which = 2
elif room_skel2.skel_current_hitpoints <= 0 and room_skel1.skel_current_hitpoints > 0:
attack_which = 1
if attack_which == 1:
room_skel1.skel_current_hitpoints = sword_bow_attack(room_skel1)
else:
room_skel2.skel_current_hitpoints = sword_bow_attack(room_skel2)
if room_skel1.skel_distance <= 0 and room_skel1.skel_current_hitpoints > 0:
Current_hit_point = skel_attack_to_player(room_skel1, Current_hit_point)
if room_skel2.skel_distance <= 0 and room_skel2.skel_current_hitpoints > 0:
Current_hit_point = skel_attack_to_player(room_skel2, Current_hit_point)
room_skel1.skel_distance -= 1
room_skel2.skel_distance -= 1
check_if_player_is_dead()
print("\nYou defeated the skeletons and you open the chest to find a note that read\n"
"'The Dragon loves when you compliment him on his gold stash.'")
has_read_note = True
else:
print("you could not open the door, it was too heavy. You continue on.\n")
print("You start your way down the cave again.\n")
print("As you make your way further, you notice the cave starts the widen gradually. You see a light far into the \n"
"distance. When you reach near the end of the tunnel, you stare at the mountains of gold, diamonds, rubies, \n"
"Then you see the dragon. She's massive. It dwarfs war elephants. Its scales are a beautiful dark red with its \n"
"wings on its two front 'arms'. It doesnt notice you.\n")
does_player_sneak = input(int("You see two ways to go about this.\n\n"
"1) You sneak closer to the dragon to either talk to it or attempt to kill it\n\n"
"2) You can run in, bows blazing and attempt to scare it. Doing so might intimidate the\n"
"dragon or it might not, it's a risk you'd have to take.\n\n"
"What do you chose to do? (1) for sneaking (2) for running in : "))
# Third and final encounter with the dragon
if does_player_sneak == 1:
print("\nYou choose to sneak closer to the dragon. He does not see or hear you.\n")
player_attack_or_negotiate = input(int(("You are now close to the dragon. You think of two options before you\n"
"1) You could choose to attack the dragon 2) You could choose to negotiate"
"with the dragon : ")))
if player_attack_or_negotiate == 1:
else:
else:
print("You charge in and yell with a battle cry that echos the cave. The dragon hears you and turns to face you,\n"
" ready for battle.")
print("As you get closer the dragon rears its head and its throat begins to bellow and turn into a searing blue\n"
" color. The dragon unleashes hell from its mouth and the gates welcome you with open arms as you are\n"
" incinerated instantly. Your hitpoints are reduced to zero.")
Current_hit_point = 0
check_if_player_is_dead() |
# Python exercise 060: read a number and show its factorial, e.g.
# 5! = 5 x 4 x 3 x 2 x 1 = 120
# Fixes vs. original: 'from math import sqrt fatorial' was a syntax
# error; factorial() was called before x existed; and the manual loop
# gave total = 0 for input 0 (0! is 1).
from math import factorial

x = int(input('\033[1;34mDigite um número para calcular seu Fatorial:\033[m '))
print('Calculando {}! = '.format(x), end='')
total = factorial(x)  # correct even for x = 0
while x > 1:
    print('{} x '.format(x), end='')
    x -= 1
print('1 = {}'.format(total))
|
import numpy as np
from sklearn import metrics
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os.path
import numba
class RBFN(object):
    """Radial basis function network with Gaussian basis functions.

    Only the output weights are trained (batch gradient descent);
    centers and sigma stay fixed after random initialization.  Results
    are appended to a per-experiment text log, and the loss curve can be
    saved as a PNG.
    """
    def __init__(self, experiment_id, in_dim, basis_fxns):
        self.id = "{}.txt".format(experiment_id)  # results log file name
        self.in_dim = in_dim #in_dim: dimension of the input data
        self.basis_fxns = basis_fxns #basis_fxns: number of hidden radial basis functions
        # Fixed random centers in [-2.3, 2.3]; small random initial weights.
        self.centers = np.random.uniform(low=-2.3, high=2.3, size=(self.basis_fxns, self.in_dim))
        self.weights = np.random.uniform(low=-.1, high=.1, size=(basis_fxns, 1)) # initially random
        self.epochs = 3 # number of times the network compares a
        self.ada = .2 # a scalar for the corrections to be made to weights in the network
        self.sigma = 1.69 #Sigma is the spread of the basis function
        self.lossHistory = []   # one summed-squared-error value per epoch
        self.iteration=1        # train/test cycle counter for the log
    '''
    The below method takes N number of M-dimensional vectors (as the matrix X)
    for each input vector, each radial basis function is applied and put into the matrix G
    The rows of g represent an input vector and each column represents a radial basis function
    Therefore the element g[0,0] represents the first radial basis function applied to the first input vector
    The dot product of the matrix g and the RBFN's weights yields the output or predicted value from the network
    '''
    # NOTE(review): bare @numba.jit on a method taking self/ndarrays
    # likely falls back to object mode (and is deprecated in modern
    # numba) -- confirm it actually speeds this up.
    @numba.jit
    def feed_forward(self, X):
        g = np.empty((len(X), self.basis_fxns))
        for data_point_arg in range(0, len(X)):
            for center_arg in range(0, self.basis_fxns):
                # The below applies the basis function to each input vector
                g[data_point_arg, center_arg] = np.exp((-np.linalg.norm(self.centers[center_arg]-X[data_point_arg])**2)/(2*(self.sigma**2)))
        # The matrix g represents each basis function applied to each input vector
        # The dot product of g with the network's weights yields the predicted outputs for each input vector
        return g
    """
    The train_wgts() method trains self.weights using gradient descent
    Where X is a numpy matrix of the input training samples
    with dimensions (number of data samples)x(input dimensions)
    And Y is a numpy matrix of the target output
    with dimensions (number of data samples)x(1)
    """
    def train_wgts(self,X,Y):
        self.grad_descent(X, Y)
        self.iteration+=1
    '''
    Chose to implement gradient descent as an auxiliary function called by train_wgts()
    This function takes a set of inputs X, predicts their outputs, compares that prediction to the actual outputs
    calculates the error
    '''
    def grad_descent(self, X, Y):
        for i in range(0, self.epochs):
            print("epoch{}".format(i))
            #The "inputs by functions" matrix is the same matrix g from the
            #feed_forward() method
            input_x_fxn_matrix = self.feed_forward(X)
            #The matrix of predicted outputs is calculated by taking the dot product
            #of the "inputs by functions" matrix and the weights of the network
            predicted = input_x_fxn_matrix.dot(self.weights)
            error = (predicted-Y)
            loss = np.sum(np.square(error))
            self.lossHistory.append(loss)
            # divide by number of inputs to scale the gradients
            gradient = input_x_fxn_matrix.T.dot(error)
            gradient = -1*self.ada*gradient/X.shape[0]
            self.weights += gradient#update the weights
    """
    The test() function takes a set of input vectors and predicts the output
    X is a numpy matrix of test data points
    dimensions of X are (number input samples)x(number input dimensions)
    """
    def test(self, X, Y):
        i = self.iteration-1
        G = self.feed_forward(X).dot(self.weights)
        mae = metrics.mean_absolute_error(Y, G)
        # NOTE(review): named rmse but mean_squared_error returns the
        # MSE -- no square root is taken before logging.
        rmse = metrics.mean_squared_error(Y, G)
        A = np.hstack((X,G)) #set of vectors of predicted points
        B = np.hstack((X,Y)) #set of vectors for actual points
        res = 1 - np.dot(A / np.linalg.norm(A, axis=1)[..., None], (B / np.linalg.norm(B, axis=1)[..., None]).T)# compute cosine distance between vectors
        cos_dist = res.mean()# mean cosine distance
        reults_string = "\nIteration{}\n\nRMSE:{}\nMAE:{}\nMean Cosine similarity{}\n\n".format(i, rmse, mae, cos_dist)
        # First write creates the log with a header of the hyperparameters.
        if not os.path.isfile(self.id):
            f = open(self.id, "w")
            header = "{}:\nAda:{}\nEpochs:{}\nBasis functions:{}\nSigma:{}\n".format(self.id, self.ada, self.epochs, self.basis_fxns, self.sigma)
            f.write(header)
        else:
            f = open(self.id, "a")
        f.write(reults_string)
        f.close()
    '''
    Prints a plot of the error versus training iterations
    '''
    def print_results(self):
        # NOTE(review): self.id already ends in ".txt", so the plot is
        # saved as "<experiment>.txt.png".
        file_name = "{}.png".format(self.id)
        fig = plt.figure()
        plt.plot(np.arange(0, len(self.lossHistory)), self.lossHistory)
        fig.suptitle("Training Loss")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss")
        fig.savefig(file_name)
        plt.close(fig)
        self.lossHistory = []
#!/usr/bin/python
import sys
import os
# import time
import fcntl
import struct
import socket
import subprocess
# These modules will be loaded later after downloading
# Deferred imports: installed by initialize_system() via apt-get, then
# bound with __import__ at the end of that function.
iniparse = None
psutil = None
def kill_process(process_name):
    """Kill every running process whose name equals `process_name`.

    Fix vs. original: in psutil >= 2.0 ``Process.name`` is a method, so
    the old attribute comparison never matched; both APIs are handled.
    """
    for proc in psutil.process_iter():
        name = proc.name() if callable(proc.name) else proc.name
        if name == process_name:
            proc.kill()
def get_ip_address(ifname):
    # Return the IPv4 address bound to network interface `ifname` using
    # the Linux-only SIOCGIFADDR ioctl; exits the script on any failure.
    # NOTE: Python 2 code (print statement below).
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bytes 20:24 of the returned ifreq struct hold the address.
        return socket.inet_ntoa(fcntl.ioctl(s.fileno(),
                                            0x8915,  # SIOCGIFADDR
                                            struct.pack('256s', ifname[:15])
                                            )[20:24])
    except Exception:
        print "Cannot get IP Address for Interface %s" % ifname
        sys.exit(1)
def delete_file(file_path):
    """Remove `file_path` if it is an existing file; otherwise report
    the problem and return without raising."""
    if not os.path.isfile(file_path):
        print("Error: %s file not found" % file_path)
        return
    os.remove(file_path)
def write_to_file(file_path, content):
    """Append `content` to `file_path`, creating the file if needed.

    Fix vs. original: the handle is closed via a context manager (the
    original relied on refcounting to flush and close).
    """
    with open(file_path, "a") as f:
        f.write(content)
def add_to_conf(conf_file, section, param, val):
    """Set `param` = `val` (with a trailing newline) in `section` of the
    ini file `conf_file`, creating the section if needed, and write the
    file back in place.

    Fix vs. original: the read handle is closed -- readfp(open(...))
    leaked it.
    """
    config = iniparse.ConfigParser()
    with open(conf_file) as f:
        config.readfp(f)
    if not config.has_section(section):
        config.add_section(section)
    val += '\n'
    config.set(section, param, val)
    with open(conf_file, 'w') as f:
        config.write(f)
def delete_from_conf(conf_file, section, param):
    """Remove `param` from `section` of the ini file `conf_file`, or the
    whole section when `param` is None, then write the file back.

    Fix vs. original: the read handle is closed -- readfp(open(...))
    leaked it.
    """
    config = iniparse.ConfigParser()
    with open(conf_file) as f:
        config.readfp(f)
    if param is None:
        config.remove_section(section)
    else:
        config.remove_option(section, param)
    with open(conf_file, 'w') as f:
        config.write(f)
def get_from_conf(conf_file, section, param):
    """Return the value of `param` in `section` of the ini file
    `conf_file`; raise if no parameter name is given.

    Fix vs. original: the read handle is closed -- readfp(open(...))
    leaked it.
    """
    if param is None:
        raise Exception("parameter missing")
    config = iniparse.ConfigParser()
    with open(conf_file) as f:
        config.readfp(f)
    return config.get(section, param)
def print_format(string):
    # Print `string` inside a +----+ ASCII box, sized to the text.
    # NOTE: Python 2 code (print statements).
    print "+%s+" % ("-" * len(string))
    print "|%s|" % string
    print "+%s+" % ("-" * len(string))
def execute(command, display=False):
    # Run `command` through the shell and return its stripped stdout
    # (stderr is merged into stdout).  With display=True the output is
    # echoed line by line while the command runs.  On a non-zero exit
    # the failure is reported and the output raised as an Exception.
    # NOTE: Python 2 code (print statements); shell=True means `command`
    # must be trusted -- it is built from literals in this script.
    print_format("Executing : %s " % command)
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if display:
        while True:
            nextline = process.stdout.readline()
            if nextline == '' and process.poll() != None:
                break
            sys.stdout.write(nextline)
            sys.stdout.flush()
        output, stderr = process.communicate()
        exitCode = process.returncode
    else:
        output, stderr = process.communicate()
        exitCode = process.returncode
    if (exitCode == 0):
        return output.strip()
    else:
        # NOTE(review): stderr is always None here because it was
        # redirected to stdout above.
        print "Error", stderr
        print "Failed to execute command %s" % command
        print exitCode, output
        raise Exception(output)
def initialize_system():
    # Prepare an Ubuntu host for the OpenStack Juno setup: refresh apt,
    # install prerequisite packages, switch the cloud-archive repo from
    # icehouse to juno, enable IPv4 forwarding, then load the deferred
    # iniparse/psutil modules installed above.  Must run as root.
    if not os.geteuid() == 0:
        sys.exit('Please re-run the script with root user')
    execute("apt-get clean", True)
    execute("apt-get autoclean -y", True)
    execute("apt-get update -y", True)
    execute("apt-get install ubuntu-cloud-keyring python-setuptools python-iniparse python-psutil -y", True)
    delete_file("/etc/apt/sources.list.d/icehouse.list")
    execute("echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main >> /etc/apt/sources.list.d/juno.list")
    execute("apt-get update -y", True)
    execute("apt-get install vlan bridge-utils ethtool -y", True)
    execute("sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf")
    # Bind the deferred imports now that the packages exist.
    global iniparse
    if iniparse is None:
        iniparse = __import__('iniparse')
    global psutil
    if psutil is None:
        psutil = __import__('psutil')
|
from data_generator.tokenizer_wo_tf import EncoderUnitOld
class EncoderForNLI(EncoderUnitOld):
    """Encoder for NLI token pairs: produces BERT-style
    (input_ids, input_mask, segment_ids) triples, placing the document
    as segment A and the query as segment B."""

    def __init__(self, max_sequence, voca_path):
        super(EncoderForNLI, self).__init__(max_sequence, voca_path)
        convert = self.encoder.ft.convert_tokens_to_ids
        # Cache the special-token ids for downstream use.
        self.CLS_ID = convert(["[CLS]"])[0]
        self.SEP_ID = convert(["[SEP]"])[0]

    def encode_token_pairs(self, maybe_query, maybe_document):
        # Note the deliberate swap: the document comes first.
        ids_first = self.encoder.ft.convert_tokens_to_ids(maybe_document)
        ids_second = self.encoder.ft.convert_tokens_to_ids(maybe_query)
        encoded = self.encode_inner(ids_first, ids_second)
        return encoded["input_ids"], encoded["input_mask"], encoded["segment_ids"]
from django.shortcuts import render
from rest_framework import viewsets
from restapp.serializers import TaskSerializers
from restapp.models import Task
from rest_framework import filters
# Create your views here.
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD API over all tasks, ordered by creation date (oldest first)."""
    # replaced by filters
    queryset = Task.objects.all().order_by('date_created')
    # queryset = Task.objects.all()
    serializer_class = TaskSerializers
    # filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
    # filter_fields = ('completed', )
    # ordering = ('date_created',)
    # replaced by filters
class DueTaskViewSet(viewsets.ModelViewSet):
    """API restricted to tasks that are not yet completed, oldest first."""
    queryset = Task.objects.filter(completed=False).order_by('date_created')
    serializer_class = TaskSerializers
class CompletedTaskViewSet(viewsets.ModelViewSet):
    """API restricted to completed tasks, oldest first."""
    queryset = Task.objects.filter(completed=True).order_by('date_created')
    serializer_class = TaskSerializers
|
import core.ffmpeg_decoder as fd
import core.sub_generator as sg
import core.spectrum as sp
from core.naive_vad2 import *
import VLC.vlc_wx as vlc_wx
import sys
import wx
class MainFrame(vlc_wx.MyFrame):
    """VLC player frame wired to an automatic subtitle pipeline.

    The pipeline is: ffmpeg decode -> voice activity detection ->
    subtitle generation (recognition / translation) -> spectrum display.
    """

    def __init__(self, title):
        fram = vlc_wx.MyFrame.__init__(self, title)
        # Path of the generated .srt file (set in OnOpen).
        self.subtitle = None
        # Handle on the subtitle generator's output stream.
        self.ohandle = None
        # End time (seconds) of the most recent subtitle line read.
        self.end = 0

    def OnOpen(self, evt):
        """Open a media file and start the decode/VAD/subtitle/spectrum
        pipeline threads for it."""
        super(MainFrame, self).OnOpen(self)
        lan = {"English": "en", "Chinese": "zh-cn", "Japanese": "ja"}
        lang_from = None
        lang_to = None
        source = None
        target = None
        if self.select_dialog.isrecognize == True:
            # Set recognize parameter
            lang_from = lan[self.select_dialog.sorcelan]
            if self.select_dialog.istranslate == True:
                # Set translation parameter (only meaningful with recognition)
                lang_to = lan[self.select_dialog.targetlan]
        source = self.mediapath
        # Derive the subtitle file name from the media file name.
        if not target:
            target = source[:source.rfind('.')] + '.srt'
        self.subtitle = target
        self.currentfile = None
        dec = fd.ffmpeg_decoder(source, output_rate=8000)
        vad = naive_vad(dec.ostream.get_handle())
        sub = sg.sub_generator(vad.ostream.get_handle(), source, target, lang_from=lang_from, lang_to=lang_to)
        self.ohandle = sub.ostream.get_handle()
        spec = sp.spectrum(dec.ostream.get_handle(), window_size=1024)
        handle = spec.ostream.get_handle()
        dec.start()
        vad.start()
        sub.start()
        spec.start()
        self.Spec.OpenData(self.Spec, handle)

    # float second to time string
    def OnFormatTime(self, floattime):
        """Convert a time in seconds (float) to an 'HH:MM:SS.mmm' string.

        Bug fix: the old "< 9" comparisons left the value 9 unpadded
        (e.g. '00:00:9.x'), and milliseconds below 100 lost their leading
        zeros; zero-padded %-formatting handles every case.
        """
        whole = int(floattime)
        millis = int((floattime - whole) * 1000)
        sec = whole % 60
        all_min = whole // 60
        mini = all_min % 60
        hour = all_min // 60
        return '%02d:%02d:%02d.%03d' % (hour, mini, sec, millis)

    def OnTimer(self, evt):
        """Poll the subtitle stream; display new lines and update the
        buffering gauge."""
        super(MainFrame, self).OnTimer(self)
        if self.ohandle.has_data(1):
            (start, self.end, text) = self.ohandle.read(1)[2][0][0]
            self.player.video_set_subtitle_file(self.subtitle)
            str_start = self.OnFormatTime(start)
            str_end = self.OnFormatTime(self.end)
            self.subpanel.AddSub(self.subpanel, str_start, str_end, text)
        # Set buffer time
        if self.player.get_length() != 0:
            self.buffergauge.SetValue(self.end * self.buffergauge.GetRange() * 1000 / self.player.get_length())
        # Link with subtitle
        #self.subpanel.OpenFile(self.subtitle)
if __name__ == '__main__':
    # Create a wx.App(), which handles the windowing system event loop
    app = wx.PySimpleApp()
    # Create the window containing our small media player
    PlayerFrame = MainFrame("AutoSub")
    # Subtitle(PlayerFrame, title='Subtitle',positon=(1100,300))
    PlayerFrame.SetPosition((0,0))
    app.SetTopWindow(PlayerFrame)
    # show the player window centred and run the application
    PlayerFrame.Centre()
    PlayerFrame.Show()
    app.MainLoop()
|
# -*- coding: utf-8 -*-
"""
Purpose
=======
ogs5py is a Python API for the OpenGeoSys 5 scientific modeling package.
The following functionalities are directly provided on module-level.
Subpackages
===========
.. autosummary::
:toctree: api
fileclasses
reader
tools
Classes
=======
OGS model Base Class
^^^^^^^^^^^^^^^^^^^^
Class to setup an ogs model
.. autosummary::
:toctree: api
OGS
File Classes
^^^^^^^^^^^^
Classes for all OGS5 Files. See: :any:`ogs5py.fileclasses`
.. currentmodule:: ogs5py.fileclasses
.. autosummary::
ASC
BC
CCT
DDC
FCT
GEM
GEMinit
GLI
GLIext
IC
RFR
KRC
MCP
MFP
MMP
MPD
MSH
MSP
NUM
OUT
PCS
PCT
PQC
PQCdat
REI
RFD
ST
TIM
Functions
=========
.. currentmodule:: ogs5py.tools.tools
Geometric
^^^^^^^^^
Geometric routines
.. autosummary::
hull_deform
Searching
^^^^^^^^^
Routine to search for a valid ogs id in a directory
.. autosummary::
search_task_id
Formatting
^^^^^^^^^^
Routines to format/generate data in the right way for the input
.. autosummary::
by_id
specialrange
generate_time
Downloading
^^^^^^^^^^^
.. currentmodule:: ogs5py.tools.download
Routine to download OGS5.
.. autosummary::
download_ogs
add_exe
reset_download
OGS5PY_CONFIG
Plotting
^^^^^^^^
.. currentmodule:: ogs5py.tools.vtk_viewer
Routine to show a VTK file.
.. autosummary::
show_vtk
Information
^^^^^^^^^^^
.. currentmodule:: ogs5py.tools.types
.. autosummary::
OGS_EXT
PCS_TYP
PRIM_VAR_BY_PCS
"""
from ogs5py.fileclasses import (
ASC,
BC,
CCT,
DDC,
FCT,
GEM,
GLI,
IC,
KRC,
MCP,
MFP,
MMP,
MPD,
MSH,
MSP,
NUM,
OUT,
PCS,
PCT,
PQC,
REI,
RFD,
RFR,
ST,
TIM,
GEMinit,
GLIext,
PQCdat,
)
from ogs5py.ogs import OGS
from ogs5py.tools.download import (
OGS5PY_CONFIG,
add_exe,
download_ogs,
reset_download,
)
from ogs5py.tools.tools import (
by_id,
generate_time,
hull_deform,
search_task_id,
specialrange,
)
from ogs5py.tools.types import OGS_EXT, PCS_TYP, PRIM_VAR_BY_PCS
from ogs5py.tools.vtk_viewer import show_vtk
# Resolve the package version from the installed metadata when available.
try:
    from ogs5py._version import __version__
except ModuleNotFoundError:  # pragma: nocover
    # package is not installed
    __version__ = "0.0.0.dev0"

# indentation of subkeywords
SUB_IND = " "
"""str: Indentation of subkeys."""

# indentation of content
CON_IND = "  "
"""str: Indentation of content."""

# Public API: version, model class, file classes, and tool functions.
__all__ = ["__version__"]
__all__ += ["OGS"]
__all__ += [
    "ASC",
    "BC",
    "CCT",
    "DDC",
    "FCT",
    "GEM",
    "GEMinit",
    "GLI",
    "GLIext",
    "IC",
    "RFR",
    "KRC",
    "MCP",
    "MFP",
    "MMP",
    "MPD",
    "MSH",
    "MSP",
    "NUM",
    "OUT",
    "PCS",
    "PCT",
    "PQC",
    "PQCdat",
    "REI",
    "RFD",
    "ST",
    "TIM",
]
__all__ += ["search_task_id", "by_id", "hull_deform"]
__all__ += ["specialrange", "generate_time"]
__all__ += ["download_ogs", "add_exe", "reset_download", "OGS5PY_CONFIG"]
__all__ += ["show_vtk"]
__all__ += ["OGS_EXT", "PCS_TYP", "PRIM_VAR_BY_PCS"]
__all__ += ["SUB_IND", "CON_IND"]
# __all__ += ["readvtk", "readpvd", "readtec_point", "readtec_polyline"]
|
# Read a line and print it with every lowercase vowel removed.
s = input()
vowels = 'aeiou'
s = ''.join(ch for ch in s if ch not in vowels)
print(s)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ops
import data
import utils
import models
import argparse
import numpy as np
import tensorflow as tf
import image_utils as im
from glob import glob
""" param """
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset', dest='dataset', default='road', help='which dataset to use')
parser.add_argument('--load_size', dest='load_size', type=int, default=128, help='scale images to this size')
parser.add_argument('--crop_size', dest='crop_size', type=int, default=128, help='then crop to this size')
parser.add_argument('--epoch', dest='epoch', type=int, default=200, help='# of epoch')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='# images in a batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='GPU ID')
parser.add_argument('--channel', dest='channel', type=int, default=3, help='image channel')
parser.add_argument('--lambda_', dest='lambda_', type=float, default=10.0, help='lambda')
parser.add_argument('--ratio', dest='ratio', type=int, default=1, help='width/height ratio')
args = parser.parse_args()
dataset = args.dataset
load_size = args.load_size
crop_size = args.crop_size
epoch = args.epoch
batch_size = args.batch_size
lr = args.lr
gpu_id = args.gpu_id
channel = args.channel
lambda_ = args.lambda_
ratio = args.ratio
""" graphs """
with tf.device('/gpu:%d' % gpu_id):
# nodes
x = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size * ratio, channel])
y = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size * ratio, channel])
R_x_history = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size * ratio, channel])
R_x = models.refiner_cyc(x, 'R_x')
D_y = models.discriminator_global(y, 'd')
D_R_x = models.discriminator_global(R_x, 'd', reuse=True)
D_R_x_history = models.discriminator_global(R_x_history, 'd', reuse=True)
# losses
realism_loss = tf.identity(ops.l2_loss(D_R_x, tf.ones_like(D_R_x)), name='realism_loss')
regularization_loss = tf.identity(ops.l1_loss(R_x, x) * lambda_, name='regularization_loss')
generator_loss = tf.identity((realism_loss + regularization_loss) / 2.0, name="generator_loss")
refiner_d_loss = tf.identity(ops.l2_loss(D_R_x, tf.zeros_like(D_R_x)), name='refiner_d_loss')
real_d_loss = tf.identity(ops.l2_loss(D_y, tf.ones_like(D_y)), name='real_d_loss')
discrim_loss = tf.identity((refiner_d_loss + real_d_loss) / 2.0, name="discriminator_loss")
# with history
refiner_d_loss_with_history = tf.identity(ops.l2_loss(D_R_x_history, tf.zeros_like(D_R_x_history)),
name='refiner_d_loss_with_history')
discrim_loss_with_history = tf.identity((refiner_d_loss_with_history + real_d_loss) / 2.0,
name="discrim_loss_with_history")
# summaries
refiner_summary = ops.summary_tensors([realism_loss, regularization_loss, generator_loss])
#refiner_summary_all = ops.summary(generator_loss)
discrim_summary = ops.summary_tensors([refiner_d_loss, real_d_loss, discrim_loss])
#discrim_summary_all = ops.summary(discrim_loss)
discrim_summary_with_history = ops.summary_tensors([refiner_d_loss_with_history, real_d_loss, discrim_loss_with_history])
#discrim_summary_with_history_all = ops.summary(discrim_loss_with_history)
# optimizer
t_var = tf.trainable_variables()
d_a_var = [var for var in t_var if 'd_discriminator' in var.name]
g_var = [var for var in t_var if 'R_x_generator' in var.name]
d_a_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(discrim_loss_with_history, var_list=d_a_var)
g_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(generator_loss, var_list=g_var)
""" train """
# session
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# counter
it_cnt, update_cnt = ops.counter()
# load data
x_img_paths = glob('./datasets/' + dataset + '/trainA/*.png')
y_img_paths = glob('./datasets/' + dataset + '/trainB/*.png')
x_data_pool = data.ImageData(sess, x_img_paths, batch_size, channels=channel, load_size=load_size, crop_size=crop_size,
ratio=ratio)
y_data_pool = data.ImageData(sess, y_img_paths, batch_size, channels=channel, load_size=load_size, crop_size=crop_size,
ratio=ratio)
x_test_img_paths = glob('./datasets/' + dataset + '/trainA/*.png')
y_test_img_paths = glob('./datasets/' + dataset + '/trainB/*.png')
x_test_pool = data.ImageData(sess, x_test_img_paths, batch_size, channels=channel, load_size=load_size,
crop_size=crop_size, ratio=ratio)
y_test_pool = data.ImageData(sess, y_test_img_paths, batch_size, channels=channel, load_size=load_size,
crop_size=crop_size, ratio=ratio)
R_x_pool = utils.ItemPool()
# summary
summary_writer = tf.summary.FileWriter('./summaries/' + dataset + "_" + str(lambda_), sess.graph)
# checkpoint
ckpt_dir = './checkpoints/' + dataset + "_" + str(lambda_)
utils.mkdir(ckpt_dir + '/')
saver = tf.train.Saver(max_to_keep=5)
ckpt_path = utils.load_checkpoint(ckpt_dir, sess, saver)
# pre-train
if ckpt_path is None:
sess.run(tf.global_variables_initializer())
try:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print("Pretrain refiner")
for it in range(200):
x_real_ipt = x_data_pool.batch()
refiner_summary_opt, _ = sess.run([refiner_summary, g_train_op], feed_dict={x: x_real_ipt})
summary_writer.add_summary(refiner_summary_opt, it)
save_path = saver.save(sess, '%s/pretrained_refiner.ckpt' % (ckpt_dir))
print("Pretrain descriminator")
for it in range(50):
# prepare data
x_real_ipt = x_data_pool.batch()
y_real_ipt = y_data_pool.batch()
R_x_opt = sess.run(R_x, feed_dict={x: x_real_ipt})
R_x_sample_ipt = np.array(R_x_pool(list(R_x_opt)))
# train D
discrim_summary_opt, _ = sess.run([discrim_summary_with_history, d_a_train_op],
feed_dict={y: y_real_ipt, R_x_history: R_x_sample_ipt})
summary_writer.add_summary(discrim_summary_opt, it)
print("Finish pretrain")
save_path = saver.save(sess, '%s/pretrained_discriminator.ckpt' % (ckpt_dir))
except Exception, e:
coord.request_stop(e)
print(e)
else:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print('Copy variables from % s' % ckpt_path)
# train
try:
batch_epoch = min(len(x_data_pool), len(y_data_pool)) // batch_size
max_it = epoch * batch_epoch
for it in range(sess.run(it_cnt), max_it):
sess.run(update_cnt)
# prepare data
x_real_ipt = x_data_pool.batch()
y_real_ipt = y_data_pool.batch()
R_x_opt = sess.run(R_x, feed_dict={x: x_real_ipt})
R_x_sample_ipt = np.array(R_x_pool(list(R_x_opt)))
# train G
for k in xrange(2):
refiner_summary_opt, _ = sess.run([refiner_summary, g_train_op], feed_dict={x: x_real_ipt})
summary_writer.add_summary(refiner_summary_opt, it * 2 + k)
# train D
for k in xrange(1):
discrim_summary_opt, _ = sess.run([discrim_summary_with_history, d_a_train_op],
feed_dict={y: y_real_ipt, R_x_history: R_x_sample_ipt})
summary_writer.add_summary(discrim_summary_opt, it)
# which epoch
epoch = it // batch_epoch
it_epoch = it % batch_epoch + 1
# display
if it % 10 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))
# save
if (it + 1) % 100 == 0:
save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch))
print('Model saved in file: % s' % save_path)
if (it + 1) % 100 == 0:
x_real_ipt = x_test_pool.batch()
R_x_opt = sess.run(R_x, feed_dict={x: x_real_ipt})
sample_opt = np.concatenate((x_real_ipt[0:2], R_x_opt[0:2]), axis=0)
print(sample_opt.shape)
save_dir = './sample_images_while_training/' + dataset + "_" + str(lambda_)
utils.mkdir(save_dir + '/')
im.imwrite(im.immerge(sample_opt, 2, 2),
'%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch))
except Exception, e:
coord.request_stop(e)
finally:
print("Stop threads and close session!")
coord.request_stop()
coord.join(threads)
sess.close()
|
'''
By counting carefully it can be seen that a rectangular grid measuring 3 by 2
contains eighteen rectangles.
Although there exists no rectangular grid that contains exactly two million
rectangles, find the area of the grid with the nearest solution.
'''
def countRectangles(x, y):
    """Return the number of axis-aligned rectangles in an x-by-y grid.

    A rectangle is fixed by choosing 2 of the x+1 vertical and 2 of the
    y+1 horizontal grid lines, so the count is T(x) * T(y) with
    T(n) = n*(n+1)/2 -- the closed form of the original double loop,
    computed in O(1) instead of O(x*y).
    """
    return (x * (x + 1) // 2) * (y * (y + 1) // 2)
# Search for the grid whose rectangle count is nearest to two million
# (Project Euler 85). NOTE: Python 2 code (xrange, print statement).
target = 2000000
difference = target
diffX, diffY = -1, -1
for x in xrange(1,100):
    y = x
    # Start at the square grid; anything wider only increases the count.
    if countRectangles(x,x) > target:
        break
    while True:
        combos = countRectangles(x, y)
        if combos < target:
            prevCombos = combos
            prevCombosY = y
        else:
            break
        y += 1
    # combos is the first overshoot; prevCombos the last undershoot.
    difference1 = combos-target
    difference2 = target-prevCombos
    if difference1 < difference2:
        if difference1 < difference:
            difference = difference1
            diffX, diffY = x, y
    else:
        if difference2 < difference:
            difference = difference2
            diffX, diffY = x, prevCombosY
# Area of the best grid found.
print diffX*diffY
|
class FifoQueue(list):
    """A bounded first-in-first-out queue backed by a plain list."""

    capacity = 20  # class-wide maximum number of queued items

    def enqueue(self, item):
        """Append item at the tail; silently drop it when the queue is full."""
        if len(self) >= self.capacity:
            return
        self.append(item)

    def dequeue(self):
        """Pop and return the head item, or None when the queue is empty."""
        return self.pop(0) if self else None

    @property
    def available_spots(self):
        """Number of additional items the queue can still accept."""
        return self.capacity - len(self)
import os
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from Train import train_model
from sklearn.externals import joblib
app = Flask(__name__)
api = Api(app)
# Train the model on first run, then load the persisted estimator once
# at import time so every request reuses the same model instance.
if not os.path.isfile('pcod1final.model'):
    train_model()
model = joblib.load('pcod1final.model')
class MakePrediction(Resource):
    """POST endpoint that scores the submitted PCOD questionnaire."""

    @staticmethod
    def post():
        """Read the JSON payload, run the model, return the risk class."""
        posted_data = request.get_json()
        # Feature keys in the exact column order the model expects.
        field_order = ['age', 'weight', 'height', 'sugar_Level', 'bp_Level',
                       'androgen_Level', 'sleep', 'child_Count', 'gap_Mrg_Child',
                       'periods_long_week', 'irregular_periods', 'fast_food',
                       'loose_Weight', 'Hair_Growth', 'dark_Patches', 'stress',
                       'any_Drugs', 'thyroid_problem', 'treatment_Taken']
        features = [posted_data[key] for key in field_order]
        prediction = model.predict([features])[0]
        # Map the numeric class back to its label.
        if prediction == 0:
            predicted_class = 'high_risk'
        elif prediction == 1:
            predicted_class = 'low_risk'
        else:
            predicted_class = 'mid_risk'
        return jsonify({
            'Prediction': predicted_class
        })
# Route registration and development-server entry point.
api.add_resource(MakePrediction, '/predict')
if __name__ == '__main__':
    app.run(debug=True)
|
from flask import Flask, abort, json, jsonify, request
from cryptography.fernet import Fernet
app = Flask(__name__)
#create a key in python
#key = Fernet.generate_key()
key = "MZ8Z4I6XHzd_jx1M8hMs6K8WS2SIGCsrMSMK5oZkKnw="
crypto = Fernet(key)
def encrypt(plain_text):
    """Encrypt plain_text with the module-level Fernet key.

    NOTE(review): bytearray(string_text) without an encoding only works on
    Python 2 (where str is bytes); this module appears to target Python 2.
    """
    # convert plain_text to string
    string_text = str(plain_text)
    # convert string to bytesarray
    bytes = bytearray(string_text)
    return crypto.encrypt(str(bytes))
def decrypt(cipher_text,debug=False):
    """Decrypt cipher_text with the module-level Fernet key.

    When debug is True the input is returned unchanged, treating the
    store as plaintext (useful while developing).
    """
    if debug:
        return cipher_text
    # convert cipher_text into bytesarray
    bytes = bytearray(cipher_text)
    # convert bytesarray to string
    bytestring = str(bytes)
    # decrypt string
    decrypted_content = crypto.decrypt(bytestring)
    return decrypted_content
def load_tasks(path="tasks.json",debug=False):
#
with open(path,"r") as task_list:
content = task_list.read()
data = decrypt(content,debug=debug)
data = json.loads(data)
return data
def dump_tasks(tasks,path="tasks.json"):
content = json.dumps(tasks)
encrpted_content = encrypt(content)
# bytes = to_bytes(content)
# enc_content = encrypt(bytes)
with open(path,"w") as task_list:
task_list.write(encrpted_content)
# In-memory task list, loaded once at import time.
tasks = load_tasks(debug=False)

@app.route('/')
def home():
    """Root view: return every task as JSON."""
    return jsonify({'tasks': tasks})

@app.route('/todo/tasks', methods=['GET'])
def get_tasks():
    """Return every task as JSON."""
    return jsonify({'tasks': tasks})
@app.route('/todo/tasks/', methods=['POST'])
def create_task():
    """Create a task from the posted JSON and persist the list.

    Expects a JSON body with at least 'title'; 'description' is optional.
    Responds with the created task; aborts with 400 on malformed input.
    """
    if not request.json or 'title' not in request.json:
        abort(400)
    # Robust id allocation: also works when the task list is empty.
    _id = tasks[-1]['id'] + 1 if tasks else 1
    task = {
        'id': _id,
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    dump_tasks(tasks)
    return jsonify({'task': task})
@app.route('/todo/tasks/<int:task_id>', methods=['DELETE'])
def delete_task(task_id):
    """Delete the task with the given id and persist the change.

    Aborts with 404 when no task matches.
    """
    matches = [task for task in tasks if task['id'] == task_id]
    if len(matches) == 0:
        abort(404)
    # Bug fix: remove the matched task from the global list -- the old
    # code removed tasks[0] from the local filtered list instead.
    tasks.remove(matches[0])
    dump_tasks(tasks)  # keep the on-disk store consistent with create_task
    return jsonify({'result': True})
# Run the development server on localhost only.
if __name__ == '__main__':
    app.run("127.0.0.1", 8080, debug=True);
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 18:31:06 2016
@author: marioromero
"""
import areas

# Side lengths / radii to evaluate with the areas helpers.
lados = [1,2,3,4,10,100,500]
for lado in lados:
    print("Area del cuadrado de lado {} es {}".format(lado,areas.cuadrado(lado)))
for radio in lados:
    print("Area del cirulo de radio {} es {}".format(radio,areas.circulo(radio)))

# NOTE(review): __name__ == 'areas' is only true when this file itself is
# imported under the module name "areas" -- it never runs when the script
# is executed directly. Presumably a module-vs-script demo; confirm intent.
if __name__ == 'areas':
    import sys
    print ('Numero de parametros =', len (sys.argv))
    for s in sys.argv:
        print (s, '->', type(s))
# Read an integer N. For every non-negative integer i < N, print i squared.
if __name__ == '__main__':
    count = int(input())
    for i in range(count):
        print(i ** 2)
|
# functions in python:
# - don't repeat code
# - clean up code
# - can be reused elsewhere
def say_hi():
    """Print a greeting (returns None)."""
    print('Hi!')

say_hi()
print()
def say_hi2():
return 'Hi!'
greeting = say_hi2()
print(greeting)
print()
#create function that returns list of even numbers from 1 to 50 (not including 50)
def generate_evens():
    """Return the even numbers in [1, 50) as a list."""
    return list(range(2, 50, 2))
print(generate_evens())
print()
# functions that take parameters
def square(num):
    """Return num squared."""
    return num ** 2
print(square(2))
print()

def happy_birthday(name):
    """Print a birthday greeting for name (returns None)."""
    print(f"Happy Birthday {name}")

happy_birthday('Tom')
print()
def print_full_name(first, last):
    """Return a sentence combining the first and last name."""
    return 'Your full name is {} {}'.format(first, last)
print(print_full_name('Dallas','McGroarty'))
print()
# take a string and uppercase it and add a ! at the end
def yell(word):
    """Uppercase word and append an exclamation mark."""
    return "{}!".format(word.upper())
print(yell('i like eggs'))
# Default parameters, power is defaulted to 2 if no value entered
def exponent(num, power=2):
    """Return num raised to power (defaults to squaring)."""
    result = num ** power
    return result
print(exponent(2,3))
print(exponent(3,2))
print(exponent(7))
def add(a,b):
    """Return the sum of a and b."""
    return a + b

def subtract(a,b):
    """Return a minus b."""
    return a - b

# can have default parameters or parameters in general be functions
def math(a,b,fn=add):
    """Apply fn (default: add) to a and b.

    NOTE(review): the name shadows the stdlib ``math`` module.
    """
    return fn(a,b)

print(math(2,2))
print(math(2,2,subtract))
import os
import random
from shutil import copyfile
"""
"""
def getDirAndFileDict(imgDir):
    """Map each immediate subdirectory of imgDir to its list of file names.

    NOTE(review): zip(subDirs, corrFiles) pairs the root's subdirectory
    list with the remaining os.walk entries in order -- this assumes a
    single level of nesting (no sub-subdirectories); verify for deeper
    trees.
    """
    # Get subdirectories
    dirTree = list(os.walk(imgDir))
    subDirs = dirTree[0][1]
    corrFiles = dirTree[1:]
    dirFileDict = {}
    for subDir, files in zip(subDirs, corrFiles):
        # files is a (dirpath, dirnames, filenames) triple; [2] = filenames
        dirFileDict[subDir] = files[2]
    return dirFileDict
def getCount(dict):
    """Print and return the number of files per class directory.

    :param dict: mapping of directory name -> iterable of file names
    :return: mapping of directory name -> file count

    Bug fix: the old code stored the file list itself in ``counts`` even
    though the name (and the printed message) clearly intend the count;
    callers in this file ignore the return value.
    """
    counts = {}
    for dir, files in dict.items():
        print("For class {}, the count is {}".format(dir, str(len(files))))
        counts[dir] = len(files)
    return counts
def sampleAndSelect(dirFileDict, trainSplitPercent):
    """Randomly split each class's files into train and test sets.

    :param dirFileDict: mapping of class dir -> iterable of file names
    :param trainSplitPercent: fraction (0..1) of files to use for training
    :return: (train, test) -- dicts mapping class dir -> set of file names
    """
    train = {}
    test = {}
    for dir, files in dirFileDict.items():
        files = set(files)
        k = int(trainSplitPercent * len(files))
        # Sample from a sorted list: random.sample() on a set is
        # deprecated since Python 3.9 and raises from 3.11 onwards.
        train_set = set(random.sample(sorted(files), k))
        test_set = files.difference(train_set)
        train[dir] = train_set
        test[dir] = test_set
    return (train, test)
def writeToDisk(targetDir, imgDir, train, test):
    """Copy the split files into targetDir/{train,test}/<class>/ folders.

    os.mkdir raises if the target folders already exist, so this expects
    a fresh targetDir layout.
    """
    for label, set_dict in [("train", train), ("test", test)]:
        os.mkdir(os.path.join(targetDir, label))
        for dir, files in set_dict.items():
            os.mkdir(os.path.join(targetDir,label,dir))
            for file in files:
                src = os.path.join(imgDir, dir, file)
                dest = os.path.join(targetDir, label, dir, file)
                copyfile(src, dest)
if __name__ == '__main__':
    # Hard-coded local paths for the RAVDESS image dataset split.
    imgDir = "/Users/ptrivedi/git-repos/ravdess-images/ravdess-data"
    targetDir = "/Users/ptrivedi/git-repos/ravdess-images/ravdess-train-test"
    dict = getDirAndFileDict(imgDir)
    # 80% train / 20% test split per class.
    train, test = sampleAndSelect(dict, 0.8)
    print("Training set counts:")
    train_counts = getCount(train)
    print("Testing set counts:")
    test_counts = getCount(test)
    print("Writing to disk...")
    writeToDisk(targetDir, imgDir, train, test)
    print("Split completed!")
# https://codeforces.com/problemset/problem/1/A
# Cover an n x m square with a x a flagstones: ceil per dimension, multiply.
import math

n, m, a = (int(tok) for tok in input().split())
print(math.ceil(n / a) * math.ceil(m / a))
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import unittest
from paa_code.big_numbers_multiplication import Karatsuba
class KaratsubaTest(unittest.TestCase):
    """Unit tests for the Karatsuba big-number multiplication helper.

    Fix: use assertEqual -- the assertEquals alias is deprecated and was
    removed in Python 3.12.
    """

    def test_divide_number_in_two_parts(self):
        number = 12345
        k = Karatsuba()
        (left_part_returned, right_part_returned) = k.divide_number_in_two_parts(number, 2)
        self.assertEqual(123, left_part_returned)
        self.assertEqual(45, right_part_returned)

    def test_divide_number_in_two_parts_two_digits(self):
        number = 12
        k = Karatsuba()
        (left_part_returned, right_part_returned) = k.divide_number_in_two_parts(number, 1)
        self.assertEqual(1, left_part_returned)
        self.assertEqual(2, right_part_returned)

    def test_define_m_power(self):
        number1 = 123
        number2 = 12345
        k = Karatsuba()
        self.assertEqual(k.define_m_power(number1, number2), 1)

    def test_number_of_digits(self):
        number = 123
        k = Karatsuba()
        resultado = k.number_of_digits(number)
        self.assertEqual(resultado, 3)

    def test_multiply(self):
        number_1 = 1234
        number_2 = 5678
        expected_result = 7006652
        k = Karatsuba()
        returned_result = k.multiply(number_1, number_2)
        self.assertEqual(expected_result, returned_result)

    def test_multiply_numbers_with_different_number_of_digits(self):
        number_1 = 12000
        number_2 = 56
        expected_result = 672000
        k = Karatsuba()
        returned_result = k.multiply(number_1, number_2)
        self.assertEqual(expected_result, returned_result)

    def test_multiply_one_number_with_one_digit(self):
        number_1 = 1200
        number_2 = 2
        expected_result = 2400
        k = Karatsuba()
        returned_result = k.multiply(number_1, number_2)
        self.assertEqual(expected_result, returned_result)
if __name__ == "__main__":
unittest.main()
|
from cmd import Cmd
class Node:
    """One directory in the logical file-system tree."""

    def __init__(self, data):
        """
        Initialise the node with the directory name and no children.
        :param data: the name of the directory this node represents
        """
        self.data = data
        self.children = {}  # child directory name -> Node

    @staticmethod
    def _split_path(path):
        """Return the non-empty, slash-separated components of path."""
        return [part for part in path.split("/") if part != ""]

    def _traverse(self, root, path):
        """Walk path starting at root; return the final Node, or None
        when any component does not exist."""
        node = root
        for key in self._split_path(path):
            if key not in node.children:
                return None
            node = node.children[key]
        return node

    def insert(self, root=None, path=None, folder=None):
        """
        Create directory `folder` under `path`.
        :param root: the root Node of the tree
        :param path: path of the parent directory
        :param folder: name of the directory to create
        :return: True on success, False on an invalid path or duplicate
        """
        node = self._traverse(root, path)
        if node is None:
            return False
        if folder in node.children:
            print("ERR: DIRECTORY ALREADY EXISTS")
            return False
        node.children[folder] = Node(data=folder)
        return True

    def remove(self, root=None, path=None, folder=None):
        """
        Remove directory `folder` located under `path`.
        :param root: the root Node of the tree
        :param path: path of the parent directory
        :param folder: name of the directory to delete
        :return: True on success, False otherwise
        """
        node = self._traverse(root, path)
        if node is None:
            print("Err Invalid Path")
            return False
        try:
            del node.children[folder]
            return True
        except KeyError:
            print("Invalid path")
            return False

    def list_dirs(self, path=None, root=None):
        """
        Print the directories directly under `path`.
        :param path: the path whose children should be listed
        :param root: the root Node of the tree
        """
        node = root
        for key in self._split_path(path):
            try:
                node = node.children[key]
            except KeyError as e:
                # Bug fix: stop at the first missing component instead of
                # continuing the walk with a stale node.
                print("This path does not exist", e)
                return
        print("DIRS: ", " ".join(self.print_children(node)))

    def print_children(self, node):
        """
        Return the names of node's immediate child directories.
        :param node: the Node whose children are requested
        :return: list of child directory names
        """
        return [child.data for child in node.children.values()]

    def check_valid(self, path=None, root=None):
        """
        Check whether `path` exists in the tree.
        :param path: the path to validate
        :param root: the root Node of the tree
        :return: True when every component of path exists
        """
        return self._traverse(root, path) is not None
class MyPrompt(Cmd):
    """Interactive shell (cmd.Cmd) exposing the logical file system."""

    prompt = "$ "
    intro = "Application Started ..."

    def __init__(self):
        """
        Initialise pwd (present working directory, always stored with a
        trailing slash) and fat (the directory tree root acting as the
        file allocation table).
        """
        self.pwd = "/"
        self.fat = self.get_FAT(data=self.pwd)
        super(MyPrompt, self).__init__()

    def get_FAT(self, data):
        """
        Create the root object for the directory tree.
        :param data: name for the root Node
        :return: the newly created Node
        """
        return Node(data=data)

    def do_exit(self, inp=None):
        """
        Exit the application.
        :return: True, which stops cmdloop()
        """
        print("Bye")
        return True

    def help_exit(self):
        """Display help for the exit command."""
        print('exit the application. Shorthand: Ctrl-D.')

    def do_cd(self, inp):
        """
        Change the current directory. ".." moves up one level; both
        relative and absolute paths are accepted.
        """
        if not inp:
            # Guard: an empty argument would crash on inp[0] below.
            print("ERR: INVALID PATH")
            return
        if inp == "..":
            parts = [p for p in self.pwd.split("/") if p != ""]
            if parts:
                parts.pop(-1)
            # Bug fix: keep the "/.../" invariant (leading AND trailing
            # slash) so later relative paths compose correctly, and do
            # not crash when already at the root.
            path = "/" + "/".join(parts)
            if parts:
                path += "/"
        elif inp == "/":
            path = "/"
        elif inp[0] == "/":
            # Absolute path: normalise to a trailing slash.
            path = inp if inp[-1] == "/" else inp + "/"
        else:
            # Relative path: append to pwd, normalised to a trailing slash.
            path = self.pwd + (inp if inp[-1] == "/" else inp + "/")
        status = self.fat.check_valid(path=path, root=self.fat)
        if status:
            self.pwd = path
            print("SUCC: REACHED")
        else:
            print("ERR: INVALID PATH")

    def do_mkdir(self, inp):
        """
        Create a new directory inside any valid directory.
        :param inp: directory to create (relative or absolute path)
        """
        if not inp:
            print("ERR: INVALID PATH")
            return
        if inp[-1] == "/":
            inp = inp[:-1]
        if not inp:
            # The input was just "/": nothing sensible to create.
            print("ERR: INVALID PATH")
            return
        if inp[0] == "/":
            parts = inp.split("/")
            folder = parts.pop(-1)
            path = "/".join(parts)
        else:
            path = self.pwd
            folder = inp
        status = self.fat.insert(root=self.fat, path=path, folder=folder)
        if status:
            print("SUCC: CREATED")

    def do_rm(self, inp):
        """
        Remove a directory, provided it exists.
        :param inp: directory to remove (relative or absolute path)
        """
        if not inp:
            print("ERR: INVALID PATH")
            return
        if inp[0] == "/":
            parts = inp.split("/")
            folder = parts.pop(-1)
            path = "/".join(parts)
        else:
            path = self.pwd
            folder = inp
        status = self.fat.remove(root=self.fat, path=path, folder=folder)
        if status:
            print("SUCC: DELETED")
        else:
            print("ERR: INVALID PATH")

    def do_pwd(self, inp=None):
        """Print the present working directory."""
        print("PATH: ", self.pwd)

    def do_ls(self, inp=None):
        """List all directories in the current working directory."""
        self.fat.list_dirs(path=self.pwd + (inp or ""), root=self.fat)

    def do_session(self, inp):
        """
        Reset the session: clear the directory tree and return to root.
        :param inp: only the value "clear" is accepted
        """
        if inp != "clear":
            print(inp + " is not a valid option")
            # Bug fix: the old code fell through and wiped the session
            # even when the option was invalid.
            return
        self.pwd = "/"
        self.fat = self.get_FAT(data="/")
        print("SUCC: CLEARED: RESET TO ROOT")

    def default(self, inp):
        """Handle anything other than the supported commands."""
        print("ERR: CANNOT RECOGNIZE INPUT.")

    do_EOF = do_exit
    help_EOF = help_exit
if __name__ == '__main__':
    # Start the interactive shell loop.
    prompt=MyPrompt()
    prompt.cmdloop()
""" Constant values for recovery/BTC """
import decimal
import sys
PY3 = sys.version_info.major > 2
SATOSHI_PER_BTC = decimal.Decimal(1e8)
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# BIP32 hardened derivation flag
HARDENED = 0x80000000
P2PKH_MAINNET = 0x00
P2SH_MAINNET = 0x05
P2PKH_TESTNET = 0x6f
P2SH_TESTNET = 0xc4
ADDR_VERSIONS_MAINNET = [P2PKH_MAINNET, P2SH_MAINNET]
ADDR_VERSIONS_TESTNET = [P2PKH_TESTNET, P2SH_TESTNET]
def get_address_versions(is_testnet):
    """Return the list of address version bytes for the selected network.

    :param is_testnet: truthy selects the testnet versions, falsy mainnet.
    :return: list of P2PKH/P2SH version bytes.
    """
    if is_testnet:
        return ADDR_VERSIONS_TESTNET
    return ADDR_VERSIONS_MAINNET
# GreenAddress script type for standard p2sh multisig UTXOs
P2SH_FORTIFIED_OUT = 10
# GreenAddress script type for p2sh-p2wsh multisig segwit UTXOs
P2SH_P2WSH_FORTIFIED_OUT = 14
# GreenAddress xpubs for mainnet/testnet
# Each entry is hex-encoded: the BIP32 chain code plus the compressed public
# key of the service's key (together they reconstruct the service xpub).
GA_KEY_DATA_MAINNET = {
    'chaincode': 'e9a563d68686999af372a33157209c6860fe79197a4dafd9ec1dbaa49523351d',
    'pubkey': '0322c5f5c9c4b9d1c3e22ca995e200d724c2d7d8b6953f7b38fddf9296053c961f',
}
GA_KEY_DATA_TESTNET = {
    'chaincode': 'b60befcc619bb1c212732770fe181f2f1aa824ab89f8aab49f2e13e3a56f0f04',
    'pubkey': '036307e560072ed6ce0aa5465534fb5c258a2ccfbc257f369e8e7a181b16d897b3',
}
|
class RequestsConsumeBaseBackend(object):
    """Abstract base class for request-consuming backends.

    Subclasses must implement :meth:`consume_request`. The callbacks are
    presumably invoked around the consumption lifecycle (start / success /
    failure) — TODO confirm against a concrete subclass.
    """
    def consume_request(self, request, start_callback=None, success_callback=None, fail_callback=None):
        # Abstract: concrete backends must override this method.
        raise NotImplementedError
|
# input: an integer (n)
# output: the factorial of that number (n!)
# read the number from the user
num = int(input("Digite o valor de n:"))
# multiply num * (num - 1) * ... * 1; the empty product gives 0! == 1
fatorial = 1
for fator in range(num, 0, -1):
    fatorial *= fator
print(fatorial)
|
import heapq
import itertools
import numpy as np
__all__ = ['FixedToVariableCode', 'VariableToFixedCode',
'HuffmanCode', 'TunstallCode']
class FixedToVariableCode:
    """
    Binary (prefix-free) fixed-to-variable length code. Let :math:`\\mathcal{X}` be the alphabet of some discrete source. A *binary fixed-to-variable length code* of source block size :math:`k` is defined by an encoding mapping :math:`\\mathrm{Enc} : \\mathcal{X}^k \\to \\{ 0, 1 \\}^+`, where :math:`\\{ 0, 1 \\}^+` denotes the set of all finite-length, non-empty binary strings. Here, for simplicity, the source alphabet is always taken as :math:`\\mathcal{X} = \\{0, 1, \\ldots, |\\mathcal{X} - 1| \\}`. The elements in the image of :math:`\\mathrm{Enc}` are called *codewords*.
    Also, we only consider *prefix-free* codes, in which no codeword is a prefix of any other codeword.
    """
    def __init__(self, codewords, source_cardinality=None):
        """
        Constructor for the class. It expects the following parameters:
        :code:`codewords` : :obj:`list` of :obj:`tuple` of :obj:`int`
            The codewords of the code. Must be a list of length :math:`|\\mathcal{X}|^k` containing tuples of integers in :math:`\\{ 0, 1 \\}`. The tuple in position :math:`i` of :code:`codewords` should be equal to :math:`\\mathrm{Enc}(u)`, where :math:`u` is the :math:`i`-th element in the lexicographic ordering of :math:`\\mathcal{X}^k`.
        :code:`source_cardinality` : :obj:`int`, optional
            The cardinality :math:`|\\mathcal{X}|` of the source alphabet. The default value is :code:`len(codewords)`, yielding a source block size :math:`k = 1`.
        *Note:* The source block size :math:`k` is inferred from :code:`len(codewords)` and :code:`source_cardinality`.
        .. rubric:: Examples
        >>> code = komm.FixedToVariableCode(codewords=[(0,), (1,0), (1,1)])
        >>> pprint(code.enc_mapping)
        {(0,): (0,), (1,): (1, 0), (2,): (1, 1)}
        >>> pprint(code.dec_mapping)
        {(0,): (0,), (1, 0): (1,), (1, 1): (2,)}
        >>> code = komm.FixedToVariableCode(codewords=[(0,), (1,0,0), (1,1), (1,0,1)], source_cardinality=2)
        >>> pprint(code.enc_mapping)
        {(0, 0): (0,), (0, 1): (1, 0, 0), (1, 0): (1, 1), (1, 1): (1, 0, 1)}
        >>> pprint(code.dec_mapping)
        {(0,): (0, 0), (1, 0, 0): (0, 1), (1, 0, 1): (1, 1), (1, 1): (1, 0)}
        """
        # TODO: Assert prefix-free
        self._codewords = codewords
        self._source_cardinality = len(codewords) if source_cardinality is None else int(source_cardinality)
        # Infer the source block size k as the smallest exponent such that
        # |X|^k >= len(codewords); the count must then match |X|^k exactly.
        self._source_block_size = 1
        while self._source_cardinality ** self._source_block_size < len(codewords):
            self._source_block_size += 1
        if self._source_cardinality ** self._source_block_size != len(codewords):
            raise ValueError("Invalid number of codewords")
        # Build the encoding (source block -> codeword) and decoding
        # (codeword -> source block) tables by pairing the source blocks, in
        # lexicographic order, with the given codewords.
        self._enc_mapping = {}
        self._dec_mapping = {}
        for symbols, bits in zip(itertools.product(range(self._source_cardinality), repeat=self._source_block_size), codewords):
            self._enc_mapping[symbols] = tuple(bits)
            self._dec_mapping[tuple(bits)] = symbols
    @property
    def source_cardinality(self):
        """
        The cardinality :math:`|\\mathcal{X}|` of the source alphabet.
        """
        return self._source_cardinality
    @property
    def source_block_size(self):
        """
        The source block size :math:`k`.
        """
        return self._source_block_size
    @property
    def enc_mapping(self):
        """
        The encoding mapping :math:`\\mathrm{Enc}` of the code.
        """
        return self._enc_mapping
    @property
    def dec_mapping(self):
        """
        The decoding mapping :math:`\\mathrm{Dec}` of the code.
        """
        return self._dec_mapping
    def rate(self, pmf):
        """
        Computes the expected rate :math:`R` of the code, assuming a given :term:`pmf`. It is given in bits per source symbol.
        **Input:**
        :code:`pmf` : 1D-array of :obj:`float`
            The (first-order) probability mass function :math:`p_X(x)` to be assumed.
        **Output:**
        :code:`rate` : :obj:`float`
            The expected rate :math:`R` of the code.
        .. rubric:: Examples
        >>> code = komm.FixedToVariableCode([(0,), (1,0), (1,1)])
        >>> code.rate([0.5, 0.25, 0.25])
        1.5
        """
        # Probability of a source block is the product of its symbols' probabilities
        # (symbols are assumed i.i.d. under the given first-order pmf).
        probabilities = np.array([np.prod(ps) for ps in itertools.product(pmf, repeat=self._source_block_size)])
        lengths = [len(bits) for bits in self._codewords]
        return np.dot(lengths, probabilities) / self._source_block_size
    def encode(self, symbol_sequence):
        """
        Encodes a given sequence of symbols to its corresponding sequence of bits.
        **Input:**
        :code:`symbol_sequence` : 1D-array of :obj:`int`
            The sequence of symbols to be encoded. Must be a 1D-array with elements in :math:`\\mathcal{X} = \\{0, 1, \\ldots, |\\mathcal{X} - 1| \\}`. Its length must be a multiple of :math:`k`.
        **Output:**
        :code:`bit_sequence` : 1D-array of :obj:`int`
            The sequence of bits corresponding to :code:`symbol_sequence`.
        .. rubric:: Examples
        >>> code = komm.FixedToVariableCode([(0,), (1,0), (1,1)])
        >>> code.encode([1, 0, 1, 0, 2, 0])
        array([1, 0, 0, 1, 0, 0, 1, 1, 0])
        """
        # Positional shape argument: the `newshape` keyword is deprecated
        # (and later removed) in NumPy >= 2.1.
        symbols_reshaped = np.reshape(symbol_sequence, (-1, self._source_block_size))
        return np.concatenate([self._enc_mapping[tuple(symbols)] for symbols in symbols_reshaped])
    def decode(self, bit_sequence):
        """
        Decodes a given sequence of bits to its corresponding sequence of symbols.
        **Input:**
        :code:`bit_sequence` : 1D-array of :obj:`int`
            The sequence of bits to be decoded. Must be a 1D-array with elements in :math:`\\{ 0, 1 \\}`.
        **Output:**
        :code:`symbol_sequence` : 1D-array of :obj:`int`
            The sequence of symbols corresponding to :code:`bits`.
        .. rubric:: Examples
        >>> code = komm.FixedToVariableCode([(0,), (1,0), (1,1)])
        >>> code.decode([1, 0, 0, 1, 0, 0, 1, 1, 0])
        array([1, 0, 1, 0, 2, 0])
        """
        return np.array(_parse_prefix_free(bit_sequence, self._dec_mapping))
    def __repr__(self):
        args = 'codewords={}'.format(self._codewords)
        return '{}({})'.format(self.__class__.__name__, args)
class HuffmanCode(FixedToVariableCode):
    """
    Huffman code. It is an optimal (minimal expected rate) fixed-to-variable length code (:class:`FixedToVariableCode`) for a given probability mass function.
    .. rubric:: Examples
    >>> code = komm.HuffmanCode([0.7, 0.15, 0.15])
    >>> pprint(code.enc_mapping)
    {(0,): (0,), (1,): (1, 1), (2,): (1, 0)}
    >>> code = komm.HuffmanCode([0.7, 0.15, 0.15], source_block_size=2)
    >>> pprint(code.enc_mapping)
    {(0, 0): (1,),
     (0, 1): (0, 0, 0, 0),
     (0, 2): (0, 1, 1),
     (1, 0): (0, 1, 0),
     (1, 1): (0, 0, 0, 1, 1, 1),
     (1, 2): (0, 0, 0, 1, 1, 0),
     (2, 0): (0, 0, 1),
     (2, 1): (0, 0, 0, 1, 0, 1),
     (2, 2): (0, 0, 0, 1, 0, 0)}
    """
    def __init__(self, pmf, source_block_size=1, policy='high'):
        """
        Constructor for the class. It expects the following parameters:
        :code:`pmf` : 1D-array of :obj:`float`
            The probability mass function used to construct the code.
        :code:`source_block_size` : :obj:`int`, optional
            The source block size :math:`k`. The default value is :math:`k = 1`.
        :code:`policy` : :obj:`str`, optional
            The policy to be used when constructing the code. It must be either :code:`'high'` (move combined symbols as high as possible) or :code:`'low'` (move combined symbols as low as possible). The default value is :code:`'high'`.
        """
        self._pmf = np.array(pmf)
        self._policy = policy
        # Validate before delegating the codeword construction to the base class.
        if policy not in ['high', 'low']:
            raise ValueError("Parameter 'policy' must be in {'high', 'low'}")
        super().__init__(codewords=HuffmanCode._huffman_algorithm(pmf, source_block_size, policy),
                         source_cardinality=self._pmf.size)
    @property
    def pmf(self):
        """
        The probability mass function used to construct the code. This property is read-only.
        """
        return self._pmf
    @staticmethod
    def _huffman_algorithm(pmf, source_block_size, policy):
        # Classical Huffman construction over source blocks of length k.
        class Node:
            def __init__(self, index, probability):
                self.index = index              # position of this node in `tree`
                self.probability = probability  # probability of the (merged) block
                self.parent = None              # index of the merge node above, set when merged
                self.bit = None                 # bit on the edge towards the parent
            def __lt__(self, other):
                # Tie-breaking on equal probabilities implements the
                # 'high'/'low' placement policy for combined symbols.
                if policy == 'high':
                    return (self.probability, self.index) < (other.probability, other.index)
                elif policy == 'low':
                    return (self.probability, -self.index) < (other.probability, -other.index)
        # One leaf per source block; a block's probability is the product of
        # its symbols' probabilities (i.i.d. assumption).
        tree = [Node(i, np.prod(probs)) for (i, probs) in enumerate(itertools.product(pmf, repeat=source_block_size))]
        queue = [node for node in tree]
        heapq.heapify(queue)
        # Repeatedly merge the two least probable nodes until a single root remains.
        while len(queue) > 1:
            node1 = heapq.heappop(queue)
            node0 = heapq.heappop(queue)
            node1.bit = 1
            node0.bit = 0
            node = Node(index=len(tree), probability=node0.probability + node1.probability)
            node0.parent = node1.parent = node.index
            heapq.heappush(queue, node)
            tree.append(node)
        # Read each leaf's codeword by walking up to the root, collecting edge bits
        # (prepended, so the codeword reads root-to-leaf).
        codewords = []
        for symbol in range(len(pmf)**source_block_size):
            node = tree[symbol]
            bits = []
            while node.parent is not None:
                bits.insert(0, node.bit)
                node = tree[node.parent]
            codewords.append(tuple(bits))
        return codewords
    def __repr__(self):
        args = 'pmf={}, source_block_size={}'.format(self._pmf.tolist(), self._source_block_size)
        return '{}({})'.format(self.__class__.__name__, args)
class VariableToFixedCode:
    """
    Binary (prefix-free) variable-to-fixed length code. Let :math:`\\mathcal{X}` be the alphabet of some discrete source. A *binary variable-to-fixed length code* of code block size :math:`n` is defined by a (possibly partial) decoding mapping :math:`\\mathrm{Dec} : \\{ 0, 1 \\}^n \\to \\mathcal{X}^+`, where :math:`\\mathcal{X}^+` denotes the set of all finite-length, non-empty strings from the source alphabet. Here, for simplicity, the source alphabet is always taken as :math:`\\mathcal{X} = \\{0, 1, \\ldots, |\\mathcal{X} - 1| \\}`. The elements in the image of :math:`\\mathrm{Dec}` are called *sourcewords*.
    Also, we only consider *prefix-free* codes, in which no sourceword is a prefix of any other sourceword.
    """
    def __init__(self, sourcewords):
        """
        Constructor for the class. It expects the following parameters:
        :code:`sourcewords` : :obj:`list` of :obj:`tuple` of :obj:`int`
            The sourcewords of the code. Must be a list of length at most :math:`2^n` containing tuples of integers in :math:`\\mathcal{X}`. The tuple in position :math:`i` of :code:`sourcewords` should be equal to :math:`\\mathrm{Dec}(v)`, where :math:`v` is the :math:`i`-th element in the lexicographic ordering of :math:`\\{ 0, 1 \\}^n`.
        *Note:* The code block size :math:`n` is inferred from :code:`len(sourcewords)`.
        .. rubric:: Examples
        >>> code = komm.VariableToFixedCode(sourcewords=[(1,), (2,), (0,1), (0,2), (0,0,0), (0,0,1), (0,0,2)])
        >>> pprint(code.enc_mapping)
        {(0, 0, 0): (1, 0, 0),
         (0, 0, 1): (1, 0, 1),
         (0, 0, 2): (1, 1, 0),
         (0, 1): (0, 1, 0),
         (0, 2): (0, 1, 1),
         (1,): (0, 0, 0),
         (2,): (0, 0, 1)}
        >>> pprint(code.dec_mapping)
        {(0, 0, 0): (1,),
         (0, 0, 1): (2,),
         (0, 1, 0): (0, 1),
         (0, 1, 1): (0, 2),
         (1, 0, 0): (0, 0, 0),
         (1, 0, 1): (0, 0, 1),
         (1, 1, 0): (0, 0, 2)}
        """
        # TODO: Assert prefix-free
        self._sourcewords = sourcewords
        # Largest symbol appearing in any sourceword determines |X|.
        self._source_cardinality = max(itertools.chain(*sourcewords)) + 1
        # Smallest n with 2^n >= len(sourcewords).
        self._code_block_size = (len(sourcewords) - 1).bit_length()
        self._enc_mapping = {}
        self._dec_mapping = {}
        # Pair the codewords (binary n-tuples in lexicographic order) with the
        # given sourcewords. (The original loop had the `symbols`/`bits` names
        # swapped, which obscured which table is which.)
        for bits, symbols in zip(itertools.product(range(2), repeat=self._code_block_size), sourcewords):
            self._enc_mapping[tuple(symbols)] = bits
            self._dec_mapping[bits] = tuple(symbols)
    @property
    def source_cardinality(self):
        """
        The cardinality :math:`|\\mathcal{X}|` of the source alphabet.
        """
        return self._source_cardinality
    @property
    def code_block_size(self):
        """
        The code block size :math:`n`.
        """
        return self._code_block_size
    @property
    def enc_mapping(self):
        """
        The encoding mapping :math:`\\mathrm{Enc}` of the code.
        """
        return self._enc_mapping
    @property
    def dec_mapping(self):
        """
        The decoding mapping :math:`\\mathrm{Dec}` of the code.
        """
        return self._dec_mapping
    def rate(self, pmf):
        """
        Computes the expected rate :math:`R` of the code, assuming a given :term:`pmf`. It is given in bits per source symbol.
        **Input:**
        :code:`pmf` : 1D-array of :obj:`float`
            The (first-order) probability mass function :math:`p_X(x)` to be assumed.
        **Output:**
        :code:`rate` : :obj:`float`
            The expected rate :math:`R` of the code.
        .. rubric:: Examples
        >>> code = komm.VariableToFixedCode([(0,0,0), (0,0,1), (0,1), (1,)])
        >>> code.rate([2/3, 1/3])
        0.9473684210526315
        """
        # R = n / E[sourceword length]: each parsed sourceword costs n output bits.
        probabilities = np.array([np.prod([pmf[x] for x in symbols]) for symbols in self._sourcewords])
        lengths = [len(symbols) for symbols in self._sourcewords]
        return self._code_block_size / np.dot(lengths, probabilities)
    def encode(self, symbol_sequence):
        """
        Encodes a given sequence of symbols to its corresponding sequence of bits.
        **Input:**
        :code:`symbol_sequence` : 1D-array of :obj:`int`
            The sequence of symbols to be encoded. Must be a 1D-array with elements in :math:`\\mathcal{X} = \\{0, 1, \\ldots, |\\mathcal{X} - 1| \\}`.
        **Output:**
        :code:`bit_sequence` : 1D-array of :obj:`int`
            The sequence of bits corresponding to :code:`symbol_sequence`.
        .. rubric:: Examples
        >>> code = komm.VariableToFixedCode([(0,0,0), (0,0,1), (0,1), (1,)])
        >>> code.encode([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0])
        array([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0])
        """
        return np.array(_parse_prefix_free(symbol_sequence, self._enc_mapping))
    def decode(self, bit_sequence):
        """
        Decodes a given sequence of bits to its corresponding sequence of symbols.
        **Input:**
        :code:`bit_sequence` : 1D-array of :obj:`int`
            The sequence of bits to be decoded. Must be a 1D-array with elements in :math:`\\{ 0, 1 \\}`. Its length must be a multiple of :math:`n`.
        **Output:**
        :code:`symbol_sequence` : 1D-array of :obj:`int`
            The sequence of symbols corresponding to :code:`bits`.
        .. rubric:: Examples
        >>> code = komm.VariableToFixedCode([(0,0,0), (0,0,1), (0,1), (1,)])
        >>> code.decode([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0])
        array([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0])
        """
        # Positional shape argument: the `newshape` keyword is deprecated
        # (and later removed) in NumPy >= 2.1.
        bits_reshaped = np.reshape(bit_sequence, (-1, self._code_block_size))
        return np.concatenate([self._dec_mapping[tuple(bits)] for bits in bits_reshaped])
    def __repr__(self):
        args = 'sourcewords={}'.format(self._sourcewords)
        return '{}({})'.format(self.__class__.__name__, args)
class TunstallCode(VariableToFixedCode):
    """
    Tunstall code. It is an optimal (minimal expected rate) variable-to-fixed length code (:class:`VariableToFixedCode`) for a given probability mass function.
    .. rubric:: Examples
    >>> code = komm.TunstallCode([0.6, 0.3, 0.1], code_block_size=3)
    >>> pprint(code.enc_mapping)
    {(0, 0, 0): (0, 0, 0),
     (0, 0, 1): (0, 0, 1),
     (0, 0, 2): (0, 1, 0),
     (0, 1): (0, 1, 1),
     (0, 2): (1, 0, 0),
     (1,): (1, 0, 1),
     (2,): (1, 1, 0)}
    """
    def __init__(self, pmf, code_block_size):
        """
        Constructor for the class. It expects the following parameters:
        :code:`pmf` : 1D-array of :obj:`float`
            The probability mass function used to construct the code.
        :code:`code_block_size` : :obj:`int`
            The code block size :math:`n`.
        """
        self._pmf = np.array(pmf)
        super().__init__(sourcewords=TunstallCode._tunstall_algorithm(pmf, code_block_size))
    @property
    def pmf(self):
        """
        The probability mass function used to construct the code. This property is read-only.
        """
        return self._pmf
    @staticmethod
    def _tunstall_algorithm(pmf, code_block_size):
        # Leaves of the Tunstall parse tree, kept in a max-heap by probability.
        class Node:
            def __init__(self, symbols, probability):
                self.symbols = symbols          # the sourceword carried by this leaf
                self.probability = probability  # probability of that sourceword
            def __lt__(self, other):
                # heapq is a min-heap; negating the probabilities makes
                # heappop return the MOST probable leaf.
                return -self.probability < -other.probability
        # Start with one leaf per source symbol.
        queue = [Node((symbol,), probability) for (symbol, probability) in enumerate(pmf)]
        heapq.heapify(queue)
        # Each expansion replaces one leaf by |X| children, i.e. adds |X| - 1
        # leaves; keep expanding the most probable leaf while the result still
        # fits in the 2^n available codewords.
        # NOTE(review): assumes 2**code_block_size >= len(pmf); smaller values
        # never enter the loop and yield just the single-symbol leaves — confirm intended.
        while len(queue) + len(pmf) - 1 < 2**code_block_size:
            node = heapq.heappop(queue)
            for (symbol, probability) in enumerate(pmf):
                new_node = Node(node.symbols + (symbol,), node.probability * probability)
                heapq.heappush(queue, new_node)
        # Lexicographic order pairs the sourcewords with the codewords
        # assigned by the base-class constructor.
        sourcewords = sorted(node.symbols for node in queue)
        return sourcewords
    def __repr__(self):
        args = 'pmf={}, code_block_size={}'.format(self._pmf.tolist(), self._code_block_size)
        return '{}({})'.format(self.__class__.__name__, args)
def _parse_prefix_free(input_sequence, dictionary):
output_sequence = []
i = 0
while i < len(input_sequence):
j = 1
while i + j <= len(input_sequence):
try:
key = tuple(input_sequence[i : i + j])
output_sequence.extend(dictionary[key])
break
except KeyError:
j += 1
i += j
return output_sequence
|
import numpy as np
import keras
import random, math
import unittest
class GeneralUtils():
    """Assorted helpers for the mutation-testing framework: random decisions,
    partial permutations, unison shuffling and progress printing."""
    def __init__(self):
        pass
    def decision(self, prob):
        """Return True with probability *prob*, which must lie in [0, 1]."""
        assert prob >= 0, 'Probability should in the range of [0, 1]'
        assert prob <= 1, 'Probability should in the range of [0, 1]'
        roll = random.random()
        return roll < prob
    def generate_permutation(self, size_of_permutation, extract_portion):
        """Return the first floor(size * portion) indices of a random permutation."""
        assert extract_portion <= 1
        count = math.floor(size_of_permutation * extract_portion)
        return np.random.permutation(size_of_permutation)[:count]
    def shuffle(self, a):
        """Return a shuffled copy of array *a*; the input is left untouched."""
        order = np.random.permutation(len(a))
        shuffled = np.empty(a.shape, dtype=a.dtype)
        shuffled[order] = a[np.arange(len(a))]
        return shuffled
    def shuffle_in_uni(self, a, b):
        """Shuffle *a* and *b* with one shared permutation, keeping pairs aligned."""
        assert len(a) == len(b)
        order = np.random.permutation(len(a))
        positions = np.arange(len(a))
        shuffled_a = np.empty(a.shape, dtype=a.dtype)
        shuffled_b = np.empty(b.shape, dtype=b.dtype)
        shuffled_a[order] = a[positions]
        shuffled_b[order] = b[positions]
        return shuffled_a, shuffled_b
    def shuffle_in_uni_with_permutation(self, a, b, permutation):
        """Shuffle only the entries selected by *permutation*, in unison;
        all other entries keep their positions."""
        assert len(a) == len(b)
        shuffled_a, shuffled_b = a.copy(), b.copy()
        new_positions = self.shuffle(permutation)
        shuffled_a[new_positions] = a[permutation]
        shuffled_b[new_positions] = b[permutation]
        return shuffled_a, shuffled_b
    def print_messages_SMO(self, mode, train_datas=None, train_labels=None, mutated_datas=None, mutated_labels=None, model=None, mutated_model=None, mutation_ratio=0):
        """Print before/after information for a source-level mutation operator.

        SMM stands for source-level mutated model. Data-level modes (DR/DM)
        report dataset shapes; architecture-level modes (LR/LAs/AFRs) report
        model summaries; the remaining modes print nothing.
        """
        if mode in ['DR', 'DM']:
            print('Before ' + mode)
            print('Train data shape:', train_datas.shape)
            print('Train labels shape:', train_labels.shape)
            print('')
            print('After ' + mode + ', where the mutation ratio is', mutation_ratio)
            print('Train data shape:', mutated_datas.shape)
            print('Train labels shape:', mutated_labels.shape)
            print('')
        elif mode in ['LR', 'LAs', 'AFRs']:
            print('Original untrained model architecture:')
            model.summary()
            print('')
            print('Mutated untrained model architecture:')
            mutated_model.summary()
            print('')
    def print_messages_MMM_generators(self, mode, network=None, test_datas=None, test_labels=None, model=None, mutated_model=None, STD=0.1, mutation_ratio=0):
        """Print before/after evaluation for a model-level mutation operator.

        MMM stands for model-level mutated model. Weight-level modes
        (GF/WS/NEB/NAI/NS) evaluate both models; structure-level modes
        (LD/LAm/AFRm) additionally print the model summaries.
        """
        if mode in ['GF', 'WS', 'NEB', 'NAI', 'NS']:
            print('Before ' + mode)
            network.evaluate_model(model, test_datas, test_labels)
            print('After ' + mode + ', where the mutation ratio is', mutation_ratio)
            network.evaluate_model(mutated_model, test_datas, test_labels, mode)
        elif mode in ['LD', 'LAm', 'AFRm']:
            print('Before ' + mode)
            model.summary()
            network.evaluate_model(model, test_datas, test_labels)
            print('After ' + mode)
            mutated_model.summary()
            network.evaluate_model(mutated_model, test_datas, test_labels, mode)
class ModelUtils():
    """Helpers for copying Keras models and inspecting layer weights."""
    def __init__(self):
        pass
    def print_layer_info(self, layer):
        """Print every key/value pair of *layer*'s configuration dict."""
        layer_config = layer.get_config()
        print('Print layer configuration information:')
        for key, value in layer_config.items():
            print(key, value)
    def model_copy(self, model, mode=''):
        """Clone *model* (architecture and weights), appending '_copy_<mode>'
        to every layer name and the model name so the clone can coexist with
        the original in the same session."""
        original_layers = [l for l in model.layers]
        suffix = '_copy_' + mode
        new_model = keras.models.clone_model(model)
        for index, layer in enumerate(new_model.layers):
            original_layer = original_layers[index]
            # clone_model builds freshly initialized weights; copy the
            # originals over so the clone is an exact duplicate.
            # NOTE(review): `layer.name` is a read-only property in recent
            # Keras releases — confirm the targeted Keras version allows this.
            layer.name = layer.name + suffix
            layer.set_weights(original_layer.get_weights())
        new_model.name = new_model.name + suffix
        return new_model
    def get_booleans_of_layers_should_be_mutated(self, num_of_layers, indices):
        """Return a boolean mask of length *num_of_layers*: all True when
        *indices* is None, otherwise True only at the listed indices."""
        if indices is None:  # fix: identity test instead of `== None`
            booleans_for_layers = np.full(num_of_layers, True)
        else:
            booleans_for_layers = np.full(num_of_layers, False)
            for index in indices:
                booleans_for_layers[index] = True
        return booleans_for_layers
    def print_comparision_of_layer_weights(self, old_model, new_model):
        """Print a per-layer table reporting whether the weights differ
        between *old_model* and *new_model*."""
        old_layers = [l for l in old_model.layers]
        new_layers = [l for l in new_model.layers]
        assert len(old_layers) == len(new_layers)
        num_of_layers = len(old_layers)
        booleans_for_layers = np.full(num_of_layers, True)
        names_for_layers = []
        for index in range(num_of_layers):
            old_layer, new_layer = old_layers[index], new_layers[index]
            names_for_layers.append(type(old_layer).__name__)
            old_layer_weights, new_layer_weights = old_layer.get_weights(), new_layer.get_weights()
            if len(old_layer_weights) == 0:
                continue  # weight-less layers (e.g. activations) are reported as unchanged
            is_equal_connections = np.array_equal(old_layer_weights[0], new_layer_weights[0])
            is_equal_biases = np.array_equal(old_layer_weights[1], new_layer_weights[1])
            if not (is_equal_connections and is_equal_biases):
                booleans_for_layers[index] = False
        print('Comparision of weights between original model and mutated model,')
        print('If the weights of specific layer is modified, return True. Otherwise, return False')
        print('')
        print(' Layer index | Layer name | Is mutated ?')
        print(' -------------------------------------------')
        for index, result in enumerate(booleans_for_layers):
            name = names_for_layers[index]
            print(' {index} | {name} | {result}'.format(index=str(index).rjust(11), name=name.rjust(14), result=(not result)))
        print('')
class ExaminationalUtils():
    """Input-validation helpers shared by the mutation operators; every check
    raises AssertionError on invalid input and returns None otherwise."""
    def __init__(self):
        pass
    def mutation_ratio_range_check(self, mutation_ratio):
        """Assert that *mutation_ratio* lies within [0, 1]."""
        assert 0 <= mutation_ratio, 'Mutation ratio attribute should in the range [0, 1]'
        assert mutation_ratio <= 1, 'Mutation ratio attribute should in the range [0, 1]'
    def training_dataset_consistent_length_check(self, lst_a, lst_b):
        """Assert that training data and labels have the same length."""
        assert len(lst_a) == len(lst_b), 'Training datas and labels should have the same length'
    def valid_indices_of_mutated_layers_check(self, num_of_layers, indices):
        """Assert that every index (if any) is a valid layer index in [0, num_of_layers)."""
        for index in (indices if indices is not None else []):
            assert index >= 0, 'Index should be positive'
            assert index < num_of_layers, 'Index should not be out of range, where index should be smaller than ' + str(num_of_layers)
    def in_suitable_indices_check(self, suitable_indices, indices):
        """Assert that every requested index is one of the indices suitable for this mutation."""
        for index in (indices if indices is not None else []):
            assert index in suitable_indices, 'Index ' + str(index) + ' is an invalid index for this mutation'
__author__ = 'shuai'
class Solution(object):
    def isValid(self, s):
        """
        Check whether a bracket string is well-formed.
        :type s: str
        :rtype: bool

        Bug fix: the previous version stopped scanning as soon as the stack
        became empty mid-string, so inputs like "()((" were wrongly accepted.
        This is the standard stack algorithm: push openers, pop-and-match on
        closers, and require an empty stack at the end.
        Note: as before, the empty string is rejected (returns False).
        """
        if not s or len(s) % 2:
            return False
        pairs = {')': '(', ']': '[', '}': '}'[0:0] or '{'}
        stack = []
        for ch in s:
            if ch in pairs:
                # A closer must match the most recent unmatched opener.
                if not stack or stack.pop() != pairs[ch]:
                    return False
            else:
                stack.append(ch)
        return not stack
sol = Solution()
# Fix: the bare `print` statement is Python-2-only syntax; the parenthesized
# call below behaves identically on Python 2 and Python 3.
print(sol.isValid("(((}]))"))
|
# -*- coding: utf-8 -*-
"""
wendt_mitchell_p4.py
Created on Sun Oct 15 19:23:05 2018
Created by: Mitchell Wendt
Revised: 10/15/2018
People who helped me:
Edward Hughes (base code as provided in lecture, help with setting up the differential equations and plotting the realistic pendulum)
"""
import numpy as np #import numpy to calculate sin, cos, matrices, and other general mathematical functions
import matplotlib.pyplot as plt #import matplotlib to plot the calculated pendulum positions in an animation
import scipy.integrate as integrate #import scipy.integrate to include the differential equation solver odeint
def pendulum(thetaZero = 30, damp = 0, timeSpan = 20, length = 0.45, gravity = 9.8):
    '''
    Animate a realistic (large-angle, optionally damped) pendulum next to the
    small-angle 'choir boy' approximation.

    The full equation of motion (Eq 1 in the homework document) is split into
    a first-order system (Eq 3) and integrated with scipy's odeint, supplying
    the analytic Jacobian. The small-angle closed form
    theta(t) = theta0 * cos(sqrt(g/L) * t) is animated alongside for comparison.

    thetaZero : initial angle of both pendulums, in degrees (default 30);
                converted to radians internally
    damp      : damping coefficient mu, >= 0 (default 0, undamped)
    timeSpan  : simulated time in seconds (default 20; frame timing makes the
                wall-clock duration only approximate)
    length    : pendulum length in meters (default 0.45)
    gravity   : gravitational acceleration in m/s^2 (default 9.8, Earth)

    Raises ValueError if damp, timeSpan, length or gravity is negative.
    Returns None; the result is the on-screen animation.
    '''
    if (damp < 0 or timeSpan < 0 or length < 0 or gravity < 0):
        raise ValueError("'damp', 'timeSpan', 'length', and 'gravity' must all be greater than zero.")
    thetaZero = np.deg2rad(thetaZero)  # work in radians from here on
    def dadt(a, t):
        '''Right-hand side of the first-order system, a = [theta, dtheta/dt]:
        dtheta/dt = a[1];  d(a[1])/dt = -mu*a[1] - g*sin(theta)/L.'''
        return [a[1], -damp*a[1] - (gravity*np.sin(a[0])/length)]
    def jacobian(a, t):
        '''Analytic Jacobian of dadt with respect to [theta, dtheta/dt].'''
        theta, omega = a
        # np.array instead of the discouraged np.matrix; odeint accepts both.
        return np.array([[0, 1],
                         [(-gravity/length)*np.cos(theta), -damp]])
    theta0 = [thetaZero, 0]  # start at thetaZero with zero angular velocity
    # 30 frames per simulated second; int() so non-integer timeSpan works too.
    t = np.linspace(0, timeSpan, int(timeSpan*30))
    theta = integrate.odeint(dadt, theta0, t, Dfun=jacobian)
    # Cartesian bob coordinates; the pivot sits at the origin, hence -cos for y.
    x = length*np.sin(theta[:, 0])
    y = -length*np.cos(theta[:, 0])
    # Small-angle ('choir boy') closed-form solution for comparison (Eq 2).
    choir_boy_theta = thetaZero*np.cos(np.sqrt(gravity/length)*t)
    choir_boy_x = length*np.sin(choir_boy_theta)
    choir_boy_y = -length*np.cos(choir_boy_theta)
    # Plot window sized to always contain both pendulums.
    ax = plt.axes(xlim=(-1.25*length, 1.25*length),
                  ylim=(-1.25*length, 0.25*length))
    pivot, = plt.plot(0, 0)  # pivot marker at the origin
    # Fix: plt.hold() was deprecated in matplotlib 2.0 and REMOVED in 3.0
    # (calling it raises AttributeError); holding is the default behavior,
    # so repeated plot calls already share these axes.
    point, = plt.plot([], [], 'r-', marker='o', label='real')
    choirpoint, = plt.plot([], [], 'b--', marker='o', label='choir boy')
    plt.legend()
    # Animate: each iteration is one frame; update rod endpoints and pause.
    for xpoint, ypoint, choirxpoint, choirypoint in zip(x, y, choir_boy_x, choir_boy_y):
        point.set_data([0, xpoint], [0, ypoint])
        choirpoint.set_data([0, choirxpoint], [0, choirypoint])
        plt.pause(0.034)  # ~1/30 s between frames (30 fps)
    return
|
from odoo import models, fields, api
class hr_job(models.Model):
    """Extend hr.job with an approving-manager relation."""
    _inherit = 'hr.job'
    # Fix: Odoo's Many2one keyword is `ondelete` (no underscore); `on_delete`
    # is a Django-ism that Odoo ignores, so the restrict rule never applied.
    # NOTE(review): the comodel 'hr.job' looks suspicious for an "Approving
    # Manager" field — presumably 'hr.employee' was intended; confirm against
    # the data model before changing it, so only the kwarg bug is fixed here.
    approving_manager_id = fields.Many2one('hr.job', string='Approving Manager', index=True, ondelete="restrict")
|
# Every function below is a deliberate no-op stub.
# NOTE(review): the names match CircuitPython's `supervisor` module API;
# presumably this file is a desktop/test shim for code written against that
# API — confirm before relying on any behavior here.
def disable_autoreload():
    """No-op stub."""
    pass
def enable_autoreload():
    """No-op stub."""
    pass
def reload():
    """No-op stub."""
    pass
# Placeholder for the supervisor runtime object; always None in this stub.
runtime = None
def set_next_stack_limit():
    """No-op stub."""
    pass
def set_rgb_status_brightness():
    """No-op stub."""
    pass
|
from scoring.score_components.physchem.mol_weight import MolWeight
from scoring.score_components.physchem.tpsa import PSA
from scoring.score_components.physchem.rot_bonds import RotatableBonds
from scoring.score_components.physchem.hbd import HBD_Lipinski
from scoring.score_components.physchem.num_rings import NumRings
from scoring.score_components.physchem.hba import HBA_Lipinski
from scoring.score_components.physchem.slogp import SlogP
from scoring.score_components.physchem.bertzct import BCT |
import logging
import socket
import struct
import meta
log = meta.log
class SlimDiscovery(object):
    """Discover Slim servers on the local subnet via UDP broadcast.

    Packet layout follows Slim/Networking/Discovery.pm from the server
    distribution.
    """
    deviceid = meta.deviceid
    revision = meta.revision
    mac = meta.mac
    buffersize = 1024  # max reply size accepted in one recvfrom()

    def __init__(self, port=meta.SLIMPORT):
        self.port = port

    def pack(self):
        """Byte-pack a discovery package, see Slim/Networking/Discovery.pm.

        Layout (18 bytes total):
          1 byte  'd'  - discovery marker
          1 byte       - reserved
          1 byte       - deviceid, 1 is an old slimp3, >=2 <= 4 is a squeezebox
          1 byte       - firmware revision
          8 bytes      - reserved
          6 bytes      - mac address of client
        """
        return struct.pack(
            'B x B B 8x 6s',
            ord('d'),        # discovery marker
            self.deviceid,   # deviceid 1 is an old slimp3, >=2 <= 4 is a squeezebox
            self.revision,   # firmware version
            self.mac)        # mac address ('x'/'8x' cover the reserved bytes)

    def unpack(self, data):
        """Unpack a reply package, see Slim/Networking/Discovery.pm.

        As we send version 4 as firmware, we expect a D + 17char hostname.
        Function returns unicode string hostname or None if wrong data.
        """
        try:
            (packtype, hostname) = struct.unpack('c17s', data)
        except struct.error:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only struct.error means the
            # payload is not a discovery reply.
            log.debug('unable to parse as discovery reply: %s' % data)
            return None
        if packtype != b'D':
            log.debug('not a discovery reply: %s' % data)
            return None
        return str(hostname, 'utf-8').rstrip('\0')

    def find(self, singleshot=True, timeout=10):
        """Find slim server on the subnet via broadcast.

        @param singleshot, return first who answers, otherwise wait for more replies
        @param timeout, timeout to wait for reply/replies
        @return list of [ ((ip, port), name) ]"""
        result = []
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.settimeout(timeout)
            log.debug('sending discovery broadcast')
            s.sendto(self.pack(), ('<broadcast>', self.port))
            log.debug('waiting for discovery reply')
            try:
                while True:
                    (data, (ip, port)) = s.recvfrom(self.buffersize)
                    log.debug('received reply from %s: %s' % (ip, data))
                    name = self.unpack(data)
                    if name:
                        log.debug('found host %s:%s named %s' % (ip, port, name))
                        result.append(((ip, port), name))
                    else:
                        log.debug('package ignored')
                    if result and singleshot:
                        break
            except socket.timeout:
                # Timeout simply ends collection; whatever we gathered is
                # the answer.
                pass
        finally:
            # BUG FIX: the socket was never closed, leaking one file
            # descriptor per call.
            s.close()
        log.info('slim discovery: %s' % result)
        return result
if __name__ == '__main__':
    # Manual smoke test: broadcast and log every server found on the subnet.
    logging.basicConfig(level=logging.DEBUG)
    disco = SlimDiscovery()
    log.info(disco.find(singleshot=False))
|
import unittest
class ExampleTests(unittest.TestCase):
    """Minimal sanity-check test case."""

    def test_1(self):
        # Trivially-true assertion; exists only to prove the harness runs.
        self.assertTrue(1 == 1)
|
class Person():
    # Attributes written directly in the class body are *class attributes*
    # (they can be accessed through the class name itself).
    name = "lily"
    def __init__(self, name, age):
        # Attributes assigned on self in __init__ are *instance attributes*.
        # self.name = name
        self.age = age
# 1. Accessing a class attribute:
#    ClassName.attribute
print(Person.name)
# 2. Instance attributes take priority over class attributes.
#    When looking up an attribute on an object, the instance attribute is
#    used if present; otherwise the class attribute with the same name is
#    used; if neither exists, an error is raised.
p1 = Person("lucy", 24)
print(p1.name)
# 3. Attributes can be added to an object dynamically; a dynamically added
#    attribute must be assigned before it is read.
#    A dynamically added attribute is invisible to other instances.
p1.weight = 50.0
print(p1.weight)
# p2 = Person("tom", 45)
# print(p2.weight)
print("----------------------------------")
# 4. Deleting an object attribute
#    Syntax: del obj.attribute
#    Original note claimed a second `del` falls through to the class
#    attribute; NOTE(review): in CPython `del p3.name` here would raise
#    AttributeError, since no instance attribute exists -- verify.
p3 = Person("花花", 12)
print(p3.name)
# del p3.name
# print(p3.name)
# NOTE: avoid giving instance attributes the same name as a class
# attribute; the instance attribute shadows the class attribute until it
# is deleted.
|
'''
https://open.kattis.com/contests/kgvxvo/problems/8queens
'''
board = []
def split(word):
    """Return the characters of *word* as a list."""
    return list(word)
# Read the eight board rows from stdin, one line per row, each stored as a
# list of single characters.
for _ in range(8):
    board.append(split(input()))
def check(board):
    """Return True iff the board holds exactly eight queens ('*')."""
    total = 0
    for row in board:
        total += row.count('*')
    return total == 8
def rows(board):
    """Return False if any row holds two or more queens, else True."""
    for row in board:
        if row.count('*') >= 2:
            return False
    return True
def column(board):
    """Return False if any column holds two or more queens, else True."""
    for col in zip(*board):
        if col.count('*') >= 2:
            return False
    return True
def diag(board):
    """Return False if any queen shares a diagonal with another, else True.

    From each queen, walk the four diagonal directions to the board edge;
    meeting another '*' on the way means a conflict.
    """
    for c in range(8):
        for r in range(8):
            if board[c][r] == '*':
                i, j = c,r
                while( i < 7 and j < 7): # bottom right
                    i+=1
                    j+=1
                    if board[i][j] == '*':
                        return False
                i, j = c,r
                while( i > 0 and j < 7): # top right
                    i-=1
                    j+=1
                    if board[i][j] == '*':
                        return False
                i, j = c,r
                while( i > 0 and j > 0): # top left
                    i-=1
                    j-=1
                    if board[i][j] == '*':
                        return False
                i, j = c,r
                while( i < 7 and j > 0): # bottom left (was mislabeled "bottom right")
                    i+=1
                    j-=1
                    if board[i][j] == '*':
                        return False
    return True
# A board is valid only if every one of the four validations passes:
# exactly eight queens, and no two sharing a row, column, or diagonal.
if check(board) and rows(board) and column(board) and diag(board):
    print('valid')
else:
    print('invalid')
# 1. Write a recursive search function that receives as input an array of integers and a target integer value. This function should return True if the target element exists in the array, and False otherwise.
def binary_search_recursive(arr, target, low, high):
    """Recursively binary-search *arr* (sorted ascending) for *target*.

    Returns the index of *target* within [low, high], or -1 if absent.
    """
    # Base cases: empty array, or the search window has closed without a
    # match.  BUG FIX: the original only checked len(arr) == 0, which
    # caused infinite recursion whenever the target was not present.
    if len(arr) == 0 or low > high:
        return -1
    middle = (low + high) // 2
    if arr[middle] == target:
        return middle
    elif arr[middle] > target:
        # Target, if present, lies strictly left of middle.
        return binary_search_recursive(arr, target, low, middle - 1)
    else:
        # Target, if present, lies strictly right of middle.
        return binary_search_recursive(arr, target, middle + 1, high)
# 2. What would be the base case(s) we’d have to consider for implementing this function?
# The empty-array check is one base case, but it is not sufficient on its own:
# we also need the case where low > high (the search window has closed),
# otherwise the recursion never terminates when the target is absent.
# 3. How should our recursive solution converge on our base case(s)?
|
print('The program supports three types of alerts, "namely danger zone", "humidity", and "temperature"')
print('The temperature range is between 21 c (70 f) and 28 c (82 f)')
print('The humidity range is between 40 and 70 percentage')
# Comfort thresholds (relative humidity %, temperature in Celsius).
# BUG FIX: the high/low temperatures were swapped (high=21, low=28),
# contradicting the advertised 21-28 C range.
humidity_high = 70
humidity_low = 40
temperature_high = 28
temperature_low = 21
while True:
    baby_name = input('Enter baby name : ')
    if not baby_name :
        baby_name = 'baby'
    alert_type = input('Enter alert type : ("namely danger zone", "humidity", and "temperature")')
    if alert_type == 'danger zone' :
        zone_name = input('Enter zone name : ')
        # BUG FIX: missing space produced e.g. "babyis entering the ...".
        print(baby_name + ' is entering the ' + zone_name + '. Take a look now!')
    elif alert_type == 'humidity' :
        humidity_now = input('Enter humidity : ')
        if humidity_now :
            # Compare against the named thresholds rather than repeating
            # the magic numbers 70/40.
            if int(humidity_now) > humidity_high :
                print(baby_name + ' room humidity '+ humidity_now + ' seems quite high')
            elif int(humidity_now) < humidity_low :
                print(baby_name + ' room humidity '+ humidity_now + ' seems quite low')
    elif alert_type == 'temperature' :
        temperature = input('Enter temperature in Celsius : ')
        if temperature :
            temperature_uint = input('Enter user temperature uint, the value shold be "c" or "f" :')
            if temperature_uint == 'c' :
                if int(temperature) > temperature_high :
                    print(baby_name + ' room temperature ' + temperature + ' seems quite high')
                # BUG FIX: the low-side comparison used temperature_high.
                elif int(temperature) < temperature_low :
                    print(baby_name + ' room temperature ' + temperature + ' seems quite low')
            elif temperature_uint == 'f' :
                # BUG FIX: the original mutated the global thresholds on
                # every 'f' reading, compounding the C->F conversion on
                # each loop iteration; convert into locals instead.
                temperature_f = int(temperature)*(9/5) + 32
                temperature_high_f = temperature_high*(9/5) + 32
                temperature_low_f = temperature_low*(9/5) + 32
                if temperature_f > temperature_high_f :
                    print(baby_name + ' room temperature ' + str(int(temperature_f)) + ' seems quite high')
                elif temperature_f < temperature_low_f :
                    print(baby_name + ' room temperature ' + str(int(temperature_f)) + ' seems quite low')
|
#!/usr/bin/python
"""#### LING 570: Homework #2 - Ryan Timbrook ############
NFA to DFA -> Converts an input NFA to an equivalent DFA
Author: Ryan Timbrook
Date: 10/18/2018
Format: nfa_to_dfa.sh input_file > output_file
Ran as:
$ nfa_to_dfa.sh hw3/examples/nfa1 > q4/ex2.fst1
$ nfa_to_dfa.sh hw3/examples/nfa2 > q4/ex2.fst2
"""
import sys, collections
from collections import OrderedDict
from functools import reduce
##--------------------------------------
# Class Objects
##--------------------------------------
class NFA(object):
    """Non-deterministic finite automaton built from an input fst file.

    States are stored as 'state': [(symbol, new_state)] adjacency lists;
    the symbol -1 denotes an epsilon transition, and "" marks the
    synthetic final state added by createNFA.
    """
    def __init__(self,name):
        self.name = name
        self.numberOfStates = 0
        #'state': [(symbol, new_state)]
        self.states = {}
        self.isymbols = []
        self.numberOfAcceptingStates = 0
        self.initalState = None
        self.finalState = set()
        self.moveNFA = {}
        self.stack = []
        self.eClosureSets = []
    def addState(self,state,symbol,newState):
        """Add the arc state --symbol--> newState, creating state if new."""
        if state not in self.states.keys():
            self.states[state]=[]
        self.states[state].append((symbol,newState))
    def setInitalState(self,stateName):
        self.initalState = stateName
    def setFinalState(self,stateName):
        # NOTE(review): replaces the set created in __init__ with a plain
        # state-name string; callers compare against a single final state.
        self.finalState = stateName
    def getVocSymbols(self):
        """Collect the input vocabulary (all arc symbols except epsilon)."""
        voc = set()
        for state in self.states:
            for c, q in self.states[state]:
                voc.add(c)
        if -1 in voc:
            voc.remove(-1)
        self.isymbols.append(list(voc))
        return list(voc)
    def eClosure(self,states,consumable,eSet):
        """Accumulate into eSet every state reachable on `consumable`.

        With consumable == -1 this computes the epsilon-closure of
        `states`; self.stack carries pending states across the recursion.
        """
        for state in states:
            if consumable == -1:
                eSet.add(state)
            if state in self.stack:
                self.stack.remove(state)
            try:
                if state == "":
                    # BUG FIX: `continue` was on the same line as the
                    # isTest print ("if isTest: print(...);continue"), so
                    # it only ran in test mode; in normal runs the lookup
                    # below then raised a spurious KeyError.
                    if isTest: print("reached final state {0}".format(state))
                    continue
                nfaState = self.states[state]
                for c,q in nfaState:
                    if c == consumable:
                        eSet.add(q)
                        self.stack.append(q)
                        self.eClosure(self.stack,consumable,eSet)
            except KeyError:
                print("Caught KeyError on state[{0}]".format(state))
                pass
        return eSet
    def setTransitionFunctionDeltaMoveNFA(self,unmarkedState,symbol):
        """Return every state reachable from unmarkedState on `symbol`."""
        T_set = set()
        # Normalise: accept a single state name or an iterable of names.
        if type(unmarkedState) == str:
            stateNames = [unmarkedState]
        else:
            stateNames = unmarkedState
        for s in stateNames:
            paths = self.states[s]
            # BUG FIX: the original guarded this loop with
            # `if len(paths) > 1` placed after the for-loop above, so it
            # inspected only the last state's arcs and silently skipped
            # states with exactly one outgoing arc.
            for p in paths:
                if p[0] == symbol:
                    T_set.add(p[1])
        return T_set
    def subTransFun(self,state,voc):
        """Follow moveNFA transitions from `state` along the word `voc`."""
        states = set([state])
        for e in voc:
            newStates = set([])
            for state in states:
                try:
                    newStates = self.moveNFA[state][e]
                except KeyError:
                    if isTest: print("caught KeyError in nfa.subTransFun state[%s] symbol[%s]"%(state,e))
                    pass
            states = newStates
        return states
##------------------------------------
class DFA(object):
    """Deterministic finite automaton assembled from NFA subset sets."""
    def __init__(self,name):
        self.name = name
        self.numberOfStates = 0
        #'state': [(symbol, new_state)]
        self.states = {}
        self.initalState = None
        self.finalState = set()
        self.moveDFA = {}
    def setInitalState(self,stateName):
        self.initalState = stateName
    def setFinalState(self,stateName):
        self.finalState = stateName
    def subTransFun(self,state,voc):
        """Follow the move table from `state` along the word `voc`."""
        for e in voc:
            state = self.moveDFA[state][e]
        return state
    def inVoc(self,voc):
        """True iff reading `voc` from the initial state ends in a final state."""
        # BUG FIX: referenced the non-existent attribute `finalStates`;
        # the attribute set by __init__/setFinalState is `finalState`.
        return self.subTransFun(self.initalState,voc) in self.finalState
    def setTransitionFunctionMoveDFA(self,delta):
        if delta not in self.moveDFA.keys():
            self.moveDFA[delta]=[]
        self.moveDFA[delta].append(delta)
    def addState(self,state,nfaStates):
        """Record the NFA state set that this DFA state stands for."""
        if state not in self.states.keys():
            self.states[state]=[]
        self.states[state].append(nfaStates)
##------------------------------------
#---- GLOBALS -----#
isTest = True    # verbose tracing on/off
isLocal = True   # use the hard-coded input file instead of argv
cmdArgs = []     # command-line arguments collected in __main__
keyStates = {'START':"",'FINAL':""}   # names of the start/final states
NULL_E ="*e*"    # printable marker for the epsilon symbol
abecedary = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"   # labels for DFA subset states
S_DFA = set()    # scratch set; only referenced from commented-out code below
##------------------------------------
# Main Procedural Function
##------------------------------------
def main():
    """Load the NFA, compute its epsilon-closure subsets, and label them.

    NOTE(review): the actual NFA->DFA conversion call is commented out
    below and marked TODO/incomplete in the original.
    """
    if isTest: print("main: Entering")
    # NOTE(review): assumes at least one argument was collected into
    # cmdArgs -- IndexError otherwise; only used for tracing here.
    if isTest: print("main: Cmd Arg 1:[%s]"%(cmdArgs[0]))
    if isLocal:
        inputFile= "hw3/examples/nfa1"
    else:
        inputFile = cmdArgs[0]
    nfa = createNFA(inputFile)
    vocab = nfa.getVocSymbols()
    if isTest: print(vocab)
    # Seed the subset list with the epsilon-closure of the initial state,
    # then expand: for each set and symbol, take the symbol-move followed
    # by its epsilon-closure, appending any new sets as we go.
    nfa.eClosureSets.append(nfa.eClosure([nfa.initalState], -1, set()))
    for eSet in nfa.eClosureSets:
        for symbl in vocab:
            symblSet = nfa.eClosure(eSet, symbl, set())
            resSet = nfa.eClosure(symblSet, -1, set())
            if resSet not in nfa.eClosureSets:
                nfa.eClosureSets.append(resSet)
                #set MoveDFA to S
                #S_DFA.add((frozenset(eSet),symbl))
    #final set
    # Give each closure set a single-letter DFA state name (A, B, C, ...).
    S_dfa = dict(zip(abecedary[:len(nfa.eClosureSets)],nfa.eClosureSets))
    if isTest: print(S_dfa)
    dfaFinalState = None
    dfa = []
    #Set MoveDFA to S
    for s in S_dfa:
        for e in S_dfa[s]:
            if e == nfa.finalState:
                dfaFinalState = e
            state = nfa.states[e]
            if e == nfa.initalState:
                #dfa.append([m_fromState,"("+ss_a_toState, '"'+a+'"',EMPTY_STRING,"))"])
                print(e)
    #convert NFA to DFA
    #TODO: INCOMPLETE - Issues with DFA conversions
    #convertNFAtoDFA(nfa)
    if isTest: print("main: Exiting")
##--------------------------------------
##Convert NFA to DFA
##--------------------------------------
def convertNFAtoDFA(nfa):
    """Subset-construction conversion of `nfa` to a DFA.

    TODO: INCOMPLETE - the transition table (delta) is never populated;
    kept faithful to the original apart from the bug fixes noted below.
    """
    if isTest: print("convertNFAtoDFA: Entering")
    initialState = frozenset([nfa.initalState])
    stateSet = set([initialState])
    unmarkedQ = stateSet.copy()
    delta = {}
    finalStates = []
    language = nfa.isymbols
    #loop over state queue
    while len(unmarkedQ) > 0:
        qSet = unmarkedQ.pop()
        #for each symbol in the sigma(alphabet)
        for symbol in language[0]:
            S = nfa.setTransitionFunctionDeltaMoveNFA(qSet,symbol)
            if len(S) == 0: continue
            nStates = frozenset(S)
            if not nStates in stateSet:
                stateSet.add(nStates)
                unmarkedQ.add(nStates)
    for qSet in stateSet:
        # BUG FIX: referenced nfa.finalStates (nonexistent) and
        # intersected with it; the NFA stores a single final-state name
        # in nfa.finalState, so a membership test is what was meant.
        if nfa.finalState in qSet:
            finalStates.append(qSet)
    dfa = DFA(nfa.name+"_"+'dfa')
    dfa.setTransitionFunctionMoveDFA(delta)
    dfa.setInitalState(initialState)
    dfa.setFinalState(finalStates)
    # BUG FIX: the exit trace was placed after `return`, making it
    # unreachable dead code.
    if isTest: print("convertNFAtoDFA: Exiting")
    return dfa
##--------------------------------------
##--------------------------------------
# Create NFA
##--------------------------------------
def createNFA(nfaFile):
    """Parse an fst description file into an NFA instance.

    Line 1 holds the final state; line 2 the initial state plus its first
    arc; remaining lines are (from-state, to-state, symbol) arcs.
    """
    if isTest: print("createNFA: Entering")
    # Name the NFA after the second path component of the input file.
    nfa = NFA(nfaFile.split("/")[1])
    lineCount = 1
    arcID = 0
    with open(nfaFile, 'r') as f:
        for line in f.readlines():
            if line == '\n': continue
            # Strip whitespace and the surrounding parentheses, then split
            # into tokens, dropping any empty token left by the split.
            line = line.strip()
            line = line.replace("(",'').replace(")",'')
            line = line.split(" ")
            if '' in line:
                line.remove('')
            if isTest: print("fstFile: line[%s]"%line)
            #first line is final state
            if lineCount == 1:
                keyStates['FINAL'] = line[0]
                nfa.setFinalState(line[0])
                nfa.addState(line[0],NULL_E,"")
            #second line is initial-state to next-state;
            #i0=initial-state, i1=to-state, i2=input-symbol
            elif lineCount == 2:
                keyStates['START'] = line[0]
                nfa.setInitalState(line[0])
                nfa.addState(line[0],line[2],line[1])
            #i0=from-state
            else:
                nfa.addState(line[0],line[2],line[1])
            lineCount += 1
    # NOTE(review): lineCount ends one past the number of parsed lines, so
    # this is an over-count by one -- confirm intended.
    nfa.numberOfStates = lineCount
    #--------------------------------------------------------
    if isTest: print("createNFA: Exiting")
    return nfa
##--------------------------------------
##--------------------------------------
## Format Output Function
##--------------------------------------
##------------------------------------
# Execute Main Function
##------------------------------------
if __name__ == "__main__":
if isTest: print(len(sys.argv));
#remove program file name from input command list
sys.argv.remove(sys.argv[0])
if len(sys.argv) > 0:
for arg in sys.argv:
if isTest: print(arg)
cmdArgs.append(arg.strip())
main()
|
#get_age function takes integers
def get_age():
    """Prompt for an age and return a description string."""
    age = int(input("Enter your age: "))
    # BUG FIX: the original returned print(...), i.e. None, so the
    # caller's print() emitted a stray "None" line after the message.
    return "you are " + str(age) + " years old"
print(get_age())
#get_name function takes strings
def get_name():
    """Prompt for a name and return a description string."""
    # BUG FIX: prompt typo "youe" -> "your"; str() around input() was
    # redundant (input already returns str).
    name = input("Enter your name: ")
    return "your name is " + name
print(get_name())
#this function tell the users you know both about them
def get_age():
    """Prompt for an age and return a description string (redefinition
    kept from the original exercise layout)."""
    age = int(input("Enter your age: "))
    return "you are " + str(age) + " years old"
def get_name():
    """Prompt for a name and return a description string."""
    name = input("Enter your name: ")
    return "your name is " + name
print(get_age(),get_name())
|
#!/usr/bin/env python
#
# Copyright (c) 2012-2014 Poul-Henning Kamp <phk@phk.freebsd.dk>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
'''
Address Space and Memory Classes
These classes can be configured to act as memory for images to be
analysed.
In addition to the numerical value of each location, it is also
possible to associate up to seven attributes with each location.
These can be used to record structural information in the media,
for instance relocation flags, write-ability etc.
XXX: Better test-cases needed
XXX: Need resolution of who controls rendering...
'''
import os.path
import ctypes
from pyreveng import tree, leaf
DEFINED = (1 << 7)
def mapped(x):
    ''' Decorator to mark methods which should be "exported" up to MemMapper'''
    setattr(x, "ismapped", True)
    return x
class MemError(Exception):
    ''' Raised for invalid or undefined memory accesses; carries the
    offending address and a human-readable reason. '''

    def __init__(self, adr, reason):
        super().__init__()
        self.adr = adr
        self.reason = reason
        # Kept as a one-tuple so str() shows the classic "('0x..:..',)" form.
        self.value = ("0x%x:%s" % (adr, reason),)

    def __str__(self):
        return repr(self.value)
class Range():
    ''' A range grouping '''

    def __init__(self, lo, hi, txt, indent=False, visible=True):
        self.lo, self.hi, self.txt = lo, hi, txt
        self.indent, self.visible = indent, visible

    def __repr__(self):
        return f"<range 0x{self.lo:x}-0x{self.hi:x} {self.txt}>"

    def __lt__(self, other):
        # Ranges order by their low address only.
        return self.lo < other.lo
class AddressSpace():
    '''
    A vacuous address-space, memory-mapper and base-class for actual
    address-spaces and memory types.

    Tracks labels, line/block comments and Range groupings per address,
    and holds a tree of inserted leaves covering [lo, hi).
    '''
    def __init__(self, lo, hi, name="", apfx="", asfx=""):
        assert lo <= hi
        self.lo = lo
        self.hi = hi
        self.name = name
        self.lbl_d = dict()    # adr -> [labels]
        self.bcmt_d = dict()   # adr -> [block comments]
        self.lcmt_d = dict()   # adr -> [line comments]
        self.rangelist = []
        self.t = tree.Tree(self.lo, self.hi)
        nxdig = max(len("%x" % self.lo), len("%x" % (self.hi - 1)))
        # Fixed-width format for the address column of listings.
        self.afmtpct = apfx + "%%0%dx" % nxdig + asfx
        # BUG FIX: a zero-padded apct was computed here and then
        # immediately overwritten with the unpadded form; the dead store
        # has been removed (behavior unchanged).
        self.apct = apfx + "0x%x" + asfx

    def __repr__(self):
        return "<address_space %s 0x%x-0x%x>" % (
            self.name, self.lo, self.hi
        )

    def __getitem__(self, adr):
        # Bounds-check, then report the location as undefined; concrete
        # memory subclasses override this.
        _b = self._off(adr)
        raise MemError(adr, "Undefined")

    def __setitem__(self, adr, data):
        _b = self._off(adr)
        raise MemError(adr, "Undefined")

    def __iter__(self):
        for i in self.t:
            yield i

    def adr(self, dst):
        ''' Render an address: first label if any, else hex. '''
        lbl = list(self.get_labels(dst))
        if lbl:
            return lbl[0]
        return self.apct % dst

    def afmt(self, adr):
        ''' Format address, fixed width, for addr col of listing '''
        return self.afmtpct % adr

    def dfmt(self, adr):
        ''' Format data word at address '''
        raise MemError(adr, "Undefined")

    def tfmt(self, adr):
        ''' Split data word at address into characters '''
        raise MemError(adr, "Undefined")

    def gaps(self):
        ''' Yield (lo, hi) spans not covered by any inserted leaf. '''
        ll = self.lo
        for i in sorted(self):
            if i.lo > ll:
                yield ll, i.lo
            ll = i.hi
        if self.hi > ll:
            yield ll, self.hi

    def segments(self):
        yield self, self.lo, self.hi

    def _off(self, adr):
        ''' Bounds-check adr and return its offset from self.lo. '''
        if adr < self.lo:
            raise MemError(adr, "Address too low")
        if adr >= self.hi:
            raise MemError(adr, "Address too high")
        return adr - self.lo

    def set_label(self, adr, lbl):
        assert isinstance(lbl, str)
        self.lbl_d.setdefault(adr, []).append(lbl)

    def set_first_label(self, adr, lbl):
        ''' Set label, if none exists already '''
        assert isinstance(lbl, str)
        i = self.lbl_d.get(adr)
        if not i:
            self.lbl_d.setdefault(adr, []).append(lbl)

    def get_labels(self, adr):
        i = self.lbl_d.get(adr)
        if i:
            yield from i

    def get_all_labels(self):
        yield from self.lbl_d.items()

    def set_line_comment(self, adr, lcmt):
        assert isinstance(lcmt, str)
        self.lcmt_d.setdefault(adr, []).append(lcmt)

    def get_line_comments(self, adr):
        i = self.lcmt_d.get(adr)
        if i:
            yield from i

    def get_all_line_comments(self):
        yield from self.lcmt_d.items()

    def set_block_comment(self, adr, bcmt):
        assert isinstance(bcmt, str)
        self.bcmt_d.setdefault(adr, []).append(bcmt)

    def get_block_comments(self, adr):
        i = self.bcmt_d.get(adr)
        if i:
            yield from i

    def get_all_block_comments(self):
        yield from self.bcmt_d.items()

    def add_range(self, lo, hi, **kwargs):
        r = Range(lo, hi, **kwargs)
        self.rangelist.append(r)
        return r

    def ranges(self):
        yield from self.rangelist

    def insert(self, lf):
        lf.aspace = self
        return self.t.insert(lf)

    def find(self, *args, **kwargs):
        yield from self.t.find(*args, **kwargs)

    def occupied(self, *args, **kwargs):
        ''' True iff find() yields at least one leaf. '''
        for _i in self.find(*args, **kwargs):
            return True
        return False
class MemMapper(AddressSpace):
    '''
    Maps one or more underlying memory objects into a single address
    space, translating addresses through a most-recently-hit-first
    segment table (self.mapping) while self.seglist keeps the segments
    in insertion order.
    '''
    def __init__(self, lo, hi, **kwargs):
        super().__init__(lo, hi, **kwargs)
        self.mapping = []   # active segments, most-recently-hit first
        self.seglist = []   # all segments, insertion order
        self.bits = 0
        self.xlat = self.xlat0   # translator; swapped as segments are added

    def __repr__(self):
        return "<MemMapper %s 0x%x-0x%x>" % (self.name, self.lo, self.hi)

    class Link(leaf.Leaf):
        # Proxy leaf stored in a mapped memory; renders through the item
        # originally inserted via the mapper.
        def __init__(self, lo, hi, link):
            super().__init__(lo, hi, "LinkLeaf")
            self.link = link

        def __repr__(self):
            return "<" + super().__repr__() + " -> " + self.link.__repr__() + ">"

        def render(self):
            self.compact = self.link.compact
            return self.link.render()

        def __eq__(self, other):
            return super().__eq__(other) and self.link.__class__ == other.link.__class__

    class Alien(leaf.Leaf):
        # Wrapper for an item whose home address differs from the address
        # at which it is being presented.
        def __init__(self, lo, hi, them):
            super().__init__(lo, hi, "Alien")
            self.them = them
            self.compact = True

        def render(self):
            t = "ALIEN"
            t += " %s" % self.them.aspace.afmt(self.them.lo)
            t += "-%s" % self.them.aspace.afmt(self.them.hi)
            t += " " + self.them.render()
            return t

    def map(self, mem, lo, hi=None, offset=None, shared=False):
        # Map `mem` into [lo, hi) of this space; `offset` is the address
        # within mem that corresponds to lo.
        if offset is None:
            offset = 0
        if hi is None:
            hi = lo + mem.hi - offset
        assert hi > lo
        self.seglist.append([lo, hi, offset, mem, shared])
        self.mapping.append([lo, hi, offset, mem, shared])
        # Choose the cheapest translator for the current segment count.
        if len(self.mapping) == 1:
            self.xlat = self.xlat1
        if len(self.mapping) > 1:
            self.xlat = self.xlatn
        else:
            self.bits = mem.bits
        self.map_methods(mem)

    def map_methods(self, mem):
        ''' Create wrapper methods in this class, for the @mapped decorated
            methods of the memory we map.
        '''
        for f in dir(mem):
            if getattr(self, f, False):
                continue
            a = getattr(mem, f)
            if callable(a) and getattr(a, "ismapped", False):
                #print("MAP", f, "from", mem)
                # Generated wrapper: translate the address, then forward
                # to the underlying memory's method.
                src = "def %s(self, adr, *args, **kwargs):\n" % f
                src += "    ms, sa, _sh = self.xlat(adr)\n"
                src += "    return ms.%s(sa, *args, **kwargs)\n" % f
                src += "\n"
                d = {}
                exec(src, {}, d)
                setattr(self.__class__, f, d[f])

    def xlat0(self, adr, _fail=True):
        # No segments mapped yet: identity translation.
        return self, adr, False

    def xlat1(self, adr, fail=True):
        # Exactly one segment mapped: direct check.
        low, high, offset, mem, shared = self.mapping[0]
        if low <= adr < high:
            return mem, (adr - low) + offset, shared
        if fail:
            raise MemError(adr, "Unmapped memory @0x%x" % adr)
        return self, adr, False

    def xlatn(self, adr, fail=True):
        # Several segments: linear scan, moving the hit to the front so
        # repeated accesses to the same segment stay cheap.
        for i, j in enumerate(self.mapping):
            low, high, offset, mem, shared = j
            if low <= adr < high:
                self.mapping.pop(i)
                self.mapping.insert(0, j)
                return mem, (adr - low) + offset, shared
        if fail:
            raise MemError(adr, "Unmapped memory @0x%x" % adr)
        return self, adr, False

    def dealienate(self, item, low, offset):
        # Resolve Link chains; wrap in Alien when the resolved item's home
        # address differs from the mapped presentation address.
        j = item
        while isinstance(j, self.Link):
            j = j.link
        if j.lo != item.lo + low - offset:
            return self.Alien(item.lo + low - offset, item.hi + low - offset, item)
        return j

    def __iter__(self):
        for i in self.t:
            assert not isinstance(i, self.Link), i
            yield i
        for low, high, offset, mem, _shared in self.seglist:
            for i in mem.t:
                ll = i.lo + low - offset
                hh = i.hi + low - offset
                if low <= ll < high and low <= hh <= high:
                    j = self.dealienate(i, low, offset)
                    assert not isinstance(j, self.Link), j
                    yield j

    def __getitem__(self, adr):
        ms, sa, _sh = self.xlat(adr)
        return ms[sa]

    def __setitem__(self, adr, dat):
        ms, sa, _sh = self.xlat(adr)
        ms[sa] = dat

    def find(self, lo=None, hi=None, **kwargs):
        # Accept either bound; default the other to a one-address window.
        if hi is None:
            hi = lo + 1
        if lo is None:
            lo = hi - 1
        #print("FS", "%x" % lo, "%x" % hi)
        yield from super().find(lo=lo, hi=hi, **kwargs)
        for low, high, offset, mem, _shared in self.seglist:
            #print("  fs", "%x" % low, "%x" % high, "%x" % offset)
            # NOTE(review): this reads like an overlap test, but with `or`
            # it accepts nearly every segment; `and` may have been
            # intended -- confirm before changing.
            if low <= hi or lo <= high:
                ll = max(lo, low) + offset - low
                hh = min(hi, high) + offset - low
                #print("  fs", "%x" % ll, "%x" % hh)
                for j in mem.find(lo=ll, hi=hh, **kwargs):
                    #print("    fs", j)
                    x = self.dealienate(j, low, offset)
                    #print("    fs", x)
                    yield x

    def set_something(self, what, adr, *args):
        # Dispatch a setter to the owning memory, or to ourselves when the
        # address is unmapped.
        ms, sa, _sh = self.xlat(adr, False)
        if ms == self:
            getattr(super(), what)(adr, *args)
        else:
            getattr(ms, what)(sa, *args)

    def get_something(self, what, adr, *args):
        ms, sa, _sh = self.xlat(adr, False)
        if ms == self:
            yield from getattr(super(), what)(adr, *args)
        else:
            yield from getattr(ms, what)(sa, *args)

    def get_all_somethings(self, what):
        # Merge our own entries with those of every mapped segment,
        # translating segment-local addresses back into this space.
        yield from getattr(super(), what)()
        for low, high, offset, mem, _shared in self.seglist:
            for a, b in getattr(mem, what)():
                aa = a + low - offset
                if low <= aa < high:
                    yield aa, b

    def set_first_label(self, *args):
        self.set_something("set_first_label", *args)

    def set_label(self, *args):
        self.set_something("set_label", *args)

    def get_labels(self, *args):
        yield from self.get_something("get_labels", *args)

    def get_all_labels(self):
        yield from self.get_all_somethings("get_all_labels")

    def set_line_comment(self, *args):
        self.set_something("set_line_comment", *args)

    def get_line_comments(self, *args):
        yield from self.get_something("get_line_comments", *args)

    def get_all_line_comments(self):
        yield from self.get_all_somethings("get_all_line_comments")

    def set_block_comment(self, *args):
        self.set_something("set_block_comment", *args)

    def get_block_comments(self, *args):
        yield from self.get_something("get_block_comments", *args)

    def get_all_block_comments(self):
        yield from self.get_all_somethings("get_all_block_comments")

    def add_range(self, lo, hi, **kwargs):
        # Record the range both in the owning memory and in this space.
        ms, sa, _sh = self.xlat(lo, False)
        ms.add_range(sa, sa + hi - lo, **kwargs)
        return super().add_range(lo, hi, **kwargs)

    def segments(self):
        for low, high, _offset, mem, _shared in sorted(self.seglist):
            yield mem, low, high

    def get_attr(self, adr):
        ms, sa, _sh = self.xlat(adr)
        return ms.get_attr(sa)

    def set_attr(self, adr, aval):
        ms, sa, _sh = self.xlat(adr)
        return ms.set_attr(sa, aval)

    def dfmt(self, adr):
        ms, sa, _sh = self.xlat(adr, False)
        if ms == self:
            return super().dfmt(sa)
        return ms.dfmt(sa)

    def tfmt(self, adr):
        ms, sa, _sh = self.xlat(adr)
        return ms.tfmt(sa)

    def gaps(self):
        # Intersect our own gaps with each segment's span.
        for glo, ghi in super().gaps():
            for slo, shi, _offset, _mem, _shared in sorted(self.seglist):
                if ghi <= slo or glo >= shi:
                    continue
                glo = max(glo, slo)
                ghi = min(ghi, shi)
                yield glo, ghi

    def insert(self, item):
        # Mapped addresses get a Link leaf in the owning memory so the
        # item renders through this space; unmapped ones land in our tree.
        ms, sa, _sh = self.xlat(item.lo, False)
        if ms != self:
            item.aspace = self
            ll = self.Link(sa, item.hi - (item.lo - sa), item)
            ms.insert(ll)
        else:
            super().insert(item)
class WordMem(AddressSpace):
    """
    Word memory is characteristic for a lot of the earliest computers,
    they could access exactly one word at a time, or possibly fractions
    of a word, but the instruction set did not support any "super-size"
    data types or access spanning multiple words.
    Typical examples: Pretty much any descendant of von Neumann's early
    computers down to most of the minicomputers from DEC and DG etc.
    Largest supported ctypes-backed word-width is 64 bits and 8 attributes;
    wider words fall back to a Python list.
    """
    def __init__(self, lo, hi, bits=8, attr=0, **kwargs):
        assert lo < hi
        assert bits > 0
        # assert bits <= 64
        assert attr >= 0
        assert attr <= 7
        super().__init__(lo, hi, **kwargs)
        self.bits = bits
        # One hex digit per 4 bits, e.g. "%04x" for a 16-bit word.
        self.fmt = "%" + "0%dx" % ((bits + 3) // 4)
        self.undef = "-" * ((bits + 3) // 4)
        self.ascii = (bits & 7) == 0
        self.lo = lo
        self.hi = hi
        self.attr = attr
        ln = hi - lo
        self.msk = (1 << bits) - 1
        self.amsk = (1 << attr) - 1
        # Choose the narrowest ctypes cell type holding `bits`; words
        # wider than 64 bits use a plain Python list instead.
        if bits <= 8:
            self.mt = ctypes.c_uint8
            self.m = (self.mt * ln)()
        elif bits <= 16:
            self.mt = ctypes.c_uint16
            self.m = (self.mt * ln)()
        elif bits <= 32:
            self.mt = ctypes.c_uint32
            self.m = (self.mt * ln)()
        elif bits <= 64:
            self.mt = ctypes.c_uint64
            self.m = (self.mt * ln)()
        else:
            self.mt = int
            self.m = [None] * ln
        # Attribute plane: one byte per word; bit 7 (DEFINED) records
        # whether the word has been written.
        self.at = ctypes.c_uint8
        self.a = (self.at * ln)()

    def __repr__(self):
        return "<word_mem 0x%x-0x%x, @%d bits, %d attr>" % (
            self.lo, self.hi, self.bits, self.attr)

    def __getitem__(self, adr):
        """Read location"""
        b = self._off(adr)
        if not self.a[b] & DEFINED:
            raise MemError(adr, "Undefined")
        return self.m[b]

    def __setitem__(self, adr, dat):
        """Write location"""
        if dat & ~self.msk:
            raise MemError(adr, "Data too wide (0x%x)" % dat)
        b = self._off(adr)
        self.m[b] = self.mt(dat)
        self.a[b] |= DEFINED

    def wr(self, adr, dat):
        self[adr] = dat

    def dfmt(self, adr):
        # Hex-format the word; dashes when undefined.
        try:
            return self.fmt % self[adr]
        except MemError:
            return self.undef

    def get_attr(self, adr):
        """Get attributes"""
        b = self._off(adr)
        return self.a[b] & self.amsk

    def set_attr(self, adr, x):
        """Set attributes"""
        if x & ~self.amsk:
            raise MemError(adr, "Attribute too wide (0x%x)" % x)
        b = self._off(adr)
        self.a[b] |= x

    def clr_attr(self, adr, x):
        """Clear attributes"""
        if x & ~self.amsk:
            raise MemError(adr, "Attribute too big (0x%x)" % x)
        b = self._off(adr)
        self.a[b] &= ~x

    def tfmt(self, adr):
        # Split the word into its bytes, high byte first; None per byte
        # when the word is undefined.  Residual bits below a multiple of
        # eight are not emitted.
        ll = []
        b = self.bits
        try:
            w = self[adr]
        except MemError:
            w = None
        while b >= 8:
            b -= 8
            if w is None:
                ll.append(None)
            else:
                ll.append((w >> b) & 0xff)
        return ll

    @mapped
    def be(self, adr, n=2):
        ''' Big endian multiword '''
        v = 0
        for a in range(adr, adr + n):
            v <<= self.bits
            v |= self[a]
        return v

    @mapped
    def le(self, adr, n=2):
        ''' Little endian multiword '''
        v = 0
        s = 0
        for a in range(adr, adr + n):
            v |= self[a] << s
            s += self.bits
        return v
class ByteMem(WordMem):
"""
Byte memory is characteristic for microcomputers, which
typically had very narrow busses, 4 or 8 bits, but instructions
for operating on wider types than the width of the bus.
This introduces the issue of "endianess" but this is not
really attribute of the memory, it is an attribute of the
CPU, instruction set or interpreted code, so we provide
both "sexes" and leave it up to everybody else to use the
right one.
"""
def __init__(self, lo, hi, **kwargs):
super().__init__(lo, hi, bits=8, **kwargs)
def __repr__(self):
return "<ByteMem 0x%x-0x%x, %d attr %s>" % (
self.lo, self.hi, self.attr, self.name)
@mapped
def bytearray(self, lo, bcnt):
i = self._off(lo)
return bytearray(self.m[i:i+bcnt])
def tfmt(self, adr):
try:
return (self[adr],)
except MemError:
return (None,)
@mapped
def u8(self, adr):
"""Unsigned 8-bit byte"""
return self[adr]
@mapped
def bu16(self, adr):
"""Big Endian Unsigned 16-bit half-word"""
retval = self[adr] << 8
retval |= self[adr + 1]
return retval
@mapped
def bu32(self, adr):
"""Big Endian Unsigned 32-bit word"""
retval = self[adr] << 24
retval |= self[adr + 1] << 16
retval |= self[adr + 2] << 8
retval |= self[adr + 3]
return retval
@mapped
def bu64(self, adr):
"""Big Endian Unsigned 64-bit double-word"""
retval = self[adr] << 56
retval |= self[adr + 1] << 48
retval |= self[adr + 2] << 40
retval |= self[adr + 3] << 32
retval |= self[adr + 4] << 24
retval |= self[adr + 5] << 16
retval |= self[adr + 6] << 8
retval |= self[adr + 7]
return retval
@mapped
def lu16(self, adr):
"""Little Endian Unsigned 16-bit half-word"""
retval = self[adr]
retval |= self[adr + 1] << 8
return retval
@mapped
def lu32(self, adr):
"""Little Endian Unsigned 32-bit word"""
retval = self[adr]
retval |= self[adr + 1] << 8
retval |= self[adr + 2] << 16
retval |= self[adr + 3] << 24
return retval
@mapped
def lu64(self, adr):
"""Little Endian Unsigned 64-bit double-word"""
retval = self[adr]
retval |= self[adr + 1] << 8
retval |= self[adr + 2] << 16
retval |= self[adr + 3] << 24
retval |= self[adr + 4] << 32
retval |= self[adr + 5] << 40
retval |= self[adr + 6] << 48
retval |= self[adr + 7] << 56
return retval
@mapped
def s8(self, adr):
"""Signed 8-bit byte"""
retval = self[adr]
if retval & 0x80:
retval -= 256
return retval
@mapped
def bs16(self, adr):
"""Big Endian Signed 16-bit half-word"""
retval = self.bu16(adr)
if retval & 0x8000:
retval -= 0x10000
return retval
@mapped
def ls16(self, adr):
"""Little Endian Signed 16-bit half-word"""
retval = self.lu16(adr)
if retval & 0x8000:
retval -= 0x10000
return retval
@mapped
def bs32(self, adr):
    """Big Endian Signed 32-bit word"""
    value = self.bu32(adr)
    return value - 0x100000000 if value >= 0x80000000 else value
@mapped
def ls32(self, adr):
    """Little Endian Signed 32-bit word"""
    value = self.lu32(adr)
    return value - 0x100000000 if value >= 0x80000000 else value
@mapped
def bs64(self, adr):
    """Big Endian Signed 64-bit double-word"""
    value = self.bu64(adr)
    return value - 0x10000000000000000 if value >= 0x8000000000000000 else value
@mapped
def ls64(self, adr):
    """Little Endian Signed 64-bit double-word"""
    value = self.lu64(adr)
    return value - 0x10000000000000000 if value >= 0x8000000000000000 else value
def load_data(self, first, step, dat):
    """Store each item of *dat* into memory, starting at *first* and advancing *step* addresses per item."""
    address = first
    for value in dat:
        self[address] = value
        address += step
def load_binfile(self, first, step, filename, lo=0, hi=None):
    """Load bytes [lo:hi) of *filename* into memory starting at *first*,
    advancing *step* addresses per byte (see load_data).

    hi=None means 'to end of file'.
    """
    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(filename, "rb") as fi:
        d = bytearray(fi.read())
    # Compare against None explicitly: the original 'if hi:' treated a
    # caller-supplied hi of 0 the same as 'no upper limit'.
    if hi is not None:
        self.load_data(first, step, d[lo:hi])
    else:
        self.load_data(first, step, d[lo:])
class Stackup(ByteMem):
    """
    Convenience class to stack a set of eproms into ByteMem.
    'files' indicates the layout desired, and each element can be
    just a filename or an iterable of filenames (one per byte lane):
        files = (
            "singlelane",
            ("highbyte", "lowbyte"),
            ("topbyte", "midhibyte", "midlobyte", "lobyte"),
        )
    'prefix' is used for all filenames; 'nextto' replaces it with the
    directory of the given file.
    See also:
        examples/HP3335A
        examples/HP8568A
    """
    def __init__(self, files, lo=0, prefix="", nextto=None):
        # 'nextto' overrides any passed prefix with that file's directory.
        if nextto is not None:
            prefix = os.path.dirname(nextto)
        ll = []
        hi = lo
        # First pass: read every ROM image into ll (one sub-list per group),
        # summing the file sizes to compute the top address 'hi'.
        for r in files:
            ll.append([])
            if isinstance(r, str):
                b = open(os.path.join(prefix, r), "rb").read()
                hi += len(b)
                ll[-1].append(b)
            else:
                for i in r:
                    b = open(os.path.join(prefix, i), "rb").read()
                    hi += len(b)
                    ll[-1].append(b)
        super().__init__(lo, hi)
        # Second pass: interleave the lanes of each group into memory.
        p = lo
        for r in ll:
            stride = len(r)   # number of byte lanes in this group
            ln = len(r[0])    # bytes per lane (assumes equal-length ROMs -- TODO confirm)
            o = stride
            for i in r:
                # First listed file gets the highest offset within each stride.
                o -= 1
                pp = p + o
                for j in i:
                    self[pp] = j
                    pp += stride
            p += stride * ln
def do_test():
    """Ad-hoc smoke test: construct a WordMem, write/read a word and
    exercise the attribute get/set interface, printing the results."""
    mem = WordMem(0x0000, 0x1000, bits=64, attr=3)
    print(mem)
    print(type(mem.m), ctypes.sizeof(mem.m))
    mem.wr(0x100, 0x123456789)
    print("%x" % mem[0x100])
    print(mem.get_attr(0x100))
    print(mem.get_attr(0x101))
    print(mem.set_attr(0x101, 4))
    print(mem.get_attr(0x101))

# Run the smoke test when executed as a script.
if __name__ == "__main__":
    do_test()
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
# Synthetic blob data: (features, true cluster labels).
features, true_labels = make_blobs(n_samples=200, n_features=2, centers=4,
                                   cluster_std=1.8, random_state=101)
# KMeans is unsupervised learning: it never sees true_labels.
model = KMeans(n_clusters=3)
model.fit(features)
print(model.cluster_centers_)
print(model.labels_)
# Side-by-side comparison of the learned clusters vs the generating labels.
fig, (left_axis, right_axis) = plt.subplots(1, 2, sharey=True, figsize=(10, 6))
left_axis.set_title('K Means')
left_axis.scatter(features[:, 0], features[:, 1], c=model.labels_)
right_axis.set_title('Original')
right_axis.scatter(features[:, 0], features[:, 1], c=true_labels, cmap='rainbow')
plt.show()
|
"""Server exposing endpoints for uploading and downloading tracks, extracting their encodings and returning the
mapped visual parameters
"""
import data_processor as dp
import train
import models
import mapping_utils
import commons
import fma_utils
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from werkzeug.utils import secure_filename
from werkzeug.contrib.fixers import ProxyFix
import os
import json
import logging
from logging.handlers import RotatingFileHandler
import torch
from geolite2 import geolite2
# Logging
# Request log lives next to this file under logs/ (created if absent).
logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs', 'deepviz-requests.log')
os.makedirs(os.path.dirname(logfile), exist_ok=True)
logger = logging.getLogger('deepviz')
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(logfile, maxBytes=1024 * 1024, backupCount=100)  # 1MB x 100 files
formatter = logging.Formatter('%(asctime)s :: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class ServerConfig(commons.BaseConfig):
    """Server Initialization Config
    The defaults defined here should be overridden using a json config file
    (see the load_from_file call below).
    """
    def __init__(self):
        self.secret = 'saodoasmdom9908128euh1dn'  # Flask secret key (override via private config)
        self.port = 7000  # listen port
        self.debug = False  # Flask debug mode
        self.upload_dir = '/tmp/deepviz'  # temporary storage for uploaded tracks
        # File extensions accepted by the upload endpoint.
        self.allowed_extensions = ['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']
# Load server config
server_config = ServerConfig.load_from_file('private/server_config.json')
# Server configuration
app = Flask(__name__)
CORS(app)  # allow cross-origin requests
app.wsgi_app = ProxyFix(app.wsgi_app)  # honor X-Forwarded-* headers when behind a proxy
app.secret_key = server_config.secret
app.config['server_config'] = server_config
# Geolite
geo_reader = geolite2.reader()  # IP -> geo lookup, used in the after_request logger
# Global variables
request_count = 0  # incremented once per request in log_request
@app.route('/fetchmap', methods=['POST'])
def fetch_map():
    """Parse form data and track, extract features and return mapped visual params.

    Expects form fields: model, feature_mapping, feature_scaling and optionally
    classifier_layer, plus either an uploaded 'track' file or a 'track' form
    field holding an FMA track id. Returns JSON with the training/dataset
    configs and the mapped encoding.
    """
    # Parse Request Config
    request_config = mapping_utils.MappingConfig()
    request_config.model = request.form['model']
    request_config.train_config_path = models.trained_model_configs[request_config.model]
    request_config.feature_mapping = request.form['feature_mapping']
    request_config.feature_scaling = request.form['feature_scaling']
    # classifier_layer is optional in the form
    if 'classifier_layer' in request.form:
        request_config.classifier_layer = request.form['classifier_layer']
    logger.info('Request Config: %s', json.dumps(request_config.get_dict(), indent=2, sort_keys=True))
    # Configs
    train_config = train.TrainingConfig.load_from_file(request_config.train_config_path)
    if train_config.model == 'cnn_classifier':  # avoid loading pre-trained weights
        train_config.model_params['pretrained'] = False
    dataset_config = dp.DataPrepConfig.load_from_dataset(train_config.dataset_path)
    dataset_mode = dp.read_h5_attrib('mode', dataset_config.get_dataset_path())
    # Save uploaded file or get FMA track path
    track_type = 'upload'
    if 'track' not in request.files:
        if 'track' in request.form:
            # No uploaded file: 'track' form field is an FMA track id instead.
            track_path = fma_utils.get_audio_path('datasets/fma/fma_small', int(request.form['track']))
            track_type = 'fma'
        else:
            logger.error('Audio file is not available in the request')
            raise Exception('Audio file is not available in the request')
    else:
        track = request.files['track']
        if track.filename == '':
            raise Exception('No selected audio file')
        server_config = app.config['server_config']
        # Reject files without an allowed extension.
        if not (track and '.' in track.filename and track.filename
                .rsplit('.', 1)[1].lower() in server_config.allowed_extensions):
            raise Exception('Invalid file')
        filename = secure_filename(track.filename)
        os.makedirs(server_config.upload_dir, exist_ok=True)
        track_path = os.path.join(server_config.upload_dir, filename)
        track.save(track_path)
    logger.info('Track type : %s\tTrack path : %s', track_type, track_path)
    try:
        # Model
        cuda = torch.cuda.is_available()
        model = train_config.get_by_model_key(cuda)
        model.load_state(train_config.get_model_path('state_best'))
        # Process track
        partition = mapping_utils.generate_partition(track_path, dataset_mode, dataset_config)
        batch = dp.PartitionBatchGenerator(partition, train_config.batch_size, mode='track')
        # Encode and Map
        enc = mapping_utils.encode(model, batch, train_config, request_config)
        enc = mapping_utils.map_and_scale(enc, request_config, train_config)
    finally:
        # Delete uploaded track (FMA library tracks are kept).
        if track_type == 'upload':
            os.unlink(track_path)
    # Compile and send
    return jsonify({
        'train_config': train_config.get_dict(),
        'dataset_mode': dataset_mode,
        'dataset_config': dataset_config.get_dict(),
        'encoding': enc.tolist()
    })
@app.route('/fetchtracks', methods=['GET'])
def fetch_tracks():
    """Return FMA Small metadata as JSON rows of (id, title, artist, genre)."""
    tracks = commons.get_fma_meta("datasets/fma/fma_metadata", 'small')
    columns = [
        tracks.index.tolist(),
        tracks['track', 'title'].tolist(),
        tracks['artist', 'name'].tolist(),
        tracks['track', 'genre_top'].tolist(),
    ]
    return jsonify(list(zip(*columns)))
@app.route('/downloadtrack/<path:track_id>', methods=['GET'])
def download_fma_track(track_id):
    """Stream an FMA Small track identified by its numeric id."""
    path = fma_utils.get_audio_path('datasets/fma/fma_small', int(track_id))
    return send_file(path)
@app.after_request
def log_request(resp):
    """Log request details (and best-effort geo data) after each request."""
    request_data = {
        'endpoint': request.endpoint,
        'host_url': request.host_url,
        'referrer': request.referrer,
        'method': request.method,
        'remote_addr': request.remote_addr,
        'user_agent': str(request.user_agent)
    }
    logger.info('Request Data: %s', json.dumps(request_data, indent=2, sort_keys=True))
    try:
        ip = request.remote_addr
        geo_data = geo_reader.get(ip)
        logger.info('Geo Data: %s', json.dumps(geo_data, indent=2, sort_keys=True))
    except Exception:
        # Geo lookup is best-effort; narrowed from a bare 'except:' which
        # also swallowed SystemExit/KeyboardInterrupt.
        logger.exception('Failed to log geo data')
    logger.info('Response status: %s', resp.status)
    global request_count
    request_count += 1
    print('Requests Count: {}'.format(request_count))
    return resp
# Start dev server
if __name__ == '__main__':
    # Binds all interfaces; port/debug come from the loaded ServerConfig.
    logger.info('Starting DeepViz server at Port:{}'.format(server_config.port))
    app.run('0.0.0.0', server_config.port, debug=server_config.debug)
|
import os
import sys
import allure
from allure_commons.types import AttachmentType
from jproperties import Properties
from pytest import fixture
from selenium import webdriver
PROJECT_ROOT = os.path.dirname(__file__)
# Bundled webdriver binaries, keyed by sys.platform values.
CHROME_DRIVER_DICT = {
    'linux': os.path.join(PROJECT_ROOT, 'webdrivers/chrome/chromedriver_linux64'),
    'darwin': os.path.join(PROJECT_ROOT, 'webdrivers/chrome/chromedriver_mac64'),
    'win32': os.path.join(PROJECT_ROOT, 'webdrivers/chrome/chromedriver_win.exe'),
    'win64': os.path.join(PROJECT_ROOT, 'webdrivers/chrome/chromedriver_win.exe')
}
OPERA_DRIVER_DICT = {
    'linux': os.path.join(PROJECT_ROOT, 'webdrivers/opera/operadriver_linux'),
    'darwin': os.path.join(PROJECT_ROOT, 'webdrivers/opera/operadriver_mac64'),
    'win32': os.path.join(PROJECT_ROOT, 'webdrivers/opera/operadriver_win32.exe'),
    'win64': os.path.join(PROJECT_ROOT, 'webdrivers/opera/operadriver_win64.exe')
}
YANDEX_DRIVER_DICT = {
    'linux': os.path.join(PROJECT_ROOT, 'webdrivers/yandex/yandexdriver_lin'),
    'darwin': os.path.join(PROJECT_ROOT, 'webdrivers/yandex/yandexdriver_mac'),
    'win32': os.path.join(PROJECT_ROOT, 'webdrivers/yandex/yandexdriver_win.exe'),
    'win64': os.path.join(PROJECT_ROOT, 'webdrivers/yandex/yandexdriver_win.exe')
}
@fixture()
def start_browser():
    """Launch the browser named in app.properties and yield its driver.

    On teardown a screenshot is attached to the Allure report when the
    test raised, then the driver is quit.
    """
    configs = Properties()
    # 'with' closes the properties file; the original leaked the handle.
    with open(os.path.join(PROJECT_ROOT, 'app.properties'), 'rb') as config_file:
        configs.load(config_file)
    browser_name = configs.get("browser").data
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-gpu')
    options.add_argument('--window-size=1920,1080')
    options.add_argument('lang=ru')
    if browser_name == "chrome":
        driver = webdriver.Chrome(executable_path=CHROME_DRIVER_DICT[sys.platform], options=options)
    elif browser_name == "opera":
        driver = webdriver.Opera(executable_path=OPERA_DRIVER_DICT[sys.platform], options=options)
        driver.maximize_window()
    elif browser_name == "yandex":
        # NOTE(review): Yandex is launched through webdriver.Opera with the
        # yandexdriver binary -- confirm this is the intended driver class.
        driver = webdriver.Opera(executable_path=YANDEX_DRIVER_DICT[sys.platform], options=options)
        driver.maximize_window()
    else:
        # Unknown browser name: fall back to Chrome.
        driver = webdriver.Chrome(executable_path=CHROME_DRIVER_DICT[sys.platform], options=options)
    yield driver
    # sys.exc_info() always returns a 3-tuple, which is truthy, so the original
    # condition attached a screenshot after every test; check the type slot.
    if sys.exc_info()[0] is not None:
        allure.attach(body=driver.get_screenshot_as_png(), name='screenshot', attachment_type=AttachmentType.PNG)
    driver.quit()
#! /usr/bin/env py.test-3
'''
Tests for the module realizing the XML storage using lxml.
'''
__author__ = 'Russel Winder'
__version__ = '1.2'
__date__ = '2014-08-23'
__copyright__ = 'Copyright © 2007, 2012. 2014 Russel Winder'
__licence__ = 'GNU Public Licence (GPL) v3'
import tempfile
import sys
from lxml.etree import ParseError
from xmlPhonebook import Context
emptyContactsDocument = '<contacts xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="contacts.xsd"></contacts>'
basicContactsDocument = '''<contacts xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="contacts.xsd">
<contact>
<name>
<firstname>Russel</firstname>
<lastname>Winder</lastname>
</name>
<number>+44 20 7585 2200</number>
</contact>
</contacts>
'''
basicKey = 'Winder, Russel'
basicValue = '+44 20 7585 2200'
def _setUp_empty(f):
f.write(emptyContactsDocument.encode())
f.seek(0)
def _setUp_basic(f):
f.write(basicContactsDocument.encode())
f.seek(0)
def test_non_existent_file():
    """Context must raise IOError for a file that does not exist."""
    try:
        Context('thisfilenameissoridiculousthatitcantpossiblyactualyexistonthefilestore')
    except IOError:
        pass
    else:
        # The original called self.fail(...) in a plain function (no self),
        # which itself raised NameError instead of failing cleanly.
        raise AssertionError('Did not raise IOError.')
def test_empty_file():
    """Context must raise ParseError for a zero-length (non-XML) file."""
    with tempfile.NamedTemporaryFile() as f:
        try:
            Context(f.name)
        except ParseError:
            pass
        else:
            # The original called self.fail(...) in a plain function (no self),
            # which itself raised NameError instead of failing cleanly.
            raise AssertionError('Did not raise ParseError.')
def test_wellFormed_but_contactless_file_default():
    """A valid, empty contacts file yields a default-mode Context with an empty cache."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_empty(handle)
        ctx = Context(handle.name)
        expected = (handle.name, 'r+b', True, {})
        assert (ctx.filename, ctx.mode, ctx.writebackOnExit, ctx.cache) == expected
def test_wellFormed_but_contactless_file_no_writeback():
    """Explicit read-only mode and no writeback are reflected on the Context."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_empty(handle)
        ctx = Context(handle.name, 'r', False)
        expected = (handle.name, 'r', False, {})
        assert (ctx.filename, ctx.mode, ctx.writebackOnExit, ctx.cache) == expected
def test_file_with_one_entry():
    """A one-contact document loads that contact into the cache."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_basic(handle)
        ctx = Context(handle.name)
        expected = (handle.name, 'r+b', True, {basicKey: basicValue})
        assert (ctx.filename, ctx.mode, ctx.writebackOnExit, ctx.cache) == expected
def test_reasonable_output_from_an_entry():
    """Writing a cache entry back produces the canonical document text."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_empty(handle)
        with Context(handle.name) as ctx:
            expected = (handle.name, 'r+b', True, {})
            assert (ctx.filename, ctx.mode, ctx.writebackOnExit, ctx.cache) == expected
            ctx.cache = {basicKey: basicValue}
        assert basicContactsDocument == handle.read().decode()
def test_can_get_an_item_from_cache():
    """Item access on the Context returns the stored number."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_basic(handle)
        ctx = Context(handle.name)
        assert ctx[basicKey] == basicValue
def test_can_set_an_item_to_an_empty_cache():
    """Item assignment on an empty Context is readable back."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_empty(handle)
        ctx = Context(handle.name)
        ctx[basicKey] = basicValue
        assert ctx[basicKey] == basicValue
def test_use_context_manager_in_with_statement():
    """Context works as a context manager and exposes item access inside 'with'."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_basic(handle)
        with Context(handle.name) as book:
            assert book[basicKey] == basicValue
def test_keys_method_delivers_keys():
    """keys() matches the keys view of an identically keyed dict."""
    with tempfile.NamedTemporaryFile() as handle:
        _setUp_basic(handle)
        with Context(handle.name) as book:
            assert book.keys() == {basicKey: ''}.keys()
|
import csv
import sys
import re
def findWholeWord(w):
    """Return a case-insensitive searcher that matches *w* as a whole word."""
    pattern = re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE)
    return pattern.search
##ENTRYPOINT
# Command line: <levels2species file> <unused> <unused> <tax id>
l2s_file = sys.argv[1]
og_file = sys.argv[2]  # UNUSED
og2_file = sys.argv[3]  # UNUSED
tax_id = sys.argv[4]
# using the tax_id, extract org_id from levels2species. (extracting plant organisms)
# *if the 4th column (row[3]) contains tax_id as a whole word, print the
# 2nd column (row[1], the org_id)
# Hoist the compiled matcher out of the loop: it depends only on tax_id.
matcher = findWholeWord(tax_id)
# 'with' guarantees the file is closed even if parsing raises.
with open(l2s_file, 'r', newline='') as f:
    for row in csv.reader(f, delimiter='\t'):
        if matcher(row[3]) is not None:
            print(str(row[1]))
sys.exit(0)
|
#!/usr/bin/env python
#USRP1 Address Definition
# Register addresses for CSMA control on the USRP1.
MAC_ADDR_CSMA_ON = 65
MAC_ADDR_CSMA_TH = 66
# BT_1..BT_5 pairs: presumably low/high byte halves of five backoff-time
# registers -- TODO confirm against the FPGA register map.
MAC_ADDR_CSMA_BT_1L = 67
MAC_ADDR_CSMA_BT_1H = 68
MAC_ADDR_CSMA_BT_2L = 69
MAC_ADDR_CSMA_BT_2H = 70
MAC_ADDR_CSMA_BT_3L = 71
MAC_ADDR_CSMA_BT_3H = 72
MAC_ADDR_CSMA_BT_4L = 73
MAC_ADDR_CSMA_BT_4H = 74
MAC_ADDR_CSMA_BT_5L = 75
MAC_ADDR_CSMA_BT_5H = 76
#MAC Parameter Default Values
MAC_CSMA_TH_DEF = 30
MAC_CSMA_MIN_BE = 3
MAC_CSMA_UNIT_BT = 640 # Unit = us
|
#!/usr/bin/python3
# -*- coding: utf8 -*-
from marshmallow_jsonapi.flask import Schema, SchemaOpts, Relationship
from marshmallow_jsonapi import fields
#~ def dasherize(text):
#~ return text.replace('_', '-')
class HypervisorsSchema(Schema):
    """JSON:API schema for a hypervisor resource, including host hardware
    details and a to-many relationship to its virtual machines."""
    id = fields.Str(dump_only=True)
    name = fields.Str()
    version = fields.Integer()
    libversion = fields.Integer()
    hostname = fields.Str()
    cpu_model = fields.Str()
    total_memory = fields.Integer()
    cpus = fields.Integer()
    mhz = fields.Integer()
    numa_nodes = fields.Integer()
    cpu_sockets = fields.Integer()
    cpu_cores = fields.Integer()
    cpu_threads = fields.Integer()
    free_memory = fields.Integer()
    class Meta:
        # JSON:API resource type and Flask view names for self links.
        type_ = 'hypervisor'
        self_view = 'hypervisor_detail'
        self_view_kwargs = {'id': '<id>'}
        self_view_many = 'hypervisors_list'
        strict = True
        include_resource_linkage=True
        include_resource_relationship=True
        #~ inflect = dasherize
    virtual_machines = Relationship(
        attribute = 'virtual-machines',
        self_view='hypervisor_virtual_machines',
        self_view_kwargs={'id': '<id>'},
        related_view='virtual_machine_list',
        related_view_kwargs={'hypervisor_id': '<id>'},
        many=True,
        include_resource_linkage=True,
        include_resource_relationship=True,
        schema='VirtualMachinesSchema',
        type_='virtual-machines')
class VirtualMachinesSchema(Schema):
    """JSON:API schema for a virtual-machine resource with a to-one
    relationship back to its hypervisor."""
    id = fields.Str(dump_only=True)
    name = fields.Str()
    state = fields.Str()
    cpus = fields.Integer()
    memory = fields.Integer()
    xml = fields.Str()
    class Meta:
        # JSON:API resource type and Flask view names for self links.
        type_ = 'virtual-machines'
        self_view = 'virtual_machine_detail'
        self_view_kwargs = {'id': '<id>'}
        self_view_many = 'virtual_machine_list'
        strict = True
        include_resource_linkage=True
        include_resource_relationship=True
        #~ inflect = dasherize
    hypervisor = Relationship(
        attribute='hypervisor',
        self_view='virtual_machine_hypervisor',
        self_view_kwargs={'id': '<id>'},
        related_view='hypervisor_detail',
        related_view_kwargs={'virtual_machine_id': '<id>'},
        include_resource_linkage=True,
        include_resource_relationship=True,
        schema='HypervisorsSchema',
        type_='hypervisor')
|
import numpy as np
import cv2
import classification
import constants
from scipy.ndimage.measurements import label
# Module-level state shared across video frames; presumably maintained by a
# per-frame pipeline entry point outside this view -- TODO confirm usage.
previous_detections = None   # detections carried over from earlier frames
windows = None               # cached flat list of sliding windows
hierarchy_of_windows = None  # cached per-scale window grids
last_heat_maps = []          # recent per-frame heat maps
heat_map_rolling_sum = None  # running sum over last_heat_maps
def slide_window(image_width, image_height, x_start_stop=(None, None), y_start_stop=(400, 650),
                 windows_size=(64, 64), window_overlap_factor=(0.5, 0.5)):
    """
    Slides window with a specified size and overlap
    :param image_width: image width
    :param image_height: image height
    :param x_start_stop: x sliding limits (None entries mean full extent)
    :param y_start_stop: y sliding limits (None entries mean full extent)
    :param windows_size: window size
    :param window_overlap_factor: window overlap factor
    :return: list of windows covering area, each ((startx, starty), (endx, endy))
    """
    # Work on copies: the original version mutated its mutable default
    # arguments in place, so a call that used the (None, None) default
    # poisoned the default for every later call. Copying also avoids
    # mutating caller-supplied lists.
    x_start_stop = list(x_start_stop)
    y_start_stop = list(y_start_stop)
    # If x and/or y start/stop positions not defined, set to image size
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = image_width
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = image_height
    # Compute the span of the region to be searched
    x_span = x_start_stop[1] - x_start_stop[0]
    y_span = y_start_stop[1] - y_start_stop[0]
    # Compute the number of pixels per step in x/y.
    # Plain int() replaces np.int, which was removed from NumPy 1.24.
    nx_pix_per_step = int(windows_size[0] * (1 - window_overlap_factor[0]))
    ny_pix_per_step = int(windows_size[1] * (1 - window_overlap_factor[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(windows_size[0] * window_overlap_factor[0])
    ny_buffer = int(windows_size[1] * window_overlap_factor[1])
    nx_windows = int((x_span - nx_buffer) / nx_pix_per_step)
    ny_windows = int((y_span - ny_buffer) / ny_pix_per_step)
    # Loop through finding x and y window positions; the classifier will
    # consider windows one by one, so a plain loop is fine here.
    window_list = []
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            # Calculate window position
            startx = xs * nx_pix_per_step + x_start_stop[0]
            endx = startx + windows_size[0]
            starty = ys * ny_pix_per_step + y_start_stop[0]
            endy = starty + windows_size[1]
            # Append window position to list
            window_list.append(((startx, starty), (endx, endy)))
    # Return the list of windows
    return window_list
def generate_sliding_windows(image_width, image_height, sizes=constants.SLIDING_WINDOWS_SIZES,
                             x_ranges=constants.SLIDING_WINDOWS_X_RANGES,
                             y_ranges=constants.SLIDING_WINDOWS_Y_RANGES):
    """
    Generates sliding windows at multiple scales and coverage areas.
    :param image_width: image width
    :param image_height: image height
    :param sizes: window sizes (one square size per scale)
    :param x_ranges: window x ranges, parallel to sizes
    :param y_ranges: window y ranges, parallel to sizes
    :return: (flat list of all windows, per-scale list of window grids)
    """
    per_scale = []
    for index in range(len(sizes)):
        dimension = sizes[index]
        grid = slide_window(image_width, image_height,
                            x_start_stop=x_ranges[index],
                            y_start_stop=y_ranges[index],
                            windows_size=(dimension, dimension))
        per_scale.append(grid)
    flattened = []
    for grid in per_scale:
        flattened.extend(grid)
    return flattened, per_scale
def get_image_features(image, color_space='RGB', spatial_size=(32, 32),
                       histogram_bins=32, orientations=9,
                       pixels_per_cell=8, cells_per_block=2, hog_channel=0,
                       spatial_feature=True, histogram_feature=True, hog_feature=True):
    """
    Extracts image features
    :param image: image (RGB)
    :param color_space: color space ('RGB', 'HSV', 'LUV', 'HLS', 'YUV', 'YCrCb')
    :param spatial_size: spatial binning size
    :param histogram_bins: number of histogram bins
    :param orientations: number of HOG orientations
    :param pixels_per_cell: number of HOG pixels per cell
    :param cells_per_block: number of HOG cells per block
    :param hog_channel: HOG channel (0-2 or 'ALL')
    :param spatial_feature: flag indicating extracting spatial binning features is needed
    :param histogram_feature: flag indicating extracting color histogram features is needed
    :param hog_feature: flag indicating extracting HOG features is needed
    :return: concatenated array of the requested image features
    """
    image_features = []
    # Color conversion table; any unrecognized space falls back to a plain
    # copy. (The original left feature_image unbound -- a NameError -- for a
    # non-RGB space outside this table.)
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    if color_space in conversions:
        feature_image = cv2.cvtColor(image, conversions[color_space])
    else:
        feature_image = np.copy(image)
    # Spatial binning features
    if spatial_feature:
        spatial_features = classification.get_spatial_binning_features(feature_image, size=spatial_size)
        image_features.append(spatial_features)
    # Color histogram features (only the concatenated histogram is kept)
    if histogram_feature:
        red_histogram, green_histogram, blue_histogram, bin_centers, histogram_features = \
            classification.get_color_histogram_features(feature_image, number_of_bins=histogram_bins)
        image_features.append(histogram_features)
    # HOG features: a single channel, or all three concatenated
    if hog_feature:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(
                    classification.get_hog_features(feature_image[:, :, channel], orientations, pixels_per_cell,
                                                    cells_per_block, visualize=False, feature_vector=True))
        else:
            hog_features = classification.get_hog_features(feature_image[:, :, hog_channel], orientations,
                                                           pixels_per_cell, cells_per_block, visualize=False,
                                                           feature_vector=True)
        image_features.append(np.ravel(hog_features))
    # Concatenation order (spatial, histogram, HOG) must match training.
    return np.concatenate(image_features)
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(image, windows, classifier, scaler, color_space=constants.HISTOGRAM_COLOR_SPACE,
                   spatial_size=constants.SPATIAL_BINNING_SIZE, histogram_bins=constants.HISTOGRAM_NUMBER_OF_BINS,
                   histogram_range=constants.HISTOGRAM_COLOR_RANGE, orientations=constants.HOG_NUMBER_OF_ORIENTATIONS,
                   pixels_per_cell=constants.HOG_PIXELS_PER_CELL, cells_per_block=constants.HIG_CELLS_PER_BLOCK,
                   hog_channel=0, spatial_feature=True,
                   histogram_feature=True, hog_feature=True):
    """
    Searches all windows for matches
    :param image: image
    :param windows: windows
    :param classifier: feature classifier
    :param scaler: feature scaler
    :param color_space: color space
    :param spatial_size: spatial binning size
    :param histogram_bins: number of histogram bins
    :param histogram_range: color histogram range (accepted but not used here)
    :param orientations: number of HOG orientations
    :param pixels_per_cell: number of HOG pixels per cell
    :param cells_per_block: number of HOG cells per block
    :param hog_channel: HOG channel (0-2 or 'ALL')
    :param spatial_feature: flag indicating extracting spatial binning features is needed
    :param histogram_feature: flag indicating extracting color histogram features is needed
    :param hog_feature: flag indicating extracting HOG features is needed
    :return: classification matches
    """
    # 1) Create an empty list to receive positive detection windows
    on_windows = []
    # 2) Iterate over all windows in the list
    for window in windows:
        # 3) Extract the test window from original image (windows are
        #    ((x1, y1), (x2, y2)); the patch is resized to the 64x64
        #    classifier input size)
        classification_image = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        # 4) Extract features for that window using single_img_features()
        features = get_image_features(classification_image, color_space=color_space, spatial_size=spatial_size,
                                      histogram_bins=histogram_bins, orientations=orientations,
                                      pixels_per_cell=pixels_per_cell, cells_per_block=cells_per_block,
                                      hog_channel=hog_channel, spatial_feature=spatial_feature,
                                      histogram_feature=histogram_feature, hog_feature=hog_feature)
        # 5) Scale extracted features to be fed to classifier
        features_for_test = np.array(features).reshape(1, -1)
        # features_for_test = features.reshape(1, -1)
        test_features = scaler.transform(features_for_test)
        # 6) Predict using your classifier
        prediction = classifier.predict(test_features)
        # 7) If positive (prediction == 1) then save the window
        if prediction == 1:
            on_windows.append(window)
    # 8) Return windows for positive detections
    return on_windows
def draw_boxes(image, bounding_boxes, color=(0, 0, 255), thickness=6):
    """
    Draws bounding boxes onto a copy of the image.
    :param image: image
    :param bounding_boxes: list of bounding boxes ((min X, min Y), (max X, max Y))
    :param color: line color
    :param thickness: line thickness
    :return: image copy with bounding boxes drawn
    """
    canvas = np.copy(image)
    for top_left, bottom_right in bounding_boxes:
        cv2.rectangle(canvas, top_left, bottom_right, color, thickness)
    return canvas
def draw_hierarchy_of_boxes(image, hierarchy_of_bounding_boxes, thickness=1):
    """
    Draws a hierarchy of boxes for sliding windows, one color per scale.
    :param image: image
    :param hierarchy_of_bounding_boxes: list of lists of bounding boxes for each window size
    :param thickness: line thickness
    :return: image copy with the hierarchy of boxes drawn
    """
    canvas = np.copy(image)
    for level, boxes in enumerate(hierarchy_of_bounding_boxes):
        level_color = constants.SLIDING_WINDOWS_COLORS[level]
        for top_left, bottom_right in boxes:
            cv2.rectangle(canvas, top_left, bottom_right, level_color, thickness)
    return canvas
def find_vehicle_windows(image, y_start, y_stop, x_start, x_stop, scale, classifier, scaler,
                         orientations=constants.HOG_NUMBER_OF_ORIENTATIONS,
                         pixels_per_cell=constants.HOG_PIXELS_PER_CELL,
                         cells_per_block=constants.HIG_CELLS_PER_BLOCK,
                         spatial_size=constants.SPATIAL_BINNING_SIZE,
                         histogram_bins=constants.HISTOGRAM_NUMBER_OF_BINS):
    """
    Finds all vehicle windows using HOG sub-sampling over one search region.
    :param image: image
    :param y_start: minimal value of y for search
    :param y_stop: maximal value of y for search
    :param x_start: minimal value of x for search
    :param x_stop: maximal value of x for search
    :param scale: classification window scale
    :param classifier: feature classifier
    :param scaler: feature scaler
    :param orientations: number of HOG orientations
    :param pixels_per_cell: number of HOG pixels per cell
    :param cells_per_block: number of HOG cells per block
    :param spatial_size: spatial binning size
    :param histogram_bins: number of color histogram bins
    :return: list of bounding boxes corresponding to found vehicles
    """
    result = []
    # Normalize to [0, 1] to match training data.
    image = image.astype(np.float32) / 255
    image_to_search = image[y_start: y_stop, x_start: x_stop, :]
    canvas_to_search = classification.convert_color(image_to_search, conversion='RGB2YCrCb')
    # Scaling the image instead of the window keeps the classifier input 64x64.
    # Plain int() replaces np.int, which was removed from NumPy 1.24.
    if scale != 1:
        image_shape = canvas_to_search.shape
        canvas_to_search = cv2.resize(canvas_to_search, (int(image_shape[1] / scale), int(image_shape[0] / scale)))
    channel_1 = canvas_to_search[:, :, 0]
    channel_2 = canvas_to_search[:, :, 1]
    channel_3 = canvas_to_search[:, :, 2]
    # Define blocks and steps as above
    x_number_of_blocks = (channel_1.shape[1] // pixels_per_cell) - 1
    y_number_of_blocks = (channel_1.shape[0] // pixels_per_cell) - 1
    number_of_features_per_block = orientations * cells_per_block ** 2
    # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
    window = constants.TRACKING_BASE_WINDOW_DIMENSION
    number_of_blocks_per_window = (window // pixels_per_cell) - 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    x_number_of_steps = (x_number_of_blocks - number_of_blocks_per_window) // cells_per_step
    y_number_of_steps = (y_number_of_blocks - number_of_blocks_per_window) // cells_per_step
    # Compute individual channel HOG features once for the entire image;
    # per-window features are sub-sampled from these arrays below.
    hog_1 = classification.get_hog_features(channel_1, orientations, pixels_per_cell, cells_per_block,
                                            feature_vector=False)
    hog_2 = classification.get_hog_features(channel_2, orientations, pixels_per_cell, cells_per_block,
                                            feature_vector=False)
    hog_3 = classification.get_hog_features(channel_3, orientations, pixels_per_cell, cells_per_block,
                                            feature_vector=False)
    for x_block in range(x_number_of_steps):
        for y_block in range(y_number_of_steps):
            y = y_block * cells_per_step
            x = x_block * cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog_1[y: y + number_of_blocks_per_window, x: x + number_of_blocks_per_window].ravel()
            hog_feat2 = hog_2[y: y + number_of_blocks_per_window, x: x + number_of_blocks_per_window].ravel()
            hog_feat3 = hog_3[y: y + number_of_blocks_per_window, x: x + number_of_blocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
            x_left = x * pixels_per_cell
            y_top = y * pixels_per_cell
            # Extract the image patch
            sub_canvas = cv2.resize(canvas_to_search[y_top: y_top + window, x_left: x_left + window], (64, 64))
            # Get color features
            spatial_features = classification.get_spatial_binning_features(sub_canvas, size=spatial_size)
            red_histogram, green_histogram, blue_histogram, bin_centers, histogram_features = \
                classification.get_color_histogram_features(sub_canvas, number_of_bins=histogram_bins)
            # Scale features and make a prediction; feature order must
            # match the order used when fitting the scaler/classifier.
            features_for_prediction = np.hstack((spatial_features, histogram_features, hog_features)).reshape(1, -1)
            prediction_features = scaler.transform(features_for_prediction)
            prediction = classifier.predict(prediction_features)
            if prediction == 1:
                # Map the window back into original-image coordinates.
                x_box_left = int(x_left * scale)
                y_top_draw = int(y_top * scale)
                window_draw = int(window * scale)
                result.append(((x_box_left + x_start, y_top_draw + y_start),
                               (x_box_left + window_draw + x_start, y_top_draw + window_draw + y_start)))
    return result
def find_all_vehicles(image, classifier, scaler, orientations=constants.HOG_NUMBER_OF_ORIENTATIONS,
                      pixels_per_cell=constants.HOG_PIXELS_PER_CELL, cells_per_block=constants.HIG_CELLS_PER_BLOCK,
                      spatial_size=constants.SPATIAL_BINNING_SIZE, histogram_bins=constants.HISTOGRAM_NUMBER_OF_BINS):
    """
    Finds all vehicles by searching every configured scale and region.
    :param image: image
    :param classifier: feature classifier
    :param scaler: feature scaler
    :param orientations: number of HOG orientations
    :param pixels_per_cell: number of HOG pixels per cell
    :param cells_per_block: number of HOG cells per block
    :param spatial_size: spatial binning size
    :param histogram_bins: number of color histogram bins
    :return: list of bounding boxes corresponding to found vehicles
    """
    detections = []
    for index in range(len(constants.SLIDING_WINDOWS_SCALES)):
        scale = constants.SLIDING_WINDOWS_SCALES[index]
        y_range = constants.SLIDING_WINDOWS_Y_RANGES[index]
        x_range = constants.SLIDING_WINDOWS_X_RANGES[index]
        detections.extend(find_vehicle_windows(image, y_range[0], y_range[1], x_range[0], x_range[1],
                                               scale, classifier, scaler))
    return detections
def add_heat(heat_map, windows):
    """
    Accumulates detections into a heat map: every pixel covered by a window
    gets +1 per covering window.
    :param heat_map: 2-D heat map array
    :param windows: iterable of boxes of the form ((x1, y1), (x2, y2))
    :return: the same heat map, updated in place
    """
    for (x_min, y_min), (x_max, y_max) in windows:
        heat_map[y_min:y_max, x_min:x_max] += 1
    return heat_map
def apply_heat_threshold(heat_map, threshold):
    """
    Suppresses weak heat: pixels with values at or below the threshold are zeroed.
    :param heat_map: heat map
    :param threshold: desired minimal value threshold
    :return: the same heat map, thresholded in place
    """
    below_or_equal = heat_map <= threshold
    heat_map[below_or_equal] = 0
    return heat_map
def get_heat_labels(heat_map):
    """
    Finds the connected components of a heat map.
    :param heat_map: heat map
    :return: (labeled map, number of components) pair as returned by ``label``
    """
    return label(heat_map)
def get_labeled_windows(labels):
    """
    Computes bounding boxes for labeled components.
    :param labels: (labeled map, component count) pair as produced by get_heat_labels
    :return: list of bounding boxes ((x_min, y_min), (x_max, y_max)), skipping
        components smaller than the configured minimal tracking window
    """
    labeled_map, component_count = labels[0], labels[1]
    result = []
    # Component labels are 1-based; 0 marks the background.
    for vehicle_number in range(1, component_count + 1):
        ys, xs = (labeled_map == vehicle_number).nonzero()
        left, right = np.min(xs), np.max(xs)
        top, bottom = np.min(ys), np.max(ys)
        wide_enough = right - left >= constants.TRACKING_MINIMAL_WINDOW_WIDTH
        tall_enough = bottom - top >= constants.TRACKING_MINIMAL_WINDOW_HEIGHT
        if wide_enough and tall_enough:
            result.append(((left, top), (right, bottom)))
    return result
def combine_heat_maps(heat_map):
    """
    Combines heat maps together using rolling sum and average heat
    :param heat_map: current frame's heat map
    :return: average heat map over the last TRACKING_HEAT_MAP_LAYERS frames
        (or the input unchanged if averaging is disabled)
    """
    # Module-level state: the window of recent heat maps and their running sum,
    # so consecutive video frames can be averaged cheaply.
    global last_heat_maps
    global heat_map_rolling_sum
    if not constants.TRACKING_USE_AVERAGED_HEAT_MAP:
        # Averaging disabled: pass the current frame's heat map through unchanged.
        return heat_map
    if len(last_heat_maps) == 0:
        # First frame: seed the rolling window and its sum with this heat map.
        last_heat_maps = [heat_map]
        heat_map_rolling_sum = heat_map
        return heat_map
    else:
        last_heat_maps.append(heat_map)
        if len(last_heat_maps) > constants.TRACKING_HEAT_MAP_LAYERS:
            # Window full: evict the oldest layer from both the list and the sum.
            heat_map_rolling_sum -= last_heat_maps[0]
            del last_heat_maps[0]
        number_of_layers = len(last_heat_maps)
        heat_map_rolling_sum += heat_map
        # Average over however many layers are currently in the window.
        result = heat_map_rolling_sum / number_of_layers
        return result
def combine_detections(image, detections):
    """
    Combines overlapping vehicle detections into unique bounding boxes via a
    thresholded (and optionally time-averaged) heat map.
    :param image: image (used only for its height and width)
    :param detections: list of detected bounding boxes
    :return: list of unique vehicle detections
    """
    # np.float was removed from NumPy (1.24); the builtin float is the exact
    # equivalent the alias pointed to (float64).
    heat_map = np.zeros_like(image[:, :, 0]).astype(float)
    add_heat(heat_map, detections)
    heat_map = combine_heat_maps(heat_map)
    apply_heat_threshold(heat_map, constants.TRACKING_HEAT_MAP_THRESHOLD)
    labels = get_heat_labels(heat_map)
    result = get_labeled_windows(labels)
    return result
|
#!/usr/bin/python3
"""write to a CSV file"""
import csv
import requests
import sys
if __name__ == "__main__":
    user_id = sys.argv[1]
    # Resolve the user's login name from the users endpoint.
    users = requests.get("https://jsonplaceholder.typicode.com/users/" +
                         user_id)
    user = users.json().get("username")
    # Fetch every TODO task belonging to this user.
    all_tasks = requests.get(
        "https://jsonplaceholder.typicode.com/todos?userId=" + user_id)
    all_tasks_list = all_tasks.json()
    # newline='' is required by the csv module so it controls line endings itself.
    with open(user_id + ".csv", 'w', newline='') as task_record:
        # Create the writer once, not once per row as before.
        task_format = csv.writer(task_record, delimiter=',', quotechar='"',
                                 quoting=csv.QUOTE_ALL)
        for task in all_tasks_list:
            status = task.get("completed")
            title = task.get("title")
            task_format.writerow([str(user_id), str(user), str(status),
                                  str(title)])
|
import numpy as np
class RocchioAlgorithm(object):
    """Relevance-feedback query refinement using the classic Rocchio formula."""

    def __init__(self):
        # Standard SMART weights: original query, relevant docs, non-relevant docs.
        self.alpha = 1
        self.beta = 0.75
        self.gamma = 0.15

    def run(self, doc_vectors, q_vector, evaluations):
        """
        Compute the refined query vector.
        :param doc_vectors: 2-D array with one document vector per row
        :param q_vector: original query vector
        :param evaluations: iterable of booleans; True marks a relevant document
        :return: new query vector (alpha*q + beta*mean(relevant) - gamma*mean(non-relevant))
        """
        # Split evaluations into relevant and non-relevant document indices.
        relevant = [idx for idx, judged in enumerate(evaluations) if judged]
        non_relevant = [idx for idx, judged in enumerate(evaluations) if not judged]
        new_q = self.alpha * q_vector
        # Guard against an empty class: it simply contributes nothing.
        # (The original code produced NaNs here via division by zero.)
        if relevant:
            new_q = new_q + self.beta * np.sum(doc_vectors[relevant, :], axis=0) / len(relevant)
        if non_relevant:
            new_q = new_q - self.gamma * np.sum(doc_vectors[non_relevant, :], axis=0) / len(non_relevant)
        return new_q
|
"""
The :mod:`pycircular.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_transactions
__all__ = ['load_transactions'] |
# -*- coding: utf-8 -*-
"""
Project Euler Problem 14 ○
正の整数に以下の式で繰り返し生成する数列を定義する.
n → n/2 (n が偶数)
n → 3n + 1 (n が奇数)
13からはじめるとこの数列は以下のようになる.
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
13から1まで10個の項になる. この数列はどのような数字からはじめても
最終的には 1 になると考えられているが, まだそのことは証明されていない(コラッツ問題)
さて, 100万未満の数字の中でどの数字からはじめれば最長の数列を生成するか.
注意: 数列の途中で100万以上になってもよい
"""
import time

start = time.time()
maxx = 0
# Memoized Collatz step counts (steps needed to reach 1); 1 needs 0 steps.
# The original Python 2 version recomputed every chain from scratch.
steps = {1: 0}
for n in range(13, 1000000):
    m = n
    i = 0
    # Follow the chain only until a number with a known step count is reached.
    while n not in steps:
        if n % 2 == 0:
            n //= 2
        else:
            n = 3 * n + 1
        i += 1
    steps[m] = i + steps[n]
    # Report each new record holder, matching the original output format.
    if maxx < steps[m]:
        maxx = steps[m]
        print(m, maxx)
# Previous timings: 76.6159999371 p014.py, 109.759000063 p014b.py
print(time.time() - start)
|
import torch
import math
import matplotlib.pyplot as plt
class RoundingNoGrad(torch.autograd.Function):
    """
    Rounding operation that passes the upstream gradient straight through.
    Plain round()/floor()/ceil() have zero gradient almost everywhere, which is
    useless for approximate computing; this behaves like the straight-through
    estimator (STE) used in quantization-aware training.
    """
    # Both forward and backward must be @staticmethods for autograd Functions.
    @staticmethod
    def forward(ctx, input, mode="round"):
        # Dispatch to the requested tensor rounding op.
        operations = {"round": input.round, "floor": input.floor, "ceil": input.ceil}
        if mode not in operations:
            raise ValueError("Input rounding is not supported.")
        return operations[mode]()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: forward the gradient unchanged; None for the mode arg.
        return grad_output, None
def Trunc(input, intwidth=7, fracwidth=8, rounding="floor"):
    """
    Quantize a tensor to fixed-point format (1 sign bit, intwidth, fracwidth),
    keeping gradients flowing via RoundingNoGrad.
    """
    scale = 2 ** fracwidth
    total_bits = intwidth + fracwidth
    upper = 2 ** total_bits - 1
    lower = -(2 ** total_bits)
    quantized = RoundingNoGrad.apply(input.mul(scale), rounding)
    return quantized.clamp(lower, upper).div(scale)
def Trunc_val(input, intwidth=7, fracwidth=8, rounding="round"):
    """
    Quantize one scalar value to fixed-point format (1 sign bit, intwidth, fracwidth).
    """
    rounders = {"round": round, "floor": math.floor, "ceil": math.ceil}
    if rounding not in rounders:
        raise ValueError("Input rounding is not supported.")
    scale = 2 ** fracwidth
    total_bits = intwidth + fracwidth
    upper = 2 ** total_bits - 1
    lower = -(2 ** total_bits)
    fixed = rounders[rounding](input * scale)
    # Saturate to the representable range, then rescale back.
    return max(min(fixed, upper), lower) / scale
def data_gen(data_range="0.0_1.0", mu=0.5, sigma=0.3):
    """
    Generate 100000 normally distributed samples wrapped into [0, 1) and,
    for data_range "0.5_1.0", rescaled into [0.5, 1.0); shows a histogram.
    :param data_range: either "0.0_1.0" or "0.5_1.0"
    :param mu: mean of the underlying normal distribution
    :param sigma: standard deviation of the underlying normal distribution
    :return: torch tensor with the generated samples
    :raises ValueError: if data_range is not supported
    """
    if data_range == "0.0_1.0":
        mu_eff = mu
    elif data_range == "0.5_1.0":
        mu_eff = (mu - 0.5) * 2
    else:
        # The original code crashed later with a NameError on mu_eff instead.
        raise ValueError("Input data_range is not supported.")
    mu_tensor = torch.ones([100000]).mul(mu_eff)
    sigma_tensor = torch.ones([100000]).mul(sigma)
    data = torch.distributions.normal.Normal(mu_tensor, sigma_tensor).sample()
    # Keep only the fractional part, i.e. wrap samples into [0, 1).
    data = data - data.floor()
    if data_range == "0.5_1.0":
        data = data / 2 + 0.5
    plt.hist(data.cpu().numpy(), bins='auto')  # arguments are passed to np.histogram
    plt.title("Histogram for data")
    plt.show()
    return data
|
"""
Main module in optrsa project.
Optimization of packing fraction of two-dimensional Random Sequential Adsorption (RSA)
packings using Covariance Matrix Adaptation Evolution Strategy (CMA-ES).
"""
# TODO Read about docstrings and automatic documentation generation services like pydoctor
import cma
import sys
import matplotlib
import subprocess
# PyCharm Professional sets other backend, which may cause trouble
if sys.platform.startswith('darwin'):  # MacOS
    # See https://matplotlib.org/tutorials/introductory/usage.html#backends
    matplotlib.use("MacOSX")
    # Qt5Agg, Qt4Agg - can't import qt bindings, GTK3Agg, GTK3Cairo - can't install all dependencies. nbAgg - fails.
    # WX does not have access to the screen. TkAgg works, WebAgg (with tornado imported) works worse than TkAgg.
else:
    try:
        # TODO Maybe use platform module instead
        # Best-effort probe of the kernel string; "uname" may be missing (e.g. on Windows).
        system_info = subprocess.check_output(["uname", "-mrs"]).decode().strip()
    except Exception as exception:
        system_info = "not checked"
    okeanos_system_info = "Linux 4.12.14-150.17_5.0.86-cray_ari_s x86_64"
    # On the okeanos cluster (recognized by its kernel string) keep matplotlib's default backend.
    if system_info != okeanos_system_info and system_info != "not checked":
        # Partially tested
        matplotlib.use("Qt5Agg")  # Maybe try also TkAgg (works) if interactivity is needed. Agg is not interactive.
import matplotlib.pyplot as plt
import matplotlib.transforms
import matplotlib.offsetbox
import matplotlib.patches
import matplotlib.text
# import matplotlib_shiftable_annotation
import numpy as np
from scipy.spatial import ConvexHull
import shapely.geometry
import pandas as pd
from typing import Callable, Tuple, Union, List, Optional, Set
from collections import namedtuple
import abc
import inspect
import io
import os
import glob
import shutil
from file_read_backwards import FileReadBackwards
import traceback
import logging
import logging.config
import ruamel.yaml
import pprint
import timeit
# import subprocess
import multiprocessing.pool
# TODO Maybe import MPIPoolExecutor only in the optimize mode, if okeanos_parallel option is set
from mpi4py.futures import MPIPoolExecutor
from concurrent.futures import Future
import threading
import time
import datetime
from copy import deepcopy
import pickle
from wolframclient.evaluation import WolframLanguageSession
from wolframclient.language import wl
from itertools import combinations
from functools import wraps
from module_arg_parser import ModuleArgumentParser
# Get absolute path to optrsa project directory
_proj_dir = os.path.dirname(__file__)
# TODO Maybe adjust it in order to point to the correct virtual environment
# Absolute path to python interpreter, assuming that relative path reads /optrsa-py-3-8-1-venv/bin/python
_python_path = _proj_dir + "/optrsa-py-3-8-1-venv/bin/python"  # Or: os.path.abspath("optrsa-py-3-8-1-venv/bin/python")
# Absolute path to Wolfram Kernel script, assuming that relative path reads /exec/wolframscript
_wolfram_path = _proj_dir + "/exec/wolframscript"
# Absolute path to rsa3d executable compiled with target 2.1, assuming that relative path reads /exec/rsa.2.1
_rsa_path = _proj_dir + "/exec/rsa.2.1"
# Absolute paths to input and output directories
_input_dir = _proj_dir + "/input"
_output_dir = _proj_dir + "/output"
_outrsa_dir = _proj_dir + "/outrsa"  # To be removed
_outcmaes_dir = _proj_dir + "/outcmaes"  # To be removed
# YAML file configuring the optimization loggers
_cmaes_logging_config = _proj_dir + "/optimization_logging.yaml"
# Loggers whose records are kept out of the libraries-info logfile
libraries_info_logfile_excluded_loggers = ["optrsa"]
# Subprocesses plotting CMA-ES data; waited for at the end of the main process
graph_processes = []
# Registry of optimization classes, filled by the @opt_class decorator
opt_classes = {}
# class ChosenLoggersOrLogLevelFilter(logging.Filter):
# """
# Logging filter accepting log records from (children of) loggers from a list or above certain log severity level
# """
# def __init__(self, loggers: Optional[List[str]] = None, level: Union[int, str] = logging.WARNING) -> None:
# self.loggers = loggers if loggers is not None else []
# self.level = level if isinstance(level, int) else getattr(logging, level)
#
# def filter(self, record: logging.LogRecord) -> bool:
# if record.levelno >= self.level:
# return True
# for logger_name in self.loggers:
# logger_name_len = len(logger_name)
# if record.name.startswith(logger_name) and (len(record.name) == logger_name_len
# or record.name[logger_name_len] == "."):
# return True
# return False
class StrFormatStyleMessageLogRecord(logging.LogRecord):
    """LogRecord subclass that merges arguments with str.format instead of %."""

    def getMessage(self) -> str:
        """
        Return the message for this LogRecord.

        Merge any user-supplied arguments into the message using str.format.
        Positional arguments are unpacked; a single mapping argument (the same
        special case the logging module supports for %-style formatting) is
        applied as keyword arguments.
        """
        from collections.abc import Mapping
        msg = str(self.msg)
        if self.args:
            # The original code called msg.format(self.args), passing the whole
            # args tuple as ONE positional argument — "{} {}" messages failed.
            if isinstance(self.args, Mapping):
                msg = msg.format(**self.args)
            else:
                msg = msg.format(*self.args)
        return msg


logging.setLogRecordFactory(StrFormatStyleMessageLogRecord)
# TODO Annotate this function properly
def opt_class(name: str):
    """Decorator factory registering an optimization class under *name* in opt_classes."""
    def register(cls):
        opt_classes[name] = cls
        return cls
    return register
# TODO Use click instead, maybe together with click-config-file
mod_arg_parser = ModuleArgumentParser()
# Shared argument parsers reused by several sub-commands of the module
opt_input_file_arg_parser = mod_arg_parser.add_common_arg_parser("opt_input_file")
opt_input_file_arg_parser.add_argument("file", help="YAML optimization input file from ./input directory")
opt_signature_arg_parser = mod_arg_parser.add_common_arg_parser("opt_signature")
opt_signature_arg_parser.add_argument("signature", help="optimization signature - name of subdirectory of ./output")
# Round-trip YAML parser used for optimization input/output files
yaml = ruamel.yaml.YAML()
yaml.indent(mapping=4, sequence=6, offset=4)
@mod_arg_parser.command("testcma")
def test_cma_package() -> None:
    """Test cma package using doctest, according to http://cma.gforge.inria.fr/apidocs-pycma/cma.html"""
    # Runs the full cma doctest suite once and reports the wall-clock time it took.
    print("Testing time (tests should run without complaints in about between 20 and 100 seconds):",
          timeit.timeit(stmt='cma.test.main()', setup="import cma.test", number=1))
# TODO Maybe write a class for managing graph processes
def plot_cmaes_graph_in_background(data_dir: str, window_name: str) -> None:
    """
    Plot CMA-ES data in a separate process.

    The window is shown without blocking execution and survives the end of the
    main process. Current version should be launched only in this module: the
    spawned process is recorded in the module-level graph_processes list, and
    wait_for_graphs has to be called at the end of the main process.
    """
    process = subprocess.Popen([_python_path, _proj_dir + "/plot_cmaes_data.py", data_dir, window_name])
    graph_processes.append(process)
def wait_for_graphs() -> None:
    """Block until every spawned graph subprocess has exited."""
    for process in graph_processes:
        process.wait()
def waiting_for_graphs(function: Callable[..., None]) -> Callable[..., None]:
    """Decorator: after *function* returns, wait for all graph subprocesses to exit."""
    @wraps(function)
    def wrapper(*args, **kwargs) -> None:
        function(*args, **kwargs)
        wait_for_graphs()
    return wrapper
@mod_arg_parser.command("examplecmaplots")
@waiting_for_graphs
def example_cma_plots() -> None:
    """
    Illustrative CMA-ES data plotting in subprocesses.
    Example with optimization of a simple function.
    See http://cma.gforge.inria.fr/apidocs-pycma/cma.evolution_strategy.CMAEvolutionStrategy.html
    """
    # The two runs were copy-pasted in the original; share one helper instead.
    _run_sphere_optimization_example("sphere-test-1/", "Sphere 1")
    print()
    _run_sphere_optimization_example("sphere-test-2/", "Sphere 2")


def _run_sphere_optimization_example(logger_suffix: str, window_name: str) -> None:
    """Optimize the sphere function with CMA-ES, log progress and plot it in a subprocess."""
    es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
    es.logger.name_prefix += logger_suffix
    es.logger.disp_header()  # annotate the print of disp
    while not es.stop():
        X = es.ask()
        es.tell(X, [cma.ff.sphere(x) for x in X])
        es.logger.add()  # log current iteration
        es.logger.disp([-1])  # display info for last iteration
    es.logger.disp_header()
    plot_cmaes_graph_in_background(es.logger.name_prefix, window_name)
def wolfram_polydisk_area(arg: np.ndarray) -> float:
    """Calculate the area of a polydisk using Wolfram Kernel script"""
    disks = np.reshape(arg, (-1, 3))
    disk_exprs = ["Disk[{{{},{}}},{}]".format(*disk) for disk in disks]
    wolfram_disks_str = "{" + ",".join(disk_exprs) + "}"
    # TODO Check, if the Wolfram Kernel script can be called with shell=False and whether the performance will be better
    # NOTE(review): shell=True with a formatted command string; inputs come from the
    # optimizer rather than untrusted users, but an argv list with shell=False would be safer.
    command = _wolfram_path \
        + " -code 'N[Area[Region[Apply[RegionUnion,{}]]]]'".format(wolfram_disks_str)
    area_str = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    return float(area_str)
def softplus(x, k: Optional[float] = 1):
    """
    Calculate element-wise softplus of the NumPy array_like x argument with "sharpness" parameter k
    :param x: Argument suitable to pass to NumPy functions using array_like arguments
    :param k: Optional parameter determining "sharpness" of the function, defaults to 1
    :return: Object of the same type as x, element-wise softplus of x
    """
    # log(1 + exp(k*x)) == logaddexp(0, k*x); the latter does not overflow for
    # large k*x, where the naive formula returned inf.
    return np.logaddexp(0, np.multiply(k, x)) / k
def logistic(x, min: Optional[float] = 0, max: Optional[float] = 1, k: Optional[float] = 1):
    """
    Element-wise logistic (sigmoid) function of a NumPy array_like argument.
    :param x: Argument suitable to pass to NumPy functions using array_like arguments
    :param min: Optional left asymptote of the function, defaults to 0
    :param max: Optional right asymptote of the function, defaults to 1
    :param k: Optional "steepness" parameter, defaults to 1
    :return: Object of the same type as x, element-wise logistic function of x
    """
    span = max - min
    return min + span / (1 + np.exp(-k * x))
class StreamToLogger:
    """
    Fake file-like stream object that forwards complete lines to a logger.

    Incomplete lines (no trailing newline yet) are buffered until the next
    write or an explicit flush.
    Source: https://stackoverflow.com/questions/11124093/redirect-python-print-output-to-logger/11124247
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ""
        for piece in pending.splitlines(True):
            # sys.stdout.write() conventionally receives '\n' newlines, so a
            # piece ending in '\n' is a complete line; anything else (including
            # a bare '\r') stays buffered for the next write.
            if piece.endswith("\n"):
                self.logger.log(self.log_level, piece.rstrip())
            else:
                self.linebuf += piece

    def flush(self):
        # Emit whatever partial line is still buffered.
        if self.linebuf != "":
            self.logger.log(self.log_level, self.linebuf.rstrip())
            self.linebuf = ""
class ExcludedLoggersFilter(logging.Filter):
    """
    Logging filter that rejects records coming from the listed loggers or any
    of their child loggers.
    """
    def __init__(self, loggers: Optional[List[str]] = None) -> None:
        self.loggers = [] if loggers is None else loggers

    def filter(self, record: logging.LogRecord) -> bool:
        for excluded_name in self.loggers:
            prefix_len = len(excluded_name)
            # Match the logger itself or a dotted child (e.g. "a" matches "a.b"
            # but not "ab").
            is_excluded = record.name.startswith(excluded_name) and (
                len(record.name) == prefix_len or record.name[prefix_len] == ".")
            if is_excluded:
                return False
        return True
class OptimizationFormatter(logging.Formatter):
    """
    Formatter that stamps each record with the time elapsed since the formatter
    was created (available as the ``runningTime`` attribute) and repeats the
    whole prefix on every line of a multi-line message.

    Partly inspired by https://stackoverflow.com/questions/18639387/how-change-python-logging-to-display-time-passed
    -from-when-the-script-execution
    """
    def __init__(self,
                 fmt: Optional[str] = None,
                 datefmt: Optional[str] = None,
                 style: str = "%") -> None:
        super().__init__(fmt, datefmt, style)
        self.optimization_start = datetime.datetime.now()

    def format(self, record):
        # logging may call format() several times for one record, so record
        # attributes are always recomputed from scratch rather than extended
        # incrementally.
        elapsed = str(datetime.datetime.now() - self.optimization_start)
        # Human-readable running time; truncate microseconds to milliseconds.
        record.runningTime = elapsed[:-3]
        message = record.getMessage()
        if "\n" not in message or record.exc_info or record.exc_text or record.stack_info:
            # Single-line messages (or records with exception/stack info) take
            # the stock formatting path.
            return super().format(record)
        # Multi-line message: render the prefix once with an empty message,
        # then prepend it to every line.
        saved_msg = record.msg
        record.msg = ""
        prefix = super().format(record)
        record.msg = saved_msg
        return "\n".join(prefix + line for line in message.split("\n"))
# TODO Maybe change it into a typed named tuple
# Result record of a single RSA simulation: which candidate and simulation it
# belongs to, the collector range it produced, the process outcome (return
# code, node message, pid), timing, and the particle counts.
DefaultRSASimulationResult = namedtuple(typename="DefaultRSASimulationResult",
                                        field_names=["candidate_num", "simulation_num", "first_collector_num",
                                                     "collectors_num", "return_code", "node_message", "pid",
                                                     "start_time", "time", "particles_numbers"])
class RSACMAESOptimization(metaclass=abc.ABCMeta):
"""
Abstract base class for performing optimization of RSA packing fraction with CMA-ES optimizer
and managing the output data
"""
# TODO Maybe treat cma_options in the same way as rsa_parameters (default values dictionary, input files)
default_rsa_parameters: dict = {}
# Optimization-type-specific rsa parameters - to be set by child classes
mode_rsa_parameters: dict = {}
# TODO Add names of the used files and directories as class attributes
# rsa_output_dirname = "outrsa"
# cmaes_output_dirname = "outcmaes"
# opt_input_filename = "optimization-input.yaml"
# gen_rsa_input_filename = "generated-rsa-input.txt"
# cp_rsa_input_filename_prefix = "copied-rsa-input-"
# rsa_sim_input_filename = "rsa-simulation-input.txt"
# rsa_sim_output_filename = "rsa-simulation-output.txt"
# output_filename = "packing-fraction-vs-params.dat"
# opt_output_filename = "optimization-output.txt"
# opt_data_output_filename = "optimization.dat"
optimization_data_columns: dict = {"generationnum": np.int,
"meanarg": str,
"meanpartattrs": str,
"stddevs": str,
"covmat": str,
"partstddevs": str,
"bestind": np.int, "bestarg": str, "bestpartattrs": str,
"bestpfrac": np.float, "bestpfracstddev": np.float,
"medianind": np.int, "medianarg": str, "medianpartattrs": str,
"medianpfrac": np.float, "medianpfracstddev": np.float,
"worstind": np.int, "worstarg": str, "worstpartattrs": str,
"worstpfrac": np.float, "worstpfracstddev": np.float,
"candidatesdata": str}
# DefaultRSASimulationResult = namedtuple(typename="DefaultRSASimulationResult",
# field_names=["candidate_num", "simulation_num", "first_collector_num",
# "collectors_num", "return_code", "node_message", "pid",
# "start_time", "time", "particles_numbers"])
stddevs_sample_size_optclattr: int = None
@abc.abstractmethod
def get_arg_signature(self) -> str:
return ""
# TODO Maybe make some attributes obligatory
def __init__(self,
initial_mean: np.ndarray = None,
initial_stddevs: float = None,
cma_options: dict = None,
rsa_parameters: dict = None,
accuracy: float = 0.001,
parallel: bool = True,
threads: int = None,
particle_attributes_parallel: bool = False,
okeanos: bool = False,
max_nodes_number: int = None,
okeanos_parallel: bool = False,
nodes_number: int = None,
min_collectors_number: int = 10,
collectors_per_task: int = 1,
input_rel_path: str = None,
output_dir: str = None,
output_to_file: bool = True,
output_to_stdout: bool = False,
log_generations: bool = True,
show_graph: bool = False,
signature_suffix: str = None,
optimization_input: dict = None,
# Keyword arguments are captured to receive unnecessarily passed, ignored optimization class attributes
**kwargs
) -> None:
self.initial_mean = initial_mean
self.initial_stddevs = initial_stddevs
self.cma_options = cma_options if cma_options is not None else {}
# Alternative (probably less safe): cma_options or {}
self.rsa_parameters = rsa_parameters if rsa_parameters is not None else {}
self.accuracy = accuracy
self.parallel = parallel
self.particle_attributes_parallel = particle_attributes_parallel
self.okeanos = okeanos
self.max_nodes_number = max_nodes_number
self.okeanos_parallel = okeanos_parallel
self.nodes_number = nodes_number
self.min_collectors_number = max(min_collectors_number, 2)
self.collectors_per_task = collectors_per_task
self.output_to_file = output_to_file
self.output_to_stdout = output_to_stdout
self.log_generations = log_generations
self.show_graph = show_graph
self.optimization_input = optimization_input
self.set_optimization_class_attributes(optimization_input=self.optimization_input)
# Set optimization signature
self.signature = datetime.datetime.now().isoformat(timespec="milliseconds") # Default timezone is right
self.signature += "-" + type(self).__name__
self.signature += "-" + self.get_arg_signature()
self.signature += ("-" + signature_suffix) if signature_suffix is not None else ""
self.signature = self.signature.replace(":", "-").replace(".", "_")
# Create output directory and subdirectories for RSA and CMAES output
self.output_dir = (output_dir if output_dir is not None else _output_dir) + "/" + self.signature
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.rsa_output_dir = self.output_dir + "/outrsa"
if not os.path.exists(self.rsa_output_dir):
os.makedirs(self.rsa_output_dir)
self.cmaes_output_dir = self.output_dir + "/outcmaes"
if not os.path.exists(self.cmaes_output_dir):
os.makedirs(self.cmaes_output_dir)
# Maybe use shutil instead
# Generate used optimization input file in output directory
with open(self.output_dir + "/optimization-input.yaml", "w+") as opt_input_file:
yaml.dump(self.optimization_input, opt_input_file)
# Update self.rsa_parameters by optimization-type-specific parameters
self.rsa_parameters.update(self.mode_rsa_parameters)
# Create rsa3d input file in output directory
if "particleAttributes" in rsa_parameters:
del rsa_parameters["particleAttributes"]
self.input_given = input_rel_path is not None
self.input_filename = _input_dir + "/" + input_rel_path if self.input_given else None
# When input file is not given, self.all_rsa_parameters dictionary will be used with self.default_rsa_parameters
# overwritten by self.rsa_parameters specified in the constructor
self.all_rsa_parameters = dict(self.default_rsa_parameters, **self.rsa_parameters) if not self.input_given\
else None
if self.input_given:
# Copy input file to output directory and add overwritten options at the end
# TODO Add checking, if the file exists
# TODO Test if it works here (worked in optimize_fixed_radii_disks function)
shutil.copy(self.input_filename, self.output_dir)
copied_file_name = self.output_dir + "/copied-rsa-input-" + os.path.basename(self.input_filename)
shutil.move(self.output_dir + "/" + os.path.basename(self.input_filename), copied_file_name)
with open(copied_file_name, "a") as copied_input_file:
copied_input_file.write("\n")
copied_input_file.writelines(["{} = {}\n".format(param_name, param_value)
for param_name, param_value in rsa_parameters.items()])
else:
# Create input file in the output directory
# Use parameters from self.default_rsa_parameters and self.rsa_parameters dictionaries
with open(self.output_dir + "/generated-rsa-input.txt", "w+") as generated_input_file:
generated_input_file.writelines(["{} = {}\n".format(param_name, param_value)
for param_name, param_value in self.all_rsa_parameters.items()])
# Create file containing output of the rsa3d accuracy mode in output directory
self.output_filename = self.output_dir + "/packing-fraction-vs-params.txt"
# Create a file if it does not exist
with open(self.output_filename, "w+"): # as output_file
pass
# Create list of common arguments (constant during the whole optimization)
# for running rsa3d program in other process
self.set_rsa_proc_arguments()
self.configure_logging()
# TODO Maybe move these definitions to run method (then redirecting output will be done only in run method and
# CMAES initialization information will be added to output file).
# Then change here self.CMAES initialization to self.CMAES = None.
# In run method, before self.CMAES assignment add printing optimization signature, and after optimization add
# printing current time (maybe also the optimization time).
# Counter of conducted simulations
self.simulations_num = 0
# Counter of phenotype candidates in generation
self.candidate_num = 0
# Redirect output - done here to catch CMAES initialization information. To be moved to run method together
# with CMAES initialization
self.redirect_output_to_logger()
# Create CMA evolution strategy optimizer object
# TODO Maybe add serving input files and default values for CMA-ES options (currently directly self.cma_options)
self.logger.info(msg="Optimization class: {}".format(self.__class__.__name__))
self.logger.info(msg="Optimizer:")
self.CMAES = cma.CMAEvolutionStrategy(self.initial_mean, self.initial_stddevs,
inopts=self.cma_options)
# self.CMAES.logger.name_prefix += self.signature + "/" # Default name_prefix is outcmaes/
self.CMAES.logger.name_prefix = self.cmaes_output_dir + "/"
# Settings for parallel computation
# TODO Maybe add ompThreads to common self.rsa_parameters if it will not be changed
# TODO multiprocessing.cpu_count() instead? (Usage on server)
self.parallel_threads_number = threads if threads is not None else os.cpu_count() # * 2
if not self.okeanos:
self.parallel_simulations_number = min(self.parallel_threads_number, self.CMAES.popsize)
else:
self.parallel_simulations_number = min(self.max_nodes_number - 1, self.CMAES.popsize) \
if self.max_nodes_number is not None else self.CMAES.popsize
# self.omp_threads = self.parallel_threads_number // self.CMAES.popsize\
# if self.parallel and self.parallel_threads_number > self.CMAES.popsize else 1
# if self.okeanos:
# from mpi4py.futures import MPIPoolExecutor
if self.okeanos_parallel and self.nodes_number is None:
slurm_job_num_nodes = os.getenv("SLURM_JOB_NUM_NODES")
if slurm_job_num_nodes is not None:
self.nodes_number = int(slurm_job_num_nodes)
else:
self.logger.warning(msg="Unable to get number of nodes allocated to the job; SLURM_JOB_NUM_NODES"
" environment variable is not set")
self.logger.warning(msg="Setting the value of the nodes_number attribute to 1 + (population"
" size)")
# It is assumed that the SLURM job has the same number of assigned nodes
self.nodes_number = 1 + self.CMAES.popsize
# Create file for logging generation data
self.opt_data_filename = self.output_dir + "/optimization.dat" if self.log_generations else None
if self.log_generations:
# Create a file if it does not exist
# with open(self.opt_data_filename, "w+"):
# pass
with open(self.opt_data_filename, "w+") as opt_data_file:
# Write header line
opt_data_file.write("\t".join(self.optimization_data_columns) + "\n")
def set_rsa_proc_arguments(self) -> None:
"""
Create list of common arguments (constant during the whole optimization)
for running rsa3d program in other process
:return: None
"""
self.rsa_proc_arguments = [_rsa_path]
# TODO Maybe implement in in another way
if not self.okeanos_parallel:
self.rsa_proc_arguments.append("accuracy")
else:
self.rsa_proc_arguments.append("simulate")
if self.input_given:
self.rsa_proc_arguments.extend(["-f", self.input_filename])
self.rsa_proc_arguments.extend(["-{}={}".format(param_name, param_value)
for param_name, param_value in self.rsa_parameters.items()])
else:
self.rsa_proc_arguments.extend(["-{}={}".format(param_name, param_value)
for param_name, param_value in self.all_rsa_parameters.items()])
if not self.okeanos_parallel:
# Index at which particleAttributes parameter will be inserted
self.rsa_proc_args_last_param_index = len(self.rsa_proc_arguments)
self.rsa_proc_arguments.extend([str(self.accuracy), self.output_filename])
def configure_logging(self) -> None:
# Configure and set optimization state logger
# Logs can be printed to a logfile or to the standard output. By default, logfile will contain log records of
# severity level at least logging.INFO and standard output - at least logging.DEBUG.
with open(_cmaes_logging_config) as config_file:
logging_configuration = yaml.load(config_file)
if self.output_to_file:
# To set handlers' filename, modify configuration dictionary as below, try to set it to a variable in
# configuration file, use logger's addHandler method or modify logger.handlers[0] (probably it is not
# possible to specify file name after handler was instantiated)
logging_configuration["handlers"]["optrsa_optimization_logfile"]["filename"] = self.output_dir \
+ "/optimization-output.log"
else:
logging_configuration["loggers"]["optrsa.optimization"]["handlers"].pop(0)
del logging_configuration["handlers"]["optrsa_optimization_logfile"]
if self.output_to_stdout:
logging_configuration["loggers"]["optrsa.optimization"]["handlers"].append("debug_stdout")
logging.config.dictConfig(logging_configuration)
self.logger = logging.getLogger("optrsa.optimization")
# Configure root logger
if len(logging.root.handlers) == 1:
logging_root_handler = logging.root.handlers[0]
if isinstance(logging_root_handler, logging.StreamHandler) and logging_root_handler.stream == sys.stderr:
logging.root.removeHandler(logging_root_handler)
if not logging.root.hasHandlers():
formatter = self.logger.handlers[0].formatter
warnings_logfile_handler = logging.FileHandler(filename=self.output_dir + "/warnings.log")
warnings_logfile_handler.setLevel(logging.WARNING)
warnings_logfile_handler.setFormatter(formatter)
logging.root.addHandler(warnings_logfile_handler)
libraries_info_logfile_handler = logging.FileHandler(filename=self.output_dir + "/libraries_info.log")
libraries_info_logfile_handler.setLevel(logging.INFO)
libraries_info_logfile_handler.setFormatter(formatter)
libraries_info_logfile_handler.addFilter(ExcludedLoggersFilter(libraries_info_logfile_excluded_loggers))
logging.root.addHandler(libraries_info_logfile_handler)
def redirect_output_to_logger(self) -> None:
self.stdout = sys.stdout
self.stderr = sys.stderr
# If a decorator for redirecting output were used, a "with" statement could have been used
sys.stdout = StreamToLogger(logger=self.logger, log_level=logging.INFO)
sys.stderr = StreamToLogger(logger=self.logger, log_level=logging.ERROR)
# TODO Check, if earlier (more frequent) writing to output file can be forced (something like flush?)
    @staticmethod
    @abc.abstractmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        """
        Abstract static method returning the initial mean point of the CMA-ES distribution
        computed from the optimization mode arguments.

        :param opt_mode_args: Dictionary with optimization mode arguments
        :return: NumPy ndarray with the initial mean of the CMA-ES distribution
        """
        pass
@staticmethod
def create_optimization(optimization_input: dict):
# TODO Try to annotate the return type - -> RSACMAESOptimization does not work.
# See https://stackoverflow.com/questions/44640479/mypy-annotation-for-classmethod-returning-instance
opt_class = opt_classes[optimization_input["opt_class"]]
opt_mode_args = optimization_input["opt_mode_args"]
opt_class_args = deepcopy(optimization_input["opt_class_args"])
initial_mean: np.ndarray = opt_class.get_initial_mean(opt_mode_args)
opt_class_args["initial_mean"] = initial_mean
opt_class_args["optimization_input"] = optimization_input
return opt_class(**opt_class_args)
def __getstate__(self):
"""
Method modifying pickling behaviour of the class' instance.
See https://docs.python.org/3/library/pickle.html#handling-stateful-objects.
It needs to be overridden by a child class if it defines another unpicklable attributes.
"""
# Copy the object's state from self.__dict__ which contains all instance attributes using the dict.copy()
# method to avoid modifying the original state
state = self.__dict__.copy()
# Remove the unpicklable entries
unpicklable_attributes = ["stdout", "stderr", "logger", "rsa_processes_stdins"]
for attr in unpicklable_attributes:
if attr in state:
del state[attr]
return state
    def __setstate__(self, state):
        """
        Method modifying unpickling behaviour of the class' instance.
        See https://docs.python.org/3/library/pickle.html#handling-stateful-objects.
        It needs to be overridden by a child class if it defines another unpicklable attributes.

        :param state: Dictionary with the pickled instance attributes
        """
        # Restore pickled instance attributes
        self.__dict__.update(state)
        # Restore unpicklable attributes
        # configure_logging has to run first - redirect_output_to_logger uses self.logger, which it sets
        self.configure_logging()
        # Redirect output
        # TODO Maybe separate redirecting output from unpickling in order to be able to unpickle and use standard output
        self.redirect_output_to_logger()
def pickle(self, name: Optional[str] = None) -> None:
pickle_name = "" if name is None else "-" + name
with open(self.output_dir + "/_" + self.__class__.__name__ + pickle_name + ".pkl", "wb") as pickle_file:
pickle.dump(self, pickle_file)
# TODO Annotate it correctly
@classmethod
def unpickle(cls, signature: str):
with open(_output_dir + "/" + signature + "/_" + cls.__name__ + ".pkl", "rb") as pickle_file:
return pickle.load(pickle_file)
# Unpickling works outside this module provided that the class of pickled object is imported, e.g.:
# "from optrsa import FixedRadiiXYPolydiskRSACMAESOpt".
@classmethod
def set_optimization_class_attributes(cls, signature: Optional[str] = None,
optimization_input: Optional[dict] = None):
if optimization_input is None:
# Get optimization input data from optimization directory
with open(_output_dir + "/" + signature + "/optimization-input.yaml", "r") as opt_input_file:
optimization_input = yaml.load(opt_input_file)
# Set optimization class attibutes (class attributes with suffix "_optclattr") using optimization input data
# Based on https://www.geeksforgeeks.org/how-to-get-a-list-of-class-attributes-in-python/
suffix = "_optclattr"
# Get pairs (member, value) for current optimization class
for member in inspect.getmembers(cls):
if member[0].endswith(suffix) and not (member[0].startswith("_") or inspect.ismethod(member[1])):
attr_name = member[0][:-len(suffix)]
setattr(cls, member[0], optimization_input["opt_class_args"][attr_name])
    @classmethod
    @abc.abstractmethod
    def arg_to_particle_attributes(cls, arg: np.ndarray) -> str:
        """
        Function returning rsa3d program's parameter particleAttributes based on arg.
        Child classes have to provide the implementation - this stub returns an empty string.

        :param arg: Argument point in the optimization space
        :return: particleAttributes parameter string for the rsa3d program
        """
        return ""
    @classmethod
    def arg_to_particle_parameters(cls, arg: np.ndarray) -> np.ndarray:
        """
        Function returning particle's parameters based on arg.
        By default the argument is used directly as the particle's parameters; child classes may override this mapping.

        :param arg: Argument point in the optimization space
        :return: NumPy ndarray with particle's parameters
        """
        return arg
    @classmethod
    def arg_in_domain(cls, arg: np.ndarray) -> bool:
        """
        Function checking if arg belongs to the optimization domain.
        By default the whole space is accepted; child classes may override this check.

        :param arg: Argument point in the optimization space
        :return: True if arg belongs to the optimization domain, False otherwise
        """
        return True
    @classmethod
    def swap_arg(cls, arg: np.ndarray) -> np.ndarray:
        """
        Function swapping arg to another, for which the objective function value is the same. In some cases it may be
        useful to do it in order to manage plateaus. By default, it does not change the argument.

        :param arg: Argument point in the optimization space
        :return: Swapped argument point (by default, arg itself)
        """
        return arg
@classmethod
@abc.abstractmethod
def stddevs_to_particle_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
-> np.ndarray:
"""
Function returning particle's parameters' standard deviations based on standard deviations (and possibly mean
coordinates) in optimization's space
"""
# TODO Check, how standard deviations should be transformed
optimization_sample = np.random.default_rng().multivariate_normal(mean=arg,
cov=covariance_matrix,
size=cls.stddevs_sample_size_optclattr)
particle_parameters_sample = np.apply_along_axis(func1d=cls.arg_to_particle_parameters,
axis=1,
arr=optimization_sample)
particle_parameters_stddevs = np.std(particle_parameters_sample, axis=0, dtype=np.float64)
return particle_parameters_stddevs
    @classmethod
    @abc.abstractmethod
    def draw_particle(cls, particle_attributes: str, scaling_factor: float, color: str,
                      arg: Optional[np.ndarray] = None, std_devs: Optional[np.ndarray] = None,
                      covariance_matrix: Optional[np.ndarray] = None, part_std_devs: Optional[np.ndarray] = None) \
            -> matplotlib.offsetbox.DrawingArea:
        """
        Abstract class method drawing particle described by `particle_attributes` string attribute on
        matplotlib.offsetbox.DrawingArea and returning DrawingArea object.
        Child classes have to provide the implementation - this stub does nothing.
        :param particle_attributes: Particle's particleAttributes rsa3d program's parameter string
        :param scaling_factor: Factor for scaling objects drawn on matplotlib.offsetbox.DrawingArea
        :param color: Particle's color specified by matplotlib's color string
        :param arg: Argument point describing the particle - may be given if it is needed to draw the particle with full
                    information
        :param std_devs: Standard deviations of the probability distribution - may be given if it is needed to draw the
                         particle corresponding to the mean of the probability distribution
        :param covariance_matrix: Covariance matrix of the probability distribution - may be given if it is needed to
                                  draw the particle corresponding to the mean of the probability distribution
        :param part_std_devs: Particle attributes' standard deviations - they may be given in order to show them on the
                              drawing of the particle corresponding to the mean of the probability distribution
        :return: matplotlib.offsetbox.DrawingArea object with drawn particle
        """
        pass
def omp_threads_number(self, simulation_number: int, parallel_simulations_number: int,
parallel_threads_number: int) -> int:
"""
Method calculating number of OpenMP threads to assign to rsa3d program's process.
:param simulation_number: Number (index) of simulation from range [0, parallel_simulations_number - 1]
:param parallel_simulations_number: Number of parallel running simulations
:param parallel_threads_number: Overall available number of threads to assign to simulations
:return: Number of OpenMP threads to assign to rsa3d program's process
"""
if parallel_threads_number < parallel_simulations_number:
self.logger.warning(msg="Assigned threads number {} is lower than"
" parallelized simulations number {}".format(parallel_threads_number,
parallel_simulations_number))
if not self.parallel or parallel_threads_number <= parallel_simulations_number:
return 1
omp_threads = self.parallel_threads_number // parallel_simulations_number
if simulation_number >= parallel_simulations_number * (omp_threads + 1) - self.parallel_threads_number:
omp_threads += 1
return omp_threads
    # TODO Maybe rename this function (to rsa_simulation_serial) and make the second (rsa_simulation_parallel),
    # and in current function use nested "with" statements for closing process and file objects after waiting for
    # the process and return nothing.
    # Maybe create other functions for reading mean packing fractions from packing-fraction-vs-params.txt file for both
    # serial and parallel computing cases.
    def get_rsa_simulation_process(self, arg: np.ndarray) -> Tuple[io.TextIOBase, subprocess.Popen]:
        """
        Function running simulations for particle shape specified by arg and returning rsa3d output file
        and rsa3d process.

        The returned file object is intentionally left open - the caller is responsible for waiting for the
        process and closing the file afterwards.

        :param arg: Phenotype candidate's point
        :return: 2-tuple with the opened rsa3d output file object and the running subprocess.Popen object
        """
        # Run a process with rsa3d program running simulation
        print("Argument: {}".format(arg))
        rsa_proc_arguments = self.rsa_proc_arguments[:]  # Copy the values of the template arguments
        particle_attributes = self.arg_to_particle_attributes(arg)
        print("particleAttributes: {}".format(particle_attributes))
        rsa_proc_arguments.insert(self.rsa_proc_args_last_param_index, "-particleAttributes=" + particle_attributes)
        # TODO Maybe add setting ompThreads option in case of parallel computation (otherwise specify it explicitly),
        # maybe calculate ompThreads based on self.CMAES.popsize and number of CPU cores obtained using
        # multiprocessing module - DONE in rsa_simulation method
        simulation_labels = ",".join([str(self.CMAES.countiter), str(self.candidate_num), str(self.simulations_num),
                                      " ".join(map(str, arg))])
        rsa_proc_arguments.append(simulation_labels)
        self.simulations_num += 1
        # TODO In case of reevaluation (UH-CMA-ES), simulation_labels will have to identify the evaluation correctly
        # (self.simulations_num should be fine to ensure distinction)
        # Create subdirectory for output of rsa3d program in this simulation.
        # simulation_labels contain generation number, candidate number and evaluation number.
        simulation_output_dir = self.rsa_output_dir + "/" + "_".join(simulation_labels.split(",")[:3])
        if not os.path.exists(simulation_output_dir):
            os.makedirs(simulation_output_dir)
            # Maybe use shutil instead
        # Create a file for saving the output of rsa3d program
        # rsa_output_filename = simulation_output_dir + "/packing_" + particle_attributes.replace(" ", "_")
        # # surfaceVolume parameter is not appended to filename if it is given in input file
        # if not self.input_given:
        #     rsa_output_filename += "_" + str(self.all_rsa_parameters["surfaceVolume"])
        # rsa_output_filename += "_output.txt"
        rsa_output_filename = simulation_output_dir + "/rsa-simulation-output.txt"
        # # Serial computing case
        # with open(rsa_output_filename, "w+") as rsa_output_file:
        #     # Open a process with simulation
        #     with subprocess.Popen(rsa_proc_arguments, stdout=rsa_output_file, stderr=rsa_output_file,
        #                           cwd=self.rsa_output_dir) as rsa_process:
        #         rsa_process.wait()
        # The file stays open on purpose - it is returned together with the process (see docstring)
        rsa_output_file = open(rsa_output_filename, "w+")
        # TODO To be removed - for debugging
        print(" ".join(rsa_proc_arguments))
        rsa_process = subprocess.Popen(rsa_proc_arguments,
                                       stdout=rsa_output_file,
                                       stderr=rsa_output_file,
                                       cwd=simulation_output_dir)
        return rsa_output_file, rsa_process
    def evaluate_generation_serial(self, pheno_candidates: List[np.ndarray]) -> List[float]:
        """
        Method running rsa3d simulations for all phenotype candidates in generation one after another
        and collecting the results.

        :param pheno_candidates: List of NumPy ndarrays with phenotype candidates in generation
        :return: List of fitness function values (minus mean packing fraction) for respective candidates
        """
        self.candidate_num = 0
        values = []
        for pheno_candidate in pheno_candidates:
            print("\nGeneration no. {}, candidate no. {}".format(self.CMAES.countiter, self.candidate_num))
            # Run simulation
            rsa_output_file, rsa_process = self.get_rsa_simulation_process(pheno_candidate)
            rsa_process.wait()
            rsa_output_file.close()
            # TODO Check, if in this solution (without "with" statement) process ends properly
            # and if this solution is safe
            # Previous, not working solution:
            # with self.rsa_simulation(pheno_candidate) as (rsa_output_file, rsa_process):
            #     rsa_process.wait()
            # Get the packing fraction from the file
            # See https://stackoverflow.com/questions/3346430/what-is-the-most-efficient-way-to-get-first
            # -and-last-line-of-a-text-file
            with open(self.output_filename, "rb") as rsa_output_file:
                # In serial computing reading the last line is sufficient. In parallel computing the right line
                # will have to be found
                # Scan backwards, byte by byte, from the end of the file to the beginning of the last line
                rsa_output_file.seek(0, os.SEEK_END)
                while rsa_output_file.read(1) != b"\n":
                    if rsa_output_file.tell() > 1:
                        rsa_output_file.seek(-2, os.SEEK_CUR)
                    else:
                        # Beginning of the file
                        break
                last_line = rsa_output_file.readline().decode()
                # Mean packing fraction is written as the second value, values are separated by tabulators
                mean_packing_fraction = float(last_line.split("\t")[1])
                print("Mean packing fraction: {}".format(mean_packing_fraction))
                # The fitness is minimized, hence the minus sign
                values.append(-mean_packing_fraction)
            self.candidate_num += 1
        return values
# TODO Optimize parallel computing so that all of the time the right (optimal) number of threads is running.
# Maybe use multiprocessing.cpu_count() value and multiprocessing.Pool (the latter - if it makes sense in this
# case)
def evaluate_generation_parallel(self, pheno_candidates: List[np.ndarray]) -> List[float]:
self.candidate_num = 0
values = np.empty(len(pheno_candidates), dtype=np.float)
rsa_processes_and_outputs = []
# TODO Maybe implement repeating these loops if population size is bigger than for example
# (number of CPU cores) * 2.
for pheno_candidate in pheno_candidates:
rsa_processes_and_outputs.append(self.get_rsa_simulation_process(pheno_candidate))
self.candidate_num += 1
# TODO Check if using EvalParallel from cma package or multiprocessing (e.g. Pool) together with Popen
# makes sense
for rsa_output_file, rsa_process in rsa_processes_and_outputs:
rsa_process.wait()
rsa_output_file.close()
print("Generation no. {}, end of candidate no. {} evaluation".format(self.CMAES.countiter,
self.candidate_num))
with open(self.output_filename, "rb") as rsa_output_file:
# TODO Maybe find more efficient or elegant solution
# TODO Maybe iterate through lines in file in reversed order - results of the current generation should be
# at the end
for line in rsa_output_file:
# Does the line need to be decoded (line_str = line.decode())?
evaluation_data = line.split(b"\t")
evaluation_labels = evaluation_data[0].split(b",")
if int(evaluation_labels[0]) == self.CMAES.countiter:
candidate_num = int(evaluation_labels[1])
mean_packing_fraction = float(evaluation_data[1])
values[candidate_num] = -mean_packing_fraction
return values.tolist() # or list(values), because values.ndim == 1
    def rsa_simulation(self, candidate_num: int, arg: np.ndarray,
                       particle_attributes: Optional[str] = None, omp_threads: Optional[int] = None,
                       simulation_num: Optional[int] = None) -> int:
        """
        Function running simulations for particle shape specified by arg and waiting for rsa3d process.
        It assigns proper number of OpenMP threads to the rsa3d evaluation.
        :param candidate_num: Identification number of the candidate
        :param arg: Phenotype candidate's point
        :param particle_attributes: Particle attributes computed for arg - if not given, they are computed
        :param omp_threads: Number of OpenMP threads to assign to rsa3d program - if not given, they are computed
        :param simulation_num: Simulation number - needs to be passed only when self.okeanos option is True and the
                               method is called by evaluate_generation_parallel_in_pool method
        :return: RSA simulation's return code (-1 if an exception was raised in this method)
        """
        # The whole body is wrapped in try/except - on any failure -1 is returned and the candidate is resampled
        try:
            sim_start_time = datetime.datetime.now()
            # Run a process with rsa3d program running simulation
            rsa_proc_arguments = self.rsa_proc_arguments[:]  # Copy the values of the template arguments
            if particle_attributes is None:
                particle_attributes = self.arg_to_particle_attributes(arg)
            rsa_proc_arguments.insert(self.rsa_proc_args_last_param_index, "-particleAttributes=" + particle_attributes)
            # TODO Maybe move part of this code to constructor
            # omp_threads_attribute = str(self.omp_threads)
            # if self.parallel and self.parallel_threads_number > self.CMAES.popsize\
            #         and candidate_num >= self.CMAES.popsize * (self.omp_threads + 1) - self.parallel_threads_number:
            #     omp_threads_attribute = str(self.omp_threads + 1)
            if omp_threads is not None:
                omp_threads_attribute = str(omp_threads)
            else:
                if self.parallel:
                    if self.okeanos:
                        omp_threads_attribute = str(self.parallel_threads_number)
                    else:
                        omp_threads_attribute = str(self.omp_threads_number(candidate_num, self.pool_workers_number,
                                                                            self.parallel_threads_number))
                else:
                    omp_threads_attribute = str(self.parallel_threads_number)
            rsa_proc_arguments.insert(self.rsa_proc_args_last_param_index, "-ompThreads=" + omp_threads_attribute)
            # Maybe use candidate_num instead of simulation_num to label rsa3d processes' stdins
            okeanos_node_process = simulation_num is not None
            if self.parallel and not okeanos_node_process:
                simulation_num = self.simulations_num
            simulation_labels = ",".join([str(self.CMAES.countiter), str(candidate_num),
                                          str(simulation_num if self.parallel else self.simulations_num),
                                          " ".join(map(str, arg))])
            # Earlier: str(self.simulations_num), str(self.CMAES.countevals), str(self.CMAES.countiter),
            # str(candidate_num) - self.CMAES.countevals value is updated of course only after the end of each
            # generation. self.simulations_num is the number ordering the beginning of simulation, the position of data
            # in packing-fraction-vs-params.txt corresponds to ordering of the end of simulation, and from generation
            # number (self.CMAES.countiter), population size and candidate number one can calculate the number of
            # evaluation mentioned in self.CMAES optimizer e.g. in the result (number of evaluation for the best
            # solution)
            rsa_proc_arguments.append(simulation_labels)
            if not (self.okeanos and okeanos_node_process):
                # If this method was run by MPIPoolExecutor worker, original optimization object would not be modified
                self.simulations_num += 1
            # TODO In case of reevaluation (UH-CMA-ES), simulation_labels will have to identify the evaluation correctly
            # (self.simulations_num should be fine to ensure distinction)
            # Create subdirectory for output of rsa3d program in this simulation.
            # simulation_labels contain generation number, candidate number and evaluation number.
            simulation_output_dir = self.rsa_output_dir + "/{:03d}_{:02d}_{:04d}" \
                .format(*map(int, simulation_labels.split(",")[:3]))
            if not os.path.exists(simulation_output_dir):
                os.makedirs(simulation_output_dir)
                # Maybe use shutil instead
            # Create rsa3d input file containing simulation-specific parameters in simulation output directory
            with open(simulation_output_dir + "/rsa-simulation-input.txt", "w+") as rsa_input_file:
                rsa_input_file.write("ompThreads = {}\n".format(omp_threads_attribute))
                rsa_input_file.write("particleAttributes = {}\n".format(particle_attributes))
            # Check the node ID if run on Okeanos
            node_message = ""
            if self.okeanos:
                try:
                    node_id = subprocess.check_output(["hostname"]).decode().strip()
                except subprocess.CalledProcessError as exception:
                    # NOTE(review): the return code is negated in this log message (and in the ones below) -
                    # confirm that this is intended
                    self.logger.warning(msg="subprocess.CalledProcessError raised when checking host name:"
                                            " command: {}, return code: {}, output: \"{}\".\n{}\n"
                                            "Node ID will not be logged.".format(exception.cmd,
                                                                                 -exception.returncode,
                                                                                 exception.output.decode(),
                                                                                 traceback.format_exc(limit=6).strip()))
                    node_id = "not checked"
                except Exception as exception:
                    self.logger.warning(msg="Exception raised when checking host name; {}: {}\n"
                                            "{}".format(type(exception).__name__, exception,
                                                        traceback.format_exc(limit=6).strip()))
                    node_id = "not checked"
                node_message = "NID: {}, ".format(node_id)
            # Create a file for saving the output of rsa3d program
            rsa_output_filename = simulation_output_dir + "/rsa-simulation-output.txt"
            with open(rsa_output_filename, "w+") as rsa_output_file:
                # Open a process with simulation
                with subprocess.Popen(rsa_proc_arguments,
                                      stdin=subprocess.PIPE,  # Maybe specify it only if self.parallel
                                      stdout=rsa_output_file,
                                      stderr=rsa_output_file,
                                      cwd=simulation_output_dir) as rsa_process:
                    pid = rsa_process.pid
                    self.logger.info(msg="RSA simulation start: generation no. {}, candidate no. {}, simulation no. {},"
                                         " {}PID: {}, ompThreads: {}\n"
                                         "Argument: {}\n"
                                         "particleAttributes: {}".format(*simulation_labels.split(",")[:3],
                                                                         node_message,
                                                                         pid,
                                                                         omp_threads_attribute,
                                                                         pprint.pformat(arg),
                                                                         particle_attributes))
                    # For debugging
                    # self.logger.debug(msg="RSA simulation process call: {}".format(" ".join(rsa_proc_arguments)))
                    if self.parallel and not self.okeanos:
                        # Keep the process' stdin so the ompThreads message below can be sent to it later
                        self.rsa_processes_stdins[simulation_num] = rsa_process.stdin
                    return_code = rsa_process.wait()
            sim_end_time = datetime.datetime.now()
            threads_message = ""
            if self.parallel and not self.okeanos:
                self.remaining_pool_simulations -= 1
                del self.rsa_processes_stdins[simulation_num]
                if 0 < self.remaining_pool_simulations < self.pool_workers_number:
                    # Send messages with new numbers of OpenMP threads to rsa3d processes
                    # TODO Maybe remember numbers of threads and send messages only when numbers need to be changed
                    threads_message = "\nRemaining parallel simulations: {}." \
                                      " Increasing numbers of OpenMP threads.\n" \
                                      "simulation number: ompThreads".format(self.remaining_pool_simulations)
                    for rsa_process_num, remaining_sim_num, rsa_process_stdin\
                            in zip(list(range(len(self.rsa_processes_stdins))),
                                   list(self.rsa_processes_stdins),
                                   list(self.rsa_processes_stdins.values())):
                        new_omp_threads = self.omp_threads_number(rsa_process_num, self.remaining_pool_simulations,
                                                                  self.parallel_threads_number)
                        # Flushing is needed to send the message and the newline is needed for rsa3d program to end
                        # reading the message
                        rsa_process_stdin.write("ompThreads:{}\n".format(new_omp_threads).encode())
                        rsa_process_stdin.flush()
                        threads_message += "\n{}: {}".format(remaining_sim_num, new_omp_threads)
            # Get collectors' number
            # rsa_data_file_lines_count = subprocess.check_output(["wc", "-l",
            #                                                      glob.glob(simulation_output_dir + "/*.dat")[0]])
            # On Okeanos in the first generation this command used to fail
            try:
                rsa_data_file_lines_count = subprocess.check_output(["wc", "-l",
                                                                     glob.glob(simulation_output_dir + "/*.dat")[0]])
                collectors_num_message = str(int(rsa_data_file_lines_count.strip().split()[0]))
            except subprocess.CalledProcessError as exception:
                self.logger.warning(msg="subprocess.CalledProcessError raised when checking collectors number:"
                                        " command: {}, return code: {},"
                                        " output: \"{}\".\n{}".format(exception.cmd,
                                                                      -exception.returncode,
                                                                      exception.output.decode(),
                                                                      traceback.format_exc(limit=6).strip()))
                # Retry with "shell" option set to True before giving up
                try:
                    rsa_data_file_lines_count = subprocess.check_output(["wc", "-l",
                                                                         glob.glob(simulation_output_dir
                                                                                   + "/*.dat")[0]],
                                                                        shell=True)
                    collectors_num_message = str(int(rsa_data_file_lines_count.strip().split()[0]))
                except subprocess.CalledProcessError as exception:
                    self.logger.warning(msg="subprocess.CalledProcessError raised when checking collectors number with"
                                            " \"shell\" option set to True: command: {}, return code: {},"
                                            " output: \"{}\".\n{}\nCollectors number"
                                            " will not be logged.".format(exception.cmd,
                                                                          -exception.returncode,
                                                                          exception.output.decode(),
                                                                          traceback.format_exc(limit=6).strip()))
                    collectors_num_message = "not checked"
                except Exception as exception:
                    self.logger.warning(msg="Exception raised when checking collectors number; {}: {}\n{}\nCollectors"
                                            " number will not be logged.".format(type(exception).__name__, exception,
                                                                                 traceback.format_exc(limit=6).strip()))
                    collectors_num_message = "not checked"
            except Exception as exception:
                self.logger.warning(msg="Exception raised when checking collectors number; {}: {}\n{}\nCollectors"
                                        " number will not be logged.".format(type(exception).__name__, exception,
                                                                             traceback.format_exc(limit=6).strip()))
                collectors_num_message = "not checked"
            self.logger.info(msg="RSA simulation end: generation no. {}, candidate no. {}, simulation no. {},"
                                 " {}PID: {}. Time: {}, collectors: {}, return code: {}"
                                 "{}".format(*simulation_labels.split(",")[:3],
                                             node_message,
                                             pid,
                                             str(sim_end_time - sim_start_time),
                                             collectors_num_message,
                                             str(return_code),
                                             threads_message))
            # self.logger.debug(msg="remaining_pool_simulations: {}, rsa_processes_stdins number: {}".format(
            #     self.remaining_pool_simulations, len(self.rsa_processes_stdins)))
            # self.logger.debug(msg="rsa_processes_stdins:\n{}".format(self.rsa_processes_stdins))
        # TODO Maybe check also for subprocess.CalledProcessError and set return code appropriately
        except Exception as exception:
            self.logger.warning(msg="Exception raised in rsa_simulation method for generation no. {}, candidate no. {},"
                                    " argument: {}; {}: {}\n"
                                    "{}\n"
                                    "Candidate will be resampled.".format(str(self.CMAES.countiter), str(candidate_num),
                                                                          pprint.pformat(arg),
                                                                          type(exception).__name__, exception,
                                                                          traceback.format_exc(limit=6).strip()))
            return_code = -1
        return return_code
# def safe_rsa_simulation_wrapper(self, rsa_simulation_method: Callable[..., int]) -> Callable[..., int]:
# def safe_rsa_simulation(custom_self, candidate_num: int, arg: np.ndarray,
# particle_attributes: Optional[str] = None, omp_threads: Optional[int] = None,
# simulation_num: Optional[int] = None) -> int:
# rsa_simulation_method.__doc__
#
# try:
# return_code = rsa_simulation_method(candidate_num, arg, particle_attributes,
# omp_threads, simulation_num)
# # TODO Maybe check also for subprocess.CalledProcessError and set return code appropriately
# except Exception as exception:
# self.logger.warning(msg="Exception raised in rsa_simulation method for generation no. {},"
# " candidate no. {}, simulation no. {}; {}: {}\n{}\n"
# "Candidate will be resampled.".format(self.CMAES.countiter, candidate_num,
# simulation_num,
# type(exception).__name__, exception,
# traceback.format_exc(limit=6).strip()))
# return_code = -1
# return return_code
# return safe_rsa_simulation
# TODO Maybe find a way to pass arguments to optrsa program's process in runtime to change the overall number of
# used threads from the next generation on
def evaluate_generation_parallel_in_pool(self, pheno_candidates: List[np.ndarray],
cand_particle_attributes: Optional[List[str]] = None)\
-> Tuple[List[float], List[int]]:
"""
Method running rsa simulations for all phenotype candidates in generation.
It evaluates self.run_simulation method for proper number of candidates in parallel.
It uses multiprocessing.pool.ThreadPool for managing a pool of workers.
concurrent.futures.ThreadPoolExecutor is an alternative with poorer API.
:param pheno_candidates: List of NumPy ndarrays containing phenotype candidates in generation
:param cand_particle_attributes: List of candidates' particleAttributes parameters (optional)
:return: 2-tuple with list of fitness function values (minus mean packing fraction) for respective phenotype
candidates and list of return codes of RSA simulations for respective phenotype candidates. If the
RSA simulation for a candidate failed or was terminated, the respective candidates' value is np.NaN.
"""
# values = np.zeros(len(pheno_candidates), dtype=np.float)
values = np.full(shape=len(pheno_candidates), fill_value=np.NaN, dtype=np.float)
# TODO Maybe define these attributes in constructor
self.pool_workers_number = self.parallel_simulations_number # Maybe it will be passed as an argument
if not self.okeanos:
self.remaining_pool_simulations = len(pheno_candidates)
self.rsa_processes_stdins = {}
# cand_sim_omp_threads = [self.omp_threads_number(sim_num, self.pool_workers_number,
# self.parallel_threads_number)
# for sim_num in range(len(pheno_candidates))]
# It is said that multiprocessing module does not work with class instance method calls,
# but in this case multiprocessing.pool.ThreadPool seems to work fine with the run_simulation method.
with multiprocessing.pool.ThreadPool(processes=self.pool_workers_number) as pool:
simulations_arguments = list(enumerate(pheno_candidates)) if cand_particle_attributes is None\
else list(zip(list(range(len(pheno_candidates))), pheno_candidates, cand_particle_attributes))
return_codes = pool.starmap(self.rsa_simulation, simulations_arguments)
# Maybe read the last packing fraction value after waiting for simulation process in
# rsa_simulation method and check if the simulation labels are correct (which means that rsa3d program
# successfully wrote the last line)
# TODO Maybe add (it works - probably it doesn't need closing the pool):
# pool.close()
# pool.join()
# TODO Consider returning status from Popen objects (if it is possible) and getting them from pool.map and
# checking, if rsa simulations finished correctly
else:
with MPIPoolExecutor(max_workers=self.pool_workers_number) as pool:
candidate_nums = list(range(len(pheno_candidates)))
none_list = [None] * len(pheno_candidates)
simulation_nums = list(range(self.simulations_num, self.simulations_num + len(pheno_candidates)))
simulations_arguments = list(zip(candidate_nums, pheno_candidates, none_list,
none_list, simulation_nums)) if cand_particle_attributes is None\
else list(zip(candidate_nums, pheno_candidates, cand_particle_attributes,
none_list, simulation_nums))
return_codes_iterator = pool.starmap(self.rsa_simulation, simulations_arguments)
return_codes = list(return_codes_iterator)
self.simulations_num += len(pheno_candidates)
with open(self.output_filename, "r") as rsa_output_file:
# TODO Maybe find more efficient or elegant solution
# TODO Maybe iterate through lines in file in reversed order - results of the current generation should be
# at the end
for line in rsa_output_file:
evaluation_data = line.split("\t")
evaluation_labels = evaluation_data[0].split(",")
if int(evaluation_labels[0]) == self.CMAES.countiter:
candidate_num = int(evaluation_labels[1])
mean_packing_fraction = float(evaluation_data[1])
values[candidate_num] = -mean_packing_fraction
# TODO Add checking if there exists a zero value in values list and deal with the error (in such a case
# record for corresponding candidate wasn't found in packing-fraction-vs-params.txt file)
# return values.tolist() # or list(values), because values.ndim == 1
return list(values), return_codes
    def default_rsa_simulation(self, candidate_num: int, simulation_num: int, first_collector_num: int,
                               rsa_proc_arguments: List[str], task_submitting_time: datetime.datetime,
                               collectors_num: Optional[int] = 1, first_part_sim: Optional[bool] = False) \
            -> Tuple[int, int, int, int, int, str, int, datetime.datetime, str, np.ndarray]:
        """
        Run a partial RSA simulation generating collectors_num collectors using rsa_proc_arguments and wait for the
        rsa3d process. Meant to be submitted to the worker pool by the run_simulations_on_okeanos method.

        The method never (intentionally) raises: any exception is caught, logged, and reported through a nonzero
        return code in the result, so that the pool callback (manage_tasks) can decide to resubmit the task.

        :param candidate_num: Identification number of the candidate
        :param simulation_num: Identification number of the simulation
        :param first_collector_num: Identification number of the first collector
        :param rsa_proc_arguments: rsa3d process' arguments; a copy is extended with the "-from" and "-collectors"
                                   parameters
        :param task_submitting_time: datetime.datetime object representing the time of submitting the partial simulation
                                     task to the pool of workers in run_simulations_on_okeanos method. Used in a logging
                                     purpose (to report the task beginning delay).
        :param collectors_num: Number of collectors to generate (optional), defaults to 1
        :param first_part_sim: bool value telling if the partial simulation is the first in entire RSA simulation for
                               the phenotype candidate. Used in a logging purpose.
        :return: DefaultRSASimulationResult namedtuple object containing result of the partial simulation
                 (candidate/simulation/collector identifiers, return code, node message, PID, start time, wall time
                 string and the collectors' particle numbers as an np.ndarray)
        """
        sim_start_time = datetime.datetime.now()
        # Defaults reported when the simulation fails before the corresponding value is set
        return_code: int = 0
        particles_numbers: list = []
        pid: int = -1
        node_message: str = ""
        time: str = ""
        if self.okeanos_parallel:
            # Identify the worker node for logging; failures here are non-fatal
            try:
                node_id = subprocess.check_output(["hostname"]).decode().strip()
            except subprocess.CalledProcessError as exception:
                # NOTE(review): the return code is negated here, unlike in other log messages - confirm whether this
                # is intended (e.g. to report a terminating signal number as positive)
                self.logger.warning(msg="subprocess.CalledProcessError raised when checking host name:"
                                        " command: {}, return code: {}, output: \"{}\".\n{}\n"
                                        "Node ID will not be logged.".format(exception.cmd,
                                                                             -exception.returncode,
                                                                             exception.output.decode(),
                                                                             traceback.format_exc(limit=6).strip()))
                node_id = "not checked"
            except Exception as exception:
                self.logger.warning(msg="Exception raised when checking host name; {}: {}\n"
                                        "{}".format(type(exception).__name__, exception,
                                                    traceback.format_exc(limit=6).strip()))
                node_id = "not checked"
            node_message = "NID: {}, ".format(node_id)
        try:
            if first_part_sim:  # first_collector_num == 0
                # NOTE(review): when node_message is empty (okeanos_parallel False), ", " + node_message[:-2]
                # still yields ", ", leaving a trailing comma in this message
                self.logger.info(msg="\nRSA simulation start: generation number {}, candidate number {},"
                                     " simulation number {}{}\n".format(self.CMAES.countiter,
                                                                        candidate_num,
                                                                        simulation_num,
                                                                        ", " + node_message[:-2]))
            # Prepare RSA process arguments - copy so the caller's list is not mutated
            rsa_proc_arguments = rsa_proc_arguments[:]  # Maybe it is not needed
            rsa_proc_arguments.extend(["-from=" + str(first_collector_num), "-collectors=" + str(collectors_num)])
            # appendToDat parameter is not added by "-appendToDat=true", because appending is now a default operation
            # Create a file for saving the output of rsa3d program
            simulation_output_dir = self.rsa_output_dir + "/{:03d}_{:02d}_{:04d}".format(self.CMAES.countiter,
                                                                                        candidate_num,
                                                                                        simulation_num)
            # NOTE(review): for multiple collectors the filename's upper bound is first_collector_num
            # + collectors_num, i.e. one past the last generated collector index - confirm this is intended
            collectors_filename_info = "collector"
            if collectors_num == 1:
                collectors_filename_info += "-" + str(first_collector_num)
            else:
                collectors_filename_info += "s-{}-{}".format(first_collector_num, first_collector_num + collectors_num)
            rsa_output_filename = simulation_output_dir + "/rsa-{}-output.txt".format(collectors_filename_info)
            with open(rsa_output_filename, "w+") as rsa_output_file:
                # Open a process with simulation; both stdout and stderr are captured in the output file
                with subprocess.Popen(rsa_proc_arguments,
                                      # stdin=subprocess.PIPE,
                                      stdout=rsa_output_file,
                                      stderr=rsa_output_file,
                                      cwd=simulation_output_dir) as rsa_process:
                    pid = rsa_process.pid
                    self.logger.info(msg="RSA part. sim. start: gen. no. {}, cand. no. {}, sim. no. {},"
                                         " first col. no.: {}, collectors: {}."
                                         " {}PID: {}, task beg. delay: {}".format(self.CMAES.countiter,
                                                                                  candidate_num,
                                                                                  simulation_num,
                                                                                  first_collector_num,
                                                                                  collectors_num,
                                                                                  node_message,
                                                                                  pid,
                                                                                  sim_start_time
                                                                                  - task_submitting_time))
                    self.logger.debug(msg="RSA process arguments: {}".format(" ".join(rsa_proc_arguments)))
                    # For debugging
                    # self.logger.debug(msg="RSA simulation process call: {}".format(" ".join(rsa_proc_arguments)))
                    # Block until the rsa3d process finishes
                    return_code = rsa_process.wait()
            sim_end_time = datetime.datetime.now()
            time = str(sim_end_time - sim_start_time)
            if return_code != 0:
                self.logger.warning(msg="RSA partial simulation for generation no. {}, candidate no. {},"
                                        " simulation no. {}, first collector number: {}, collectors number: {}"
                                        " returned code: {}. {}PID: {}, time: {}.\nCollectors data"
                                        " will be ignored.".format(self.CMAES.countiter,
                                                                   candidate_num,
                                                                   simulation_num,
                                                                   first_collector_num,
                                                                   collectors_num,
                                                                   return_code,
                                                                   node_message,
                                                                   pid,
                                                                   time))
            # Read RSA data file and get collectors' particle numbers
            rsa_data_file_glob = glob.glob(simulation_output_dir + "/*.dat")
            if len(rsa_data_file_glob) == 0:
                raise FileNotFoundError("RSA data file not found in the simulation directory")
            with FileReadBackwards(rsa_data_file_glob[0]) as rsa_data_file:
                # Get lines one by one starting from the last line up; insert(0, ...) restores
                # ascending collector order in particles_numbers
                for line in rsa_data_file:
                    collector_data = line.split("\t")
                    collector_num = int(collector_data[0])
                    if first_collector_num <= collector_num < first_collector_num + collectors_num:
                        particles_numbers.insert(0, int(collector_data[1]))
                    if collector_num == first_collector_num:
                        break
            self.logger.debug(msg="RSA partial simulation end: generation no. {}, candidate no. {}, simulation no. {},"
                                  " first collector number: {}, collectors number: {},"
                                  " {}PID: {}. Time: {}, return code: {}\n"
                                  "Read collectors' particles numbers: {}".format(self.CMAES.countiter,
                                                                                  candidate_num,
                                                                                  simulation_num,
                                                                                  first_collector_num,
                                                                                  collectors_num,
                                                                                  node_message,
                                                                                  pid,
                                                                                  time,
                                                                                  return_code,
                                                                                  ", ".join(map(str,
                                                                                                particles_numbers))))
        except Exception as exception:
            # Any failure is converted into a nonzero return code so the caller can resubmit the task
            self.logger.warning(msg="Exception raised in default_rsa_simulation method for generation no. {},"
                                    " candidate no. {}, simulation no. {}, first collector number: {},"
                                    " collectors number: {}{}; {}: {}\n{}\nCollectors data"
                                    " will be ignored.".format(self.CMAES.countiter, candidate_num, simulation_num,
                                                               first_collector_num, collectors_num, node_message,
                                                               type(exception).__name__, exception,
                                                               traceback.format_exc(limit=6).strip()))
            return_code = -1 if return_code == 0 else return_code
        result = DefaultRSASimulationResult(candidate_num=candidate_num,
                                            simulation_num=simulation_num,
                                            first_collector_num=first_collector_num,
                                            collectors_num=collectors_num,
                                            return_code=return_code,
                                            node_message=node_message,
                                            pid=pid,
                                            start_time=sim_start_time,
                                            time=time,
                                            particles_numbers=np.array(particles_numbers))
        # return candidate_num, simulation_num, first_collector_num, collectors_num, return_code, node_message, pid, \
        #     sim_start_time, time, np.array(particles_numbers)
        return result
    def run_simulations_on_okeanos(self, pheno_candidates: List[np.ndarray],
                                   cand_particle_attributes: Optional[List[str]] = None,
                                   candidates_numbers: Optional[List[int]] = None) \
            -> Tuple[List[float], List[int]]:
        """
        Method running rsa simulations for phenotype candidates given in pheno_candidates parameter in parallel using
        self.nodes_number - 1 Okeanos nodes as workers. Each node evaluates self.collectors_per_task collectors at once
        during a partial simulation using self.default_rsa_simulation method. Worker nodes number can be bigger than,
        equal to or smaller than the candidates number. Each simulation ends if mean packing fraction standard deviation
        is equal to or smaller than self.accuracy, unless collectors number is smaller than self.min_collectors_number.
        The method uses mpi4py.futures.MPIPoolExecutor to submit tasks (partial simulations) to processes on worker
        nodes. After submitting initial tasks, results are managed and subsequent tasks are submitted using manage_tasks
        function as concurrent.futures.Future objects' callbacks. Since mpi4py.futures.MPIPoolExecutor is used, to
        guarantee that worker processes will be spawned on different nodes, SLURM job has to specify number of nodes and
        set ntasks-per-node option to 1. Then optrsa module has to be run using srun python -m mpi4py.futures -m optrsa
        (optrsa arguments).
        :param pheno_candidates: List of NumPy ndarrays containing phenotype candidates
        :param cand_particle_attributes: List of candidates' particleAttributes parameters (optional). If not given,
                                         they will be calculated in parallel. It has to be of the same length as
                                         pheno_candidates.
        :param candidates_numbers: List of candidates numbers (optional). If not given, they will be set to the indices
                                   of the pheno_candidates list. This argument can be used to specify candidates'
                                   numbers when repeating RSA simulations for several candidates in generation, it has
                                   to be of the same length as pheno_candidates.
        :return: 2-tuple with list of fitness function values (minus mean packing fraction) for phenotype candidates and
                 list of return codes of RSA simulations for phenotype candidates. If RSA simulation for a candidate
                 failed or was terminated, the candidate's value is np.NaN.
        """
        rsa_proc_arguments = self.rsa_proc_arguments[:]  # Copy the values of the template arguments
        simulations_num = len(pheno_candidates)
        if cand_particle_attributes is None:
            # Compute candidates' particleAttributes parameters in parallel (thread pool suffices here,
            # since arg_to_particle_attributes is called per candidate)
            with multiprocessing.pool.ThreadPool(processes=min(simulations_num, os.cpu_count())) as pool:
                cand_particle_attributes = pool.map(self.arg_to_particle_attributes, pheno_candidates)
        if candidates_numbers is None:
            candidates_numbers = list(range(simulations_num))
        simulations_numbers = list(range(self.simulations_num, self.simulations_num + simulations_num))
        # TODO Maybe move it to the set_rsa_proc_arguments method
        rsa_proc_arguments.append("-ompThreads=" + str(self.parallel_threads_number))
        # For each simulation create a subdirectory for output of rsa3d program and rsa3d input file
        for candidate_num, simulation_num, particle_attributes in zip(candidates_numbers, simulations_numbers,
                                                                      cand_particle_attributes):
            simulation_output_dir = self.rsa_output_dir + "/{:03d}_{:02d}_{:04d}".format(self.CMAES.countiter,
                                                                                        candidate_num,
                                                                                        simulation_num)
            if not os.path.exists(simulation_output_dir):
                os.makedirs(simulation_output_dir)
            # Create rsa3d input file containing simulation-specific parameters in the simulation output directory
            with open(simulation_output_dir + "/rsa-simulation-input.txt", "w") as rsa_input_file:
                rsa_input_file.write("ompThreads = {}\n".format(str(self.parallel_threads_number)))
                rsa_input_file.write("particleAttributes = {}\n".format(particle_attributes))
        # NOTE(review): np.float and np.int are deprecated aliases of the builtins and were removed
        # in NumPy 1.24 - consider plain float / int here
        values = np.full(shape=simulations_num, fill_value=np.NaN, dtype=np.float)
        return_codes = np.full(shape=simulations_num, fill_value=-1, dtype=np.int)
        self.pool_workers_number = self.nodes_number - 1  # Maybe it will be passed as an argument
        self.logger.info(msg="Generation no. {}, running {} simulations"
                             " using {} nodes".format(self.CMAES.countiter, simulations_num, self.pool_workers_number))
        for candidate_num, simulation_num, arg, particle_attributes in zip(candidates_numbers, simulations_numbers,
                                                                           pheno_candidates, cand_particle_attributes):
            self.logger.info(msg="Candidate no. {}, simulation no. {}".format(candidate_num, simulation_num))
            self.logger.info(msg="Argument: {}".format(arg))  # pprint.pformat(arg)
            self.logger.info(msg="particleAttributes: {}".format(particle_attributes))

        def simulation_nodes_number(simulation_number: int, parallel_simulations_number: int) -> int:
            """
            Method calculating number of nodes to assign to RSA simulation. Divides the worker nodes
            evenly and gives the remainder, one node each, to the lowest-numbered simulations.
            :param simulation_number: Number (index) of simulation from range [0, parallel_simulations_number - 1]
            :param parallel_simulations_number: Number of parallel running simulations
            :return: Number of nodes to assign to RSA simulation
            """
            sim_nodes_number = self.pool_workers_number // parallel_simulations_number
            if simulation_number < self.pool_workers_number - parallel_simulations_number * sim_nodes_number:
                sim_nodes_number += 1
            return sim_nodes_number
        # TODO Maybe add adjusting collectors_per_task value
        # simulations_nodes_numbers = np.ndarray([simulation_nodes_number(sim_num, simulations_num)
        #                                         for sim_num in range(simulations_num)])
        # active_simulations_indices = np.arange(simulations_num)
        # ending_sim_lock = multiprocessing.Lock()
        # Serializes the "simulation finished" bookkeeping (pending_simulations, node reassignment)
        ending_sim_lock = threading.Lock()

        class OkeanosSimulation:
            """Mutable per-candidate simulation state shared between submit_task and manage_tasks."""
            collectors_ids: np.ndarray  # list of used collectors' numbers
            particles_nums: np.ndarray  # list of the corresponding particles' numbers
            packing_frac: float  # mean packing fraction
            standard_dev: float  # mean packing fraction standard deviation
            next_collector: int  # the collector index for the next task to be submitted (first free index not
            # corresponding to a submitted task)
            # data_lock: multiprocessing.Lock
            data_lock: threading.Lock  # protects the statistics above while a callback updates them
            start_time: Union[None, datetime.datetime]
            pending_part_sims: int
            nodes_number: int
            active: bool
            tasks: list

            def __init__(self):
                self.collectors_ids = np.empty(shape=0, dtype=np.int)
                self.particles_nums = np.empty(shape=0, dtype=np.int)
                self.packing_frac = 0
                self.standard_dev = 0
                self.next_collector = 0
                # self.data_lock = multiprocessing.Lock()
                self.data_lock = threading.Lock()
                self.start_time = None
                self.pending_part_sims = 0
                self.nodes_number = 0
                self.active = True
                self.tasks = []
        sims_data = [OkeanosSimulation() for _ in range(simulations_num)]
        packing_area = float(self.rsa_parameters["surfaceVolume"]) if self.input_given \
            else float(self.all_rsa_parameters["surfaceVolume"])
        # self.accuracy
        pending_simulations = simulations_num
        # evaluation_finished = multiprocessing.Event()
        # After the end of evaluation program was still waiting when using multiprocessing.Event()
        # TODO Maybe try using threading.Event() instead, maybe try using threading (or multiprocessing) Condition
        normal_evaluation = True
        # Solution inspired by https://stackoverflow.com/questions/51879070/python-executor-spawn-tasks-from-done
        # -callback-recursively-submit-tasks
        with MPIPoolExecutor(max_workers=self.pool_workers_number) as pool:
            # Future callback: consumes one partial-simulation result, updates the owning simulation's
            # statistics, and either submits a follow-up task or finalizes the simulation.
            def manage_tasks(future: Future) -> None:
                try:
                    nonlocal pending_simulations, normal_evaluation  # , sims_data
                    if not normal_evaluation:
                        return
                    if future.cancelled():
                        # Find the owning simulation by identity of the future object
                        for sim in sims_data:
                            if future in sim.tasks:
                                sim.pending_part_sims -= 1
                                sim.tasks.remove(future)
                                break
                        # if pending_simulations == 0:  # Maybe it is not needed
                        #     evaluation_finished.set()
                        return
                    # try:
                    #     (candidate_num, first_collector_num, collectors_num, return_code,
                    #      particles_numbers) = future.result()
                    # except Exception as exception:
                    #     # TODO Maybe change it in order to avoid infinite recursion
                    #     self.logger.warning(msg="Exception raised while getting the future's result in manage_tasks"
                    #                             " function {}: {}\n{}\nThe task will be"
                    #                             " submitted again.".format(type(exception).__name__, exception,
                    #                                                       traceback.format_exc(limit=6).strip()))
                    #     pool.submit(self.default_rsa_simulation, )  # We probably don't know the task's parameters
                    res: DefaultRSASimulationResult = future.result()  # During a normal operation,
                    # default_rsa_simulation method should catch every exception
                    # (candidate_num, simulation_num, first_collector_num, collectors_num, return_code, node_message,
                    #  pid, start_time, time, particles_numbers) = future.result()
                    # TODO Maybe store future objects in sim.tasks together with the call arguments
                    #  - default_rsa_simulation method would not have to return them. Then find simulation owning the
                    #  future:
                    # for sim in sims_data:
                    #     if future in sim.tasks:
                    #         sim.tasks.index(future)
                    #         break
                    # Simulation numbers are global counters; subtract the generation's base to index sims_data
                    simulation_index = res.simulation_num - self.simulations_num
                    sim = sims_data[simulation_index]
                    # if sim.start_time is None:  # res.first_collector_num == 0
                    #     sim.start_time = res.start_time
                    # One does not need to return res.start_time now
                    sim.pending_part_sims -= 1
                    sim.tasks.remove(future)
                    data_lock_message = ""
                    if not sim.data_lock.acquire(blocking=False):  # if not sim.data_lock.acquire(block=False):
                        self.logger.info(msg="Waiting for the data lock. {}".format(res.node_message[:-2]))
                        lock_pending_start = datetime.datetime.now()
                        sim.data_lock.acquire()
                        lock_pending_end = datetime.datetime.now()
                        data_lock_message = "Waiting for lock acquiring time: {}. ".format(lock_pending_end
                                                                                           - lock_pending_start)
                    # time.sleep(2)  # Testing the data lock
                    # Apparently locks aren't necessary, because callbacks are most probably executed sequentially
                    # Locks would be necessary if the future's callback function would asynchronously spawn another
                    # thread that would call manage_tasks function
                    if not sim.active:
                        # TODO Maybe don't ignore this results
                        self.logger.info(msg="RSA part. sim. end: gen. no. {}, cand. no. {}, sim. no. {},"
                                             " first col. no.: {}, collectors: {}, {}PID: {}, time: {}, ret. code: {}."
                                             " {}Pend. part. sims.: {}/{}. Simulation is not active,"
                                             " result will be ignored".format(self.CMAES.countiter,
                                                                              res.candidate_num,
                                                                              res.simulation_num,
                                                                              res.first_collector_num,
                                                                              res.collectors_num,
                                                                              res.node_message,
                                                                              res.pid,
                                                                              res.time,
                                                                              res.return_code,
                                                                              data_lock_message,
                                                                              sim.pending_part_sims,
                                                                              sim.nodes_number))
                        sim.data_lock.release()
                        # if pending_simulations == 0:
                        #     evaluation_finished.set()
                        return
                    if res.return_code != 0:
                        # TODO Maybe change it in order to avoid infinite recursion
                        # Warning was logged in the default_rsa_simulation method
                        # Submit a new task
                        sim.data_lock.release()
                        submit_task(simulation_index)
                        return
                    # Calculate new mean packing fraction and standard deviation and submit a new task, if the standard
                    # deviation is bigger than the accuracy, or end the simulation if it is smaller or equal to accuracy
                    # TODO If manage_tasks will be run in a separate thread, check if locking works and sims_data cannot
                    #  be changed during calculations
                    prev_collectors_number = sim.particles_nums.size
                    cur_collectors_number = prev_collectors_number + res.particles_numbers.size
                    # Incremental (running) mean of the packing fraction over all collectors so far
                    sim.packing_frac = (sim.packing_frac * prev_collectors_number + np.sum(res.particles_numbers)
                                        / packing_area) / cur_collectors_number
                    collectors_indices = np.arange(res.first_collector_num,
                                                   res.first_collector_num + res.collectors_num)
                    sim.collectors_ids = np.concatenate((sim.collectors_ids, collectors_indices))
                    sim.particles_nums = np.concatenate((sim.particles_nums, res.particles_numbers))
                    # TODO Implement calculating running mean standard deviation
                    if cur_collectors_number > 1:
                        # Standard deviation of the mean (sample std. dev. divided by sqrt(n))
                        sim.standard_dev = np.sqrt(np.sum(np.power(sim.particles_nums / packing_area - sim.packing_frac,
                                                                   2))
                                                   / (cur_collectors_number - 1) / cur_collectors_number)
                    if sim.standard_dev > self.accuracy or cur_collectors_number < self.min_collectors_number:
                        self.logger.info(msg="RSA part. sim. end: gen. no. {}, cand. no. {}, sim. no. {},"
                                             " first col. no.: {}, collectors: {}. {}Current pack. frac.: {},"
                                             " cur. std. dev.: {}, pend. part. sims.: {}/{}. {}PID: {}, time: {},"
                                             " ret. code: {}".format(self.CMAES.countiter,
                                                                     res.candidate_num,
                                                                     res.simulation_num,
                                                                     res.first_collector_num,
                                                                     res.collectors_num,
                                                                     data_lock_message,
                                                                     sim.packing_frac,
                                                                     sim.standard_dev,
                                                                     sim.pending_part_sims,
                                                                     sim.nodes_number,
                                                                     res.node_message,
                                                                     res.pid,
                                                                     res.time,
                                                                     res.return_code))
                        sim.data_lock.release()
                        # Submit a new task
                        submit_task(simulation_index)
                        return
                    else:
                        # End of the RSA simulation for the candidate
                        # TODO Maybe test storing candidates' future objects, maybe cancel pending tasks belonging to
                        #  an inactive simulation, maybe after the initial submits (which number is equal to the nodes
                        #  number) submit some additional tasks in order to feed pool of workers without delays
                        # TODO If manage_tasks will be run in a separate thread, check if locking works and
                        #  pending_simulations and active attributes are not changed during calculations
                        ending_sim_lock_message = ""
                        if not ending_sim_lock.acquire(blocking=False):  # if not ending_sim_lock.acquire(block=False):
                            self.logger.info(msg="Waiting for the ending simulation lock."
                                                 " {}".format(res.node_message[:-2]))
                            lock_pending_start = datetime.datetime.now()
                            ending_sim_lock.acquire()
                            lock_pending_end = datetime.datetime.now()
                            ending_sim_lock_message = "waiting for lock" \
                                                      " acquiring time: {}, ".format(lock_pending_end
                                                                                     - lock_pending_start)
                        # time.sleep(3)  # Testing the ending simulation lock
                        sim.active = False
                        sim.data_lock.release()
                        pending_simulations -= 1
                        values[simulation_index] = -sim.packing_frac
                        return_codes[simulation_index] = 0
                        # Adjust numbers of nodes assigned to the remaining simulations
                        sim_num = 0
                        nodes_message = "\nRemaining parallel simulations: {}. Increasing numbers of nodes assigned" \
                                        " to the simulations.\n" \
                                        "simulation number: nodes number".format(pending_simulations)
                        for sim_index, simulation in enumerate(sims_data):
                            if simulation.active:
                                prev_nodes_number = simulation.nodes_number
                                simulation.nodes_number = simulation_nodes_number(sim_num, pending_simulations)
                                # Submit one extra task per newly assigned node
                                for _ in range(simulation.nodes_number - prev_nodes_number):
                                    submit_task(sim_index)
                                nodes_message += "\n{}: {}".format(simulations_numbers[sim_index],
                                                                   simulation.nodes_number)
                                sim_num += 1
                        self.logger.info(msg="RSA part. sim. end: gen. no. {}, cand. no. {}, sim. no. {},"
                                             " first col. no.: {}, collectors: {}. {}Current pack. frac.: {},"
                                             " cur. std. dev.: {}, pend. part. sims.: {}/{}."
                                             " {}PID: {}, time: {}, ret. code: {}\n"
                                             "\nRSA simulation end. Time: {}, collectors: {},"
                                             " {}return code: 0\n{}\n".format(self.CMAES.countiter,
                                                                              res.candidate_num,
                                                                              res.simulation_num,
                                                                              res.first_collector_num,
                                                                              res.collectors_num,
                                                                              data_lock_message,
                                                                              sim.packing_frac,
                                                                              sim.standard_dev,
                                                                              sim.pending_part_sims,
                                                                              sim.nodes_number,
                                                                              res.node_message,
                                                                              res.pid,
                                                                              res.time,
                                                                              res.return_code,
                                                                              datetime.datetime.now() - sim.start_time,
                                                                              sim.particles_nums.size,
                                                                              ending_sim_lock_message,
                                                                              nodes_message))
                        ending_sim_lock.release()
                        # if pending_simulations == 0:
                        #     evaluation_finished.set()
                        return
                except Exception as exception:
                    # Check the future result and the simulation data; "res" may not be bound yet if
                    # the exception was raised before (or by) future.result()
                    future_result_message = "not available"
                    simulation_data_message = "not found"
                    if "res" in locals():
                        future_result_message = str(res)
                        if isinstance(res.simulation_num, int):
                            simulation_index = res.simulation_num - self.simulations_num
                            sim = sims_data[simulation_index]
                            simulation_data_message = pprint.pformat(vars(sim))
                    # Release any lock that is acquired
                    for sim in sims_data:
                        if sim.data_lock.locked():
                            sim.data_lock.release()
                    if ending_sim_lock.locked():
                        ending_sim_lock.release()
                    self.logger.warning(msg="Exception raised in manage_tasks function; {}: {}\n"
                                            "{}\nFuture result: {}\nSimulation data: {}\nSimulations that have not"
                                            " ended yet will be repeated".format(type(exception).__name__, exception,
                                                                                 traceback.format_exc(limit=6).strip(),
                                                                                 future_result_message,
                                                                                 simulation_data_message))
                    # evaluation_finished.set()
                    normal_evaluation = False  # TODO If more tasks than workers will be submitted, cancel other tasks
                    return

            def submit_task(simulation_index: int):
                """Submit the next partial simulation for the given simulation and register its future."""
                sim = sims_data[simulation_index]
                first_part_sim = sim.start_time is None
                if first_part_sim:
                    sim.start_time = datetime.datetime.now()
                future = pool.submit(self.default_rsa_simulation,
                                     candidates_numbers[simulation_index],
                                     simulations_numbers[simulation_index],
                                     sim.next_collector,
                                     rsa_proc_arguments + ["-particleAttributes="
                                                           + cand_particle_attributes[simulation_index]],
                                     datetime.datetime.now(),
                                     self.collectors_per_task,
                                     first_part_sim)
                future.add_done_callback(manage_tasks)
                sim.next_collector += self.collectors_per_task
                sim.pending_part_sims += 1
                sim.tasks.append(future)
            # Submit initial tasks
            nodes_message = "Initial numbers of nodes assigned to the simulations:\n" \
                            "simulation number: nodes number".format(pending_simulations)
            for simulation_index, sim in enumerate(sims_data):
                sim.nodes_number = simulation_nodes_number(simulation_index, pending_simulations)
                for _ in range(sim.nodes_number):
                    submit_task(simulation_index)
                nodes_message += "\n{}: {}".format(simulations_numbers[simulation_index], sim.nodes_number)
            self.logger.info(msg=nodes_message + "\n")
            # evaluation_finished.wait()
            # Poll until all simulations finish or manage_tasks reports a failure (1 s granularity);
            # the actual work happens in the futures' callbacks
            while pending_simulations > 0 and normal_evaluation:
                time.sleep(1)
            # pool.shutdown()
            if pending_simulations != 0:
                # TODO Maybe set return codes other than -1
                active_simulations = []
                for simulation_index, sim in enumerate(sims_data):
                    if sim.active:
                        active_simulations.append(candidates_numbers[simulation_index])
                self.logger.warning(msg="RSA simulations for candidates no. {}"
                                        " will be repeated".format(", ".join(map(str, active_simulations))))
        self.simulations_num += len(pheno_candidates)
        # Print evaluation results to the output file
        with open(self.output_filename, "a") as output_file:
            for arg, candidate_num, simulation_num, particle_attributes, sim in zip(pheno_candidates,
                                                                                    candidates_numbers,
                                                                                    simulations_numbers,
                                                                                    cand_particle_attributes,
                                                                                    sims_data):
                # Only finished (inactive) simulations produced valid statistics
                if not sim.active:
                    output_file.write("{},{},{},{}"
                                      "\t{}\t{}\t{}\t{}\n".format(self.CMAES.countiter,
                                                                  candidate_num,
                                                                  simulation_num,
                                                                  " ".join(map(str, arg)),
                                                                  sim.packing_frac,
                                                                  sim.standard_dev,
                                                                  self.mode_rsa_parameters["particleType"],
                                                                  particle_attributes))
        # For each simulation, if there are unused collectors, print their indices to a file
        for simulation_index, sim in enumerate(sims_data):
            unused_collectors_ids = np.setdiff1d(np.arange(sim.next_collector), sim.collectors_ids,
                                                 assume_unique=True)
            # Alternative solution:
            # unused_collectors_ids = [id for id in range(sim.next_collector) if id not in sim.collectors_ids]
            # More efficient alternative solution that could be used if sim.collectors_ids were sorted:
            # unused_collectors_ids = []
            # prev_collector_id = -1
            # for collector_id in np.append(sim.collectors_ids, sim.next_collector):
            #     if collector_id - prev_collector_id > 1:
            #         unused_collectors_ids.extend(range(prev_collector_id + 1, collector_id))
            #     prev_collector_id = collector_id
            if unused_collectors_ids.size > 0:
                # TODO Maybe log a warning
                simulation_output_dir = self.rsa_output_dir \
                                        + "/{:03d}_{:02d}_{:04d}".format(self.CMAES.countiter,
                                                                         candidates_numbers[simulation_index],
                                                                         simulations_numbers[simulation_index])
                with open(simulation_output_dir + "/unused_collectors.txt", "w") as unused_collectors_file:
                    unused_collectors_file.writelines("{}\n".format(id) for id in unused_collectors_ids)
        return list(values), list(return_codes)
def log_generation_data(self) -> None:
func_data = pd.DataFrame(columns=["arg", "partattrs", "pfrac", "pfracstddev"])
with open(self.output_filename) as output_file:
# TODO Maybe find more efficient or elegant solution
# TODO Maybe iterate through lines in file in reversed order - results of the current generation should be
# at the end
for line in output_file:
# Warning: when reading text files this way, each line contains "\n" character at the end - it can be
# seen when calling print(line.__repr__()). line.rstrip("\n") should be used. Maybe use csv.reader or
# csv.DictReader to read data files instead.
evaluation_data = line.rstrip("\n").split("\t")
evaluation_labels = evaluation_data[0].split(",")
if int(evaluation_labels[0]) == self.CMAES.countiter:
candidate_num = int(evaluation_labels[1])
# If multiple lines in packing-fraction-vs-params.txt file correspond to the same candidate, the
# values from the last such line will be used
# func_data[candidate_num] = [evaluation_data[4],
# float(evaluation_data[1]), float(evaluation_data[2])]
func_data.loc[candidate_num] = [evaluation_labels[3],
evaluation_data[4],
float(evaluation_data[1]),
float(evaluation_data[2])]
func_data.sort_values(by="pfrac", ascending=False, inplace=True)
best_cand = func_data.iloc[0]
median_cand = func_data.iloc[func_data.shape[0] // 2]
worst_cand = func_data.iloc[-1]
func_values_data = func_data.loc[:, ["pfrac", "pfracstddev"]]
candidates = [val for ind, cand in func_values_data.iterrows()
for val in [ind, cand.at["pfrac"], cand.at["pfracstddev"]]]
generation_data = [str(self.CMAES.countiter),
" ".join(map(str, self.CMAES.mean)),
self.arg_to_particle_attributes(self.CMAES.mean), # " ".join(map(str, self.CMAES.mean))
" ".join(map(str, self.stddevs)),
# TODO Address the issue with save_optimization_data method, which does not have information
# about covariance matrices during optimization
";".join([",".join(map(str, row)) for row in self.covariance_matrix]),
" ".join(map(str, self.stddevs_to_particle_stddevs(self.CMAES.mean,
self.stddevs,
self.covariance_matrix))),
str(best_cand.name), best_cand.at["arg"], best_cand.at["partattrs"],
str(best_cand.at["pfrac"]), str(best_cand.at["pfracstddev"]),
str(median_cand.name), median_cand.at["arg"], median_cand.at["partattrs"],
str(median_cand.at["pfrac"]), str(median_cand.at["pfracstddev"]),
str(worst_cand.name), worst_cand.at["arg"], worst_cand.at["partattrs"],
str(worst_cand.at["pfrac"]), str(worst_cand.at["pfracstddev"]),
",".join(map(str, candidates))]
# Candidates' data is joined with "," rather than printed as separate fields separated by "\t", as below:
# generation_data.extend(map(str, candidates))
with open(self.opt_data_filename, "a") as opt_data_file:
opt_data_file.write("\t".join(generation_data) + "\n")
@classmethod
def save_optimization_data(cls, signature) -> None:
output_filename = _output_dir + "/" + signature + "/packing-fraction-vs-params.txt"
mean_output_filename = _output_dir + "/" + signature + "/outcmaes/xmean.dat"
stddev_output_filename = _output_dir + "/" + signature + "/outcmaes/stddev.dat"
opt_data_filename = _output_dir + "/" + signature + "/optimization.dat"
# Data of the first generation (no. 0) is logged in xmean.dat file, but in some of other CMA-ES output files
# it is not logged
generations_mean_data = np.loadtxt(fname=mean_output_filename, comments=['%', '#'])
generations_stddev_data = np.loadtxt(fname=stddev_output_filename, comments=['%', '#'])
# For backwards compatibility, check if arg field is present in the labels in RSA output file and if not, do not
# save arg fields
with open(output_filename) as output_file:
line = output_file.readline()
evaluation_data = line.rstrip("\n").split("\t")
evaluation_labels = evaluation_data[0].split(",")
old_format = len(evaluation_labels) < 4
with open(output_filename) as output_file, open(opt_data_filename, "w+") as opt_data_file:
# Write header line
opt_data_file.write("\t".join(cls.optimization_data_columns) + "\n")
gen_num = 0
func_data = pd.DataFrame(columns=["arg", "partattrs", "pfrac", "pfracstddev"])
# TODO Maybe find more efficient or elegant solution
def save_generation_data() -> None:
func_data.sort_values(by="pfrac", ascending=False, inplace=True)
mean_arg = generations_mean_data[gen_num, 5:]
stddevs = generations_stddev_data[gen_num, 5:]
best_cand = func_data.iloc[0]
median_cand = func_data.iloc[func_data.shape[0] // 2]
worst_cand = func_data.iloc[-1]
func_values_data = func_data.loc[:, ["pfrac", "pfracstddev"]]
candidates = [val for ind, cand in func_values_data.iterrows()
for val in [ind, cand.at["pfrac"], cand.at["pfracstddev"]]]
generation_data = [str(gen_num),
" ".join(map(str, mean_arg)),
cls.arg_to_particle_attributes(mean_arg),
" ".join(map(str, stddevs)),
# TODO Address the problem that this method does not have information about
# covariance matrices during optimization
" ".join(map(str, cls.stddevs_to_particle_stddevs(mean_arg,
stddevs,
np.diag(stddevs ** 2)))),
str(best_cand.name), best_cand.at["arg"], best_cand.at["partattrs"],
str(best_cand.at["pfrac"]), str(best_cand.at["pfracstddev"]),
str(median_cand.name), median_cand.at["arg"], median_cand.at["partattrs"],
str(median_cand.at["pfrac"]), str(median_cand.at["pfracstddev"]),
str(worst_cand.name), worst_cand.at["arg"], worst_cand.at["partattrs"],
str(worst_cand.at["pfrac"]), str(worst_cand.at["pfracstddev"]),
",".join(map(str, candidates))]
# Candidates' data is joined with "," rather than printed as separate fields separated by "\t",
# as below:
# generation_data.extend(map(str, candidates))
opt_data_file.write("\t".join(generation_data) + "\n")
for line in output_file:
evaluation_data = line.rstrip("\n").split("\t")
evaluation_labels = evaluation_data[0].split(",")
if int(evaluation_labels[0]) > gen_num:
save_generation_data()
gen_num += 1
del func_data
func_data = pd.DataFrame(columns=["arg", "partattrs", "pfrac", "pfracstddev"])
candidate_num = int(evaluation_labels[1])
# If multiple lines in packing-fraction-vs-params.txt file correspond to the same candidate, the
# values from the last such line will be used
func_data.loc[candidate_num] = [evaluation_labels[3] if not old_format else "None",
evaluation_data[4],
float(evaluation_data[1]),
float(evaluation_data[2])]
# Save last generation's data
save_generation_data()
@classmethod
def plot_optimization_data(cls, signature: str, config_file_name: str) -> None:
opt_data_filename = _output_dir + "/" + signature + "/optimization.dat"
# Prepare optimization data file if it does not exist
if not os.path.isfile(opt_data_filename):
cls.save_optimization_data(signature=signature)
# Alternative solutions for loading optimization data:
# 1) Check if solution using NumPy works:
# with open(opt_data_filename) as opt_data_file:
# optimization_data = np.loadtxt(opt_data_file,
# # dtype={"names": tuple(cls.optimization_data_columns),
# # "formats": tuple(cls.optimization_data_columns.values())},
# dtype={"names": ("generation_num", "meanpartattrs", "bestind",
# "bestpfrac"),
# # "formats": (np.int, str, np.int, np.float)}
# "formats": ("i4", "U", "i4", "f4")}, # Debugging
# delimiter="\t", # Debugging
# skiprows=1, # Skip header line
# usecols=(0, 1, 2, 3) # tuple(range(len(cls.optimization_data_columns)))
# )
# 2) Maybe use function fread from datatable package
# 3) Solution with standard lines reading and filling pd.DataFrame:
# optimization_data = pd.DataFrame(opt_data_columns=list(opt_data_columns))
# with open(opt_data_filename, "r") as opt_data_file:
# for line in opt_data_file:
# generation_data = line.split("\t")
# # ...
# # optimization_data.loc[int(generation_data[0])] = ...
# 4) Reading with csv module works right:
# with open(opt_data_filename, newline="") as opt_data_file:
# import csv
# # opt_data_reader = csv.reader(opt_data_file, delimiter="\t") # Works right
# opt_data_reader = csv.DictReader(opt_data_file, delimiter="\t") # Works right
# # If candidates' data is in separate opt_data_columns, remove "candidatesdata"
# # from cls.optimization_data_columns and use restkey="candidatesdata" in csv.DictReader constructor.
# for record in opt_data_reader:
# pprint.pprint(record)
# Loading optimization data using pd.read_table
# Alternatively pass filepath_or_buffer=opt_data_filename to pd.read_table
with open(opt_data_filename) as opt_data_file:
optimization_data = pd.read_table(filepath_or_buffer=opt_data_file,
index_col="generationnum",
dtype=cls.optimization_data_columns)
# Debugging
# # pd.set_option('display.max_columns', None)
# # pd.set_option('display.max_rows', None)
# pprint.pprint(optimization_data.dtypes)
# pprint.pprint(optimization_data.index)
# pprint.pprint(optimization_data)
# pprint.pprint(optimization_data.loc[0, ["bestpfrac", "bestpfracstddev"]])
# pprint.pprint(optimization_data.loc[0, ["bestpfrac", "bestpfracstddev"]].to_numpy())
# print(optimization_data.loc[0, "bestpfrac"])
# print(optimization_data.loc[0, "bestpartattrs"])
# print(type(optimization_data.loc[0, "bestpartattrs"]))
# # pprint.pprint(optimization_data.head(1))
config_file_path = _output_dir + "/" + signature + "/" + config_file_name
with open(config_file_path) as config_file:
graph_config = yaml.load(config_file)
# TODO Maybe use fig, ax = plt.subplots() and plot on axes
# fig, ax = plt.subplots()
# plt.rcParams["axes.autolimit_mode"] = "round_numbers"
plt.rcParams["text.usetex"] = True
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = graph_config["font_size"]
fig = plt.figure(num=signature, figsize=graph_config["graph_size"]) # figsize is given in inches
# (10, 6.5)
ax = plt.axes()
# plt.title("CMA-ES optimization of RSA mean packing fraction\nof fixed-radii polydisks")
plt.ylabel("Mean packing fraction")
plt.xlabel("Generation number")
# TODO Try to adjust ticks automatically
# plt.xticks(optimization_data.index) # Needed for small numbers of generations
ax.tick_params(direction="in", right=True, top=True)
# plt.plot(optimization_data.index, optimization_data["bestpfrac"], "go-", label="Best candidate's value")
# plt.plot(optimization_data.index, optimization_data["medianpfrac"], "ro-", label="Median candidate's value")
# plt.plot(optimization_data.index, optimization_data["worstpfrac"], "bo-", label="Worst candidate's value")
candidates_data = [np.array(gen_cands_data.split(","), dtype=np.float).reshape(-1, 3)
for gen_cands_data in optimization_data["candidatesdata"]]
for gen_num, gen_cands_data in enumerate(candidates_data):
for cand_data in reversed(gen_cands_data[1:-1]):
# Best and worst candidates are removed, median candidate stays, but his point is later covered
plt.errorbar(x=optimization_data.index[gen_num], y=cand_data[1], yerr=cand_data[2],
fmt="k.", capsize=1.5) # "ko"
plt.errorbar(x=optimization_data.index, y=optimization_data["worstpfrac"],
yerr=optimization_data["worstpfracstddev"],
fmt="bo-", capsize=2, label="Worst candidate") # barsabove=True # Worst candidate's value
plt.errorbar(x=optimization_data.index, y=optimization_data["medianpfrac"],
yerr=optimization_data["medianpfracstddev"],
fmt="rs-", capsize=2, label="Median candidate") # barsabove=True
plt.errorbar(x=optimization_data.index, y=optimization_data["bestpfrac"],
yerr=optimization_data["bestpfracstddev"],
fmt="gD-", capsize=2, label="Best candidate") # barsabove=True
plt.fill_between(optimization_data.index, optimization_data["worstpfrac"], optimization_data["bestpfrac"],
color="0.75")
# plt.grid(axis="y") # True, axis="y"
plt.grid()
handles, labels = ax.get_legend_handles_labels()
leg = plt.legend(reversed(handles), reversed(labels))
leg.set_draggable(True)
# After dragging legend disappears, but reappears shifted after changing anything in the graph.
# update="bbox" does not change this behaviour, but sets legend's position relative to figure, not axes, which
# is bad.
def particle_drawings_annotations(part_attrs_col: str, packing_frac_col: str = "medianpfrac",
color: str = "0.5", modulo: int = 1, drawings_scale: float = 0.05,
drawings_offset: Tuple[float, float] = None, vertical_alignment: float = 0.1,
position: str = "point", arrows: bool = False, means: bool = False) -> None:
"""
Annotate packing fraction data series with, draggable if necessary, particle drawings
:param part_attrs_col: Name of the column with particle attributes in optimization_data pd.DataFrame
:param packing_frac_col: Name of the column with mean packing fractions in optimization_data pd.DataFrame
:param color: Color of the particle drawings
:param modulo: Annotate points in first, last and every modulo generation
:param drawings_scale: Length of unitary segment in drawing (drawing's scale) given in fraction
of x axis width
:param drawings_offset: Tuple specifying annotation boxes offset in fraction of axes' width and height
:param vertical_alignment: Annotation boxes common vertical position in fraction of axes' height
:param position: String specifying type of annotation boxes positioning. "point" - relative to annotated
points' positions - uses drawings_offset argument, "x" - at the same x positions as
annotated points and at the common y position for all annotation boxes,
specified by vertical_alignment argument.
:param means: Whether to make annotations corresponding to means of the distributions - among others,
visualize particle attributes' standard deviations
:return: None
"""
# TODO Maybe scale drawings' paddings and arrows and boxes' frames relatively to graph's width and height,
# similarly as drawings are scaled
# Maybe use max(int(data_len * drawings_scale), 1) as the default modulo (data points are placed uniformly
# on axes, drawings' widths are approximately 1 in drawings' coordinates if in drawings' coordinates
# particle area is 1).
# Factor for scaling objects drawn on matplotlib.offsetbox.DrawingArea
scaling_factor = (ax.transAxes.transform((drawings_scale, drawings_scale))[0]
- ax.transAxes.transform((0, 0))[0]) \
/ fig.canvas.get_renderer().points_to_pixels(1.)
# scaling_factor is divided by factor used in scaling of transformation applied by DrawingArea so that real
# drawings' sizes are scaled by specified fraction of x axis.
# See: https://matplotlib.org/_modules/matplotlib/offsetbox.html#DrawingArea.
# Use fig.transFigure to specify fraction of figure's width instead. Try setting scaling_factor to 1 to see
# how it behaves.
gen_nums = list(range(0, data_len, modulo))
if data_len - 1 not in gen_nums:
gen_nums.pop()
gen_nums.append(data_len - 1)
for gen_num in gen_nums:
part_attrs = optimization_data[part_attrs_col].at[gen_num]
arg_col = part_attrs_col[:-9] + "arg"
# For backwards compatibility, check if the arg_col field contains None
arg_field = optimization_data[arg_col].at[gen_num]
arg = np.array(arg_field.split(" "), dtype=np.float) if arg_field != "None" else None
# In order to be able to draw all types of particles with full information about optimization process,
# phenotype candidates have to be given here in the arg parameter to the draw_particle method. Therefore
# phenotype candidates have to be somehow saved (currently they are written as the fourth label in the
# RSA output file, but before they were only written to the logfile and saved in the optimization object
# in the dictionary optimization.CMAES.archive). It is needed e.g. to draw the correct numbers of convex
# polygon's vertices in the optimization space.
# Get particle drawing and set properties of the arrow
if not means:
drawing_area = cls.draw_particle(particle_attributes=part_attrs,
scaling_factor=scaling_factor,
color=color,
arg=arg)
else:
stddevs = np.array(optimization_data["stddevs"].at[gen_num].split(" "), dtype=np.float)
part_stddevs = np.array(optimization_data["partstddevs"].at[gen_num].split(" "), dtype=np.float)
covariance_matrix = np.array([row.split(",") for row in
optimization_data["covmat"].at[gen_num].split(";")], dtype=np.float)
# covariance_matrix = np.diag(stddevs ** 2)
drawing_area = cls.draw_particle(particle_attributes=part_attrs,
scaling_factor=scaling_factor,
color=color,
arg=arg,
std_devs=stddevs,
covariance_matrix=covariance_matrix,
part_std_devs=part_stddevs)
arrows = False
if arrows:
arrow_props = dict(arrowstyle="simple," # "->", "simple"
"head_length=0.2,"
"head_width=0.3," # 0.1, 0.5
"tail_width=0.01", # 0.2
facecolor="black",
connectionstyle="arc3,"
"rad=0.3")
else:
arrow_props = dict()
# Set coordinates and positions of the annotated point and the label with drawing
xy = (optimization_data.index[gen_num], optimization_data[packing_frac_col].at[gen_num])
if position == "point":
box_coords = "data"
offset_x, offset_y = drawings_offset
xy_axes = ax.transAxes.inverted().transform(ax.transData.transform(xy))
# Transforming back to data coordinates and using xybox=xy_box, boxcoords="data" instead of
# xybox=(xy_axes[0] + offset_x, xy_axes[1] + offset_y), boxcoords="axes fraction",
# to assure correct shifting sensitivity (something concerning handling transforms by
# DraggableAnnotation with AnnotationBbox is wrongly implemented in matplotlib and using
# boxcoords="axes fraction" results in wrongly recalculated mouse's offsets and different
# sensitivities in both axes).
xy_box = ax.transData.inverted().transform(ax.transAxes.transform((xy_axes[0] + offset_x,
xy_axes[1] + offset_y)))
# Specifying axes' coordinates using ScaledTranslation doesn't work well:
# offset_trans = matplotlib.transforms.ScaledTranslation(offset_x, offset_y, ax.transAxes)\
# - matplotlib.transforms.ScaledTranslation(0, 0, ax.transAxes)
# annotation_trans = ax.transData + offset_trans
# In AnnotationBbox constructor: xybox=xy, boxcoords=annotation_trans
# Alternative solutions (work well):
# 1) Specifying annotation box offset in fraction of figure's width and height:
# offset_trans = matplotlib.transforms.ScaledTranslation(offset_x, offset_y, fig.transFigure)
# annotation_trans = ax.transData + offset_trans
# In AnnotationBbox constructor: xybox=xy, boxcoords=annotation_trans
# 2) Displaying annotations at the bottom of axes (legend needs to be shifted then):
# In AnnotationBbox constructor: xybox=(xy[0], 0.1), boxcoords=("data", "axes fraction")
# 3) Specifying annotation box offset in data coordinates (needs to be adjusted depending on data):
# In AnnotationBbox constructor e.g.: xybox=(xy[0] + 0.2, xy[1] - 0.01), boxcoords="data"
if position == "x":
box_coords = ("data", "axes fraction")
xy_box = (xy[0], vertical_alignment)
# arrow_props = dict()
# Make annotation
# drag_part_drawing = matplotlib.offsetbox.DraggableOffsetBox(ax, part_drawing) # Not needed
# Use matplotlib_shiftable_annotation.AnnotationBboxWithShifts for shiftability instead of draggability
ab = matplotlib.offsetbox.AnnotationBbox(drawing_area,
xy=xy,
xybox=xy_box,
xycoords="data",
# boxcoords="axes fraction",
boxcoords=box_coords,
pad=0.2, # 0.4
fontsize=12, # 12
# bboxprops={},
arrowprops=arrow_props)
ax.add_artist(ab)
if position == "point":
# # AnnotationBbox subclasses matplotlib.text._AnnotationBase, so we can toggle draggability
# # using the following method:
ab.draggable()
# ab.shiftable()
# Maybe following is equivalent:
# drag_ab = matplotlib.offsetbox.DraggableAnnotation(ab)
data_len = len(optimization_data["bestpartattrs"])
drawings_scale = graph_config["drawings_scale"] # 0.05
modulo = graph_config.get("modulo")
if modulo is None:
modulo = max(int(data_len * drawings_scale), 1)
particle_drawings_annotations(part_attrs_col="worstpartattrs", packing_frac_col="worstpfrac", color="b",
modulo=modulo, drawings_scale=drawings_scale,
position="x", vertical_alignment=graph_config["worst_position"]) # 0.1
# particle_drawings_annotations(part_attrs_col="worstpartattrs", packing_frac_col="worstpfrac", color="b",
# modulo=modulo, drawings_scale=drawings_scale, drawings_offset=(0., -0.15))
# drawings_offset=(0.1, -0.1) (0.2, -0.3)
# particle_drawings_annotations(part_attrs_col="medianpartattrs", packing_frac_col="medianpfrac", color="r",
# modulo=modulo, drawings_scale=drawings_scale, drawings_offset=(0., -0.1))
# # drawings_offset=(0.1, 0.) (0.2, -0.2)
particle_drawings_annotations(part_attrs_col="medianpartattrs", packing_frac_col="medianpfrac", color="r",
modulo=modulo, drawings_scale=drawings_scale,
position="x", vertical_alignment=graph_config["median_position"]) # 0.2
# vertical_alignment=0.22
# particle_drawings_annotations(part_attrs_col="bestpartattrs", packing_frac_col="bestpfrac", color="g",
# modulo=modulo, drawings_scale=drawings_scale, drawings_offset=(0., 0.1))
# # drawings_offset = (0.1, 0.1) (0.2, -0.1)
particle_drawings_annotations(part_attrs_col="bestpartattrs", packing_frac_col="bestpfrac", color="g",
modulo=modulo, drawings_scale=drawings_scale,
position="x", vertical_alignment=graph_config["best_position"]) # 0.3
# vertical_alignment=1.08
particle_drawings_annotations(part_attrs_col="meanpartattrs",
modulo=modulo, drawings_scale=drawings_scale,
position="x", vertical_alignment=graph_config["mean_position"],
means=graph_config["annotate_mean_particles"])
# 0.9 # 0.95
# vertical_alignment=0.1
gen_nums = list(range(0, data_len, modulo))
if data_len - 1 not in gen_nums:
gen_nums.append(data_len - 1)
plt.xticks(gen_nums)
# TODO Adjust top limit so that it corresponds to a major tick
# ax.set_ymargin(0.1)
plt.locator_params(axis="y", nbins=15)
# ax.set_xlim(...)
bottom_lim, top_lim = ax.get_ylim()
bottom_space = graph_config["bottom_space"] # 0.35
top_space = graph_config["top_space"] # 0.12
ax.set_ylim(bottom_lim - bottom_space / (1. - top_space - bottom_space) * (top_lim - bottom_lim),
top_lim + top_space / (1. - top_space - bottom_space) * (top_lim - bottom_lim))
# ax.set_ylim(bottom_lim - bottom_space / (1. - bottom_space) * (top_lim - bottom_lim), top_lim)
# Create inset graph
inset_ax = ax.inset_axes(graph_config["inset_origin"] + graph_config["inset_size"])
inset_ax.set_xlim([graph_config["inset_data_x"][0] - 0.5, graph_config["inset_data_x"][1] + 0.5])
inset_ax.set_ylim(graph_config["inset_data_y"])
if graph_config["indicate_inset_zoom"]:
ax.indicate_inset_zoom(inset_ax, edgecolor="k")
inset_ax.tick_params(direction="in", right=True, top=True)
inset_optimization_data = optimization_data[graph_config["inset_data_x"][0]:graph_config["inset_data_x"][1] + 1]
inset_candidates_data = [np.array(gen_cands_data.split(","), dtype=np.float).reshape(-1, 3)
for gen_cands_data in inset_optimization_data["candidatesdata"]]
for gen_num, gen_cands_data in enumerate(inset_candidates_data):
for cand_data in reversed(gen_cands_data[1:-1]):
# Best and worst candidates are removed, median candidate stays, but his point is later covered
inset_ax.errorbar(x=inset_optimization_data.index[gen_num], y=cand_data[1], yerr=cand_data[2],
fmt="k.", capsize=1.5) # "ko"
inset_ax.errorbar(x=inset_optimization_data.index, y=inset_optimization_data["worstpfrac"],
yerr=inset_optimization_data["worstpfracstddev"],
fmt="bo-", capsize=2, label="Worst candidate") # barsabove=True
inset_ax.errorbar(x=inset_optimization_data.index, y=inset_optimization_data["medianpfrac"],
yerr=inset_optimization_data["medianpfracstddev"],
fmt="rs-", capsize=2, label="Median candidate") # barsabove=True
inset_ax.errorbar(x=inset_optimization_data.index, y=inset_optimization_data["bestpfrac"],
yerr=inset_optimization_data["bestpfracstddev"],
fmt="gD-", capsize=2, label="Best candidate") # barsabove=True
inset_ax.fill_between(inset_optimization_data.index, inset_optimization_data["worstpfrac"],
inset_optimization_data["bestpfrac"], color="0.75")
# inset_ax.grid(axis="y") # True, axis="y"
inset_ax.locator_params(axis="x", nbins=graph_config["inset_x_ticks"])
inset_ax.locator_params(axis="y", nbins=graph_config["inset_y_ticks"])
inset_ax.grid()
plt.tight_layout()
plt.show()
    @waiting_for_graphs
    def run(self) -> None:
        """
        Method running optimization.

        Runs the CMA-ES loop until a stop condition is met. In every generation candidates are
        sampled (resampling any candidate outside the domain), evaluated by RSA simulations
        (serially or in parallel), simulation failures are handled according to the signal that
        killed the simulation process (repeat / take median / resample), and the results are
        passed back to the CMA-ES object. The optimization object is pickled after every
        generation and after the optimization ends.
        """
        self.logger.info(msg="")
        if self.CMAES.countiter == 0:
            self.logger.info(msg="Start of optimization")
            self.CMAES.logger.add()
        else:
            self.logger.info(msg="Start of resumed optimization")
        if self.okeanos:
            self.logger.info(msg="")
            self.logger.info(msg="Parallel simulations number: {}".format(self.parallel_simulations_number))
            self.logger.info(msg="Parallel threads number: {}".format(self.parallel_threads_number))
        if self.okeanos_parallel:
            self.logger.info(msg="")
            self.logger.info(msg="Population size: {}".format(self.CMAES.popsize))
            self.logger.info(msg="Nodes number: {}".format(self.nodes_number))
            self.logger.info(msg="Parallel threads number: {}".format(self.parallel_threads_number))
            self.logger.info(msg="Collectors per task: {}".format(self.collectors_per_task))
            self.logger.info(msg="Target accuracy: {}".format(self.accuracy))
            self.logger.info(msg="Minimum collectors number: {}".format(self.min_collectors_number))
        while not self.CMAES.stop():
            gen_start_time = datetime.datetime.now()
            self.logger.info(msg="")
            self.logger.info(msg="Generation number {}".format(self.CMAES.countiter))
            if self.CMAES.countiter > 0:
                self.CMAES.logger.disp_header()
                self.CMAES.logger.disp([-1])
            # pheno_candidates = self.CMAES.ask()
            # TODO Check, why resampling causes problems in self.CMAES.tell method when population size
            #  is small and mirroring is used
            # Sample the population one candidate at a time, resampling any candidate outside the domain
            pheno_candidates = []
            resamplings_num = 0
            while len(pheno_candidates) < self.CMAES.popsize:
                candidate = self.CMAES.ask(number=1)[0]
                while not self.arg_in_domain(arg=candidate):
                    candidate = self.CMAES.ask(number=1)[0]
                    resamplings_num += 1
                pheno_candidates.append(candidate)
            if resamplings_num > 0:
                self.logger.info(msg="Resamplings per candidate: {}".format(str(resamplings_num / self.CMAES.popsize)))
            # TODO Maybe add a mode for plotting an image of a shape corresponding to mean candidate solution(s)
            # Log the current state of the CMA-ES distribution
            self.logger.info(msg="Mean of the distribution:")
            self.logger.info(msg=pprint.pformat(self.CMAES.mean))
            self.logger.info(msg="Step size: {}".format(str(self.CMAES.sigma)))
            self.logger.info(msg="Standard deviations:")
            self.stddevs = self.CMAES.sigma * self.CMAES.sigma_vec.scaling * self.CMAES.sm.variances ** 0.5
            self.logger.info(msg=pprint.pformat(self.stddevs))
            self.logger.info(msg="Covariance matrix:")
            self.covariance_matrix = self.CMAES.sigma ** 2 * self.CMAES.sm.covariance_matrix
            for line in pprint.pformat(self.covariance_matrix).split("\n"):  # or .splitlines()
                self.logger.info(msg=line)
            self.logger.info(msg="Phenotype candidates:")
            for line in pprint.pformat(pheno_candidates).split("\n"):
                self.logger.info(msg=line)
            swapped_pheno_candidates = [self.swap_arg(arg) for arg in pheno_candidates]
            self.logger.info(msg="Swapped phenotype candidates:")
            for line in pprint.pformat(swapped_pheno_candidates).split("\n"):
                self.logger.info(msg=line)
            # values = self.evaluate_generation_parallel(pheno_candidates) if self.parallel\
            #     else self.evaluate_generation_serial(pheno_candidates)
            # TODO Maybe make evaluate_generation_* methods return values as np.ndarray
            if self.parallel:
                if self.particle_attributes_parallel:
                    if not self.okeanos_parallel:
                        values, return_codes = self.evaluate_generation_parallel_in_pool(pheno_candidates)
                    else:
                        values, return_codes = self.run_simulations_on_okeanos(pheno_candidates)
                else:
                    self.logger.info(msg="Computing candidates' particleAttributes parameters in series")
                    cand_particle_attributes = [self.arg_to_particle_attributes(arg) for arg in pheno_candidates]
                    self.logger.debug(msg="Candidates' particleAttributes parameters:")
                    for line in pprint.pformat(cand_particle_attributes, width=200).split("\n"):
                        self.logger.debug(msg=line)
                    if not self.okeanos_parallel:
                        values, return_codes = self.evaluate_generation_parallel_in_pool(pheno_candidates,
                                                                                         cand_particle_attributes)
                    else:
                        values, return_codes = self.run_simulations_on_okeanos(pheno_candidates,
                                                                               cand_particle_attributes)
                # TODO Implement computing it in parallel (probably using evaluate_generation_parallel_in_pool method,
                #  use also a function that for number of simulations and simulation number returns number
                #  of ompThreads)
                # TODO Maybe add an option to somehow tell the program to end optimization (e.g. kill -KILL an RSA
                #  process or send a signal to the main process)
                # take_median[i] marks candidates whose value will be replaced by the median of the others
                take_median = np.full(shape=len(pheno_candidates), fill_value=False)
                if not self.okeanos_parallel:
                    # while np.any(np.isnan(values)):
                    # Keep retrying until every candidate not marked take_median has a value
                    while np.any(np.logical_and(np.isnan(values), np.logical_not(take_median))):
                        for candidate_num, candidate, candidate_value, return_code\
                                in zip(list(range(len(pheno_candidates))), pheno_candidates, values, return_codes):
                            if np.isnan(candidate_value):
                                warning_message = "RSA simulation for candidate no. {} did not succeed." \
                                                  " Return code: {}".format(str(candidate_num),
                                                                            str(return_code))
                                signal_name = ""
                                # Negative return code means the simulation process was killed by a signal;
                                # the signal determines the recovery strategy below
                                if return_code < 0:
                                    try:
                                        system_info = subprocess.check_output(["uname", "-mrs"]).decode().strip()
                                    except Exception as exception:
                                        self.logger.warning(msg="Exception raised when checking system information;"
                                                                " {}: {}\n"
                                                                "{}".format(type(exception).__name__, exception,
                                                                            traceback.format_exc(limit=6).strip()))
                                        system_info = "not checked"
                                    okeanos_system_info = "Linux 4.12.14-150.17_5.0.86-cray_ari_s x86_64"
                                    # See https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode
                                    if not self.okeanos and system_info != okeanos_system_info \
                                            and system_info != "not checked":
                                        if sys.platform.startswith("linux"):
                                            signal_info = subprocess.check_output("kill -l " + str(-return_code),
                                                                                  shell=True)
                                        else:
                                            signal_info = subprocess.check_output(["kill", "-l", str(-return_code)])
                                        signal_name = signal_info.decode().strip().upper()
                                    else:
                                        # "kill -l [number]" on okeanos doesn't work, see /usr/include/asm/signal.h
                                        # TODO Test it
                                        if return_code == -10:
                                            signal_name = "USR1"
                                        elif return_code == -12:
                                            signal_name = "USR2"
                                        else:
                                            signal_name = str(-return_code)
                                    warning_message += ", signal name: {}".format(signal_name)
                                    # self.logger.debug(msg=signal_info)
                                    # self.logger.debug(msg=signal_name)
                                self.logger.warning(msg=warning_message)
                                random_seed = self.rsa_parameters.get("seedOrigin") == "random" if self.input_given\
                                    else self.all_rsa_parameters.get("seedOrigin") == "random"
                                # if return_code_name in ["", "TERM"] or (return_code_name == "USR1"
                                #                                         and not random_seed):
                                #     self.logger.warning(msg="Resampling phenotype candidate"
                                #                             " no. {}".format(str(candidate_num)))
                                #     new_candidate = self.CMAES.ask(number=1)[0]
                                #     while not self.arg_in_domain(arg=new_candidate):
                                #         new_candidate = self.CMAES.ask(number=1)[0]
                                #     self.logger.info(msg="Resampled candidate no. {}:".format(str(candidate_num)))
                                #     self.logger.info(msg=pprint.pformat(new_candidate))
                                #     pheno_candidates[candidate_num] = new_candidate
                                #     return_codes[candidate_num] = self.rsa_simulation(candidate_num, new_candidate)
                                # elif return_code_name == "USR1" and random_seed:
                                if signal_name == "USR1" and random_seed:
                                    # To repeat RSA simulation in the same point when random seed RSA parameter is set,
                                    # kill simulation process with "kill -USR1 pid"
                                    self.logger.warning(msg="Repeating RSA simulation for phenotype candidate"
                                                            " no. {}".format(str(candidate_num)))
                                    return_codes[candidate_num] = self.rsa_simulation(
                                        candidate_num, candidate,
                                        omp_threads=self.parallel_threads_number)
                                elif signal_name == "USR2":
                                    # To set corresponding to RSA simulation phenotype candidate's value to the median
                                    # of other candidates' values, kill simulation process with "kill -USR2 pid"
                                    self.logger.warning(msg="Phenotype candidate's no. {} value will be set"
                                                            " to the median of other candidates'"
                                                            " values".format(str(candidate_num)))
                                    take_median[candidate_num] = True
                                else:
                                    # To resample phenotype candidate corresponding to RSA simulation,
                                    # kill simulation process in other way, e.g. with "kill pid"
                                    self.logger.warning(msg="Resampling phenotype candidate"
                                                            " no. {}".format(str(candidate_num)))
                                    new_candidate = self.CMAES.ask(number=1)[0]
                                    while not self.arg_in_domain(arg=new_candidate):
                                        new_candidate = self.CMAES.ask(number=1)[0]
                                    self.logger.info(msg="Resampled candidate no. {}:".format(str(candidate_num)))
                                    self.logger.info(msg=pprint.pformat(new_candidate))
                                    pheno_candidates[candidate_num] = new_candidate
                                    return_codes[candidate_num] = self.rsa_simulation(
                                        candidate_num, new_candidate,
                                        omp_threads=self.parallel_threads_number)
                        # Re-read the RSA output file to collect values of repeated/resampled simulations
                        with open(self.output_filename, "r") as rsa_output_file:
                            # TODO Maybe find more efficient or elegant solution
                            # TODO Maybe iterate through lines in file in reversed order - results of the current
                            #  generation should be at the end
                            for line in rsa_output_file:
                                evaluation_data = line.split("\t")
                                evaluation_labels = evaluation_data[0].split(",")
                                if int(evaluation_labels[0]) == self.CMAES.countiter:
                                    read_candidate_num = int(evaluation_labels[1])
                                    mean_packing_fraction = float(evaluation_data[1])
                                    # Negated because CMA-ES minimizes and packing fraction is maximized
                                    values[read_candidate_num] = -mean_packing_fraction
                else:
                    # TODO Test it
                    values = np.array(values)
                    return_codes = np.array(return_codes)
                    random_seed = self.rsa_parameters.get("seedOrigin") == "random" if self.input_given\
                        else self.all_rsa_parameters.get("seedOrigin") == "random"
                    while np.any(np.logical_and(np.isnan(values), np.logical_not(take_median))):
                        repeat_candidates = np.full(shape=len(pheno_candidates), fill_value=False)
                        if random_seed:
                            # -10 corresponds to SIGUSR1 - repeat the simulation at the same point
                            repeat_candidates = return_codes == -10
                            repeat_candidates_nums = [str(candidate_num) for candidate_num, repeat
                                                      in enumerate(repeat_candidates) if repeat]
                            if np.any(repeat_candidates):
                                self.logger.warning(msg="Phenotype candidates' no. {} simulations"
                                                        " will be repeated".format(", ".join(repeat_candidates_nums)))
                        # -12 corresponds to SIGUSR2 - take the median of other candidates' values
                        median_candidates = return_codes == -12
                        if np.any(median_candidates):
                            median_candidates_nums = [candidate_num for candidate_num, median
                                                      in enumerate(median_candidates) if median]
                            self.logger.warning(msg="Phenotype candidates' no. {} values will be set"
                                                    " to the median of other candidates'"
                                                    " values".format(", ".join(map(str, median_candidates_nums))))
                            take_median[median_candidates] = True
                        # Resample candidates
                        # NOTE(review): np.logical_and accepts only two input arrays - the third positional
                        # argument is treated as the "out" parameter, so the result equals
                        # isnan(values) & ~take_median and repeat_candidates are NOT excluded here;
                        # confirm whether np.logical_and.reduce or an explicit "&" chain was intended
                        resample_candidates = np.logical_and(np.isnan(values),
                                                             np.logical_not(take_median),
                                                             np.logical_not(repeat_candidates))
                        if np.any(resample_candidates):
                            resample_candidates_nums = [candidate_num for candidate_num, resample
                                                        in enumerate(resample_candidates) if resample]
                            resamplings_num = 0
                            for candidate_num in resample_candidates_nums:
                                new_candidate = self.CMAES.ask(number=1)[0]
                                while not self.arg_in_domain(arg=new_candidate):
                                    new_candidate = self.CMAES.ask(number=1)[0]
                                    resamplings_num += 1
                                pheno_candidates[candidate_num] = new_candidate
                            self.logger.warning(msg="Phenotype candidates no. {}"
                                                    " were resampled.".format(", ".join(map(str,
                                                                                            resample_candidates_nums))))
                            if resamplings_num > 0:
                                self.logger.info(msg="Resamplings per candidate: {}".format(
                                    resamplings_num / len(resample_candidates_nums)))
                            self.logger.info(msg="New candidates:")
                            for candidate_num in resample_candidates_nums:
                                self.logger.info(msg=pprint.pformat(pheno_candidates[candidate_num]))
                            if not self.particle_attributes_parallel:
                                self.logger.info(msg="Computing candidates' particleAttributes parameters in series")
                                self.logger.debug(msg="Candidates' particleAttributes parameters:")
                                for candidate_num in resample_candidates_nums:
                                    cand_particle_attributes[candidate_num] = self.arg_to_particle_attributes(
                                        pheno_candidates[candidate_num])
                                    self.logger.debug(msg=cand_particle_attributes[candidate_num])
                        # Evaluate rest of the candidates
                        evaluate_candidates = np.logical_and(np.isnan(values), np.logical_not(take_median))
                        candidates_to_evaluate = [candidate for candidate, evaluate
                                                  in zip(pheno_candidates, evaluate_candidates) if evaluate]
                        candidates_numbers = [candidate_num for candidate_num, evaluate
                                              in enumerate(evaluate_candidates) if evaluate]
                        self.logger.info(msg="Evaluating values for candidates"
                                             " no. {}".format(", ".join(map(str, candidates_numbers))))
                        if self.particle_attributes_parallel:
                            new_values, new_return_codes = self.run_simulations_on_okeanos(
                                candidates_to_evaluate,
                                candidates_numbers=candidates_numbers)
                        else:
                            candidates_part_attrs = [part_attrs for part_attrs, evaluate
                                                     in zip(cand_particle_attributes, evaluate_candidates) if evaluate]
                            new_values, new_return_codes = self.run_simulations_on_okeanos(
                                candidates_to_evaluate,
                                cand_particle_attributes=candidates_part_attrs,
                                candidates_numbers=candidates_numbers)
                        # Set values and return codes
                        values[evaluate_candidates] = new_values
                        return_codes[evaluate_candidates] = new_return_codes
                    # values = list(values)
                if np.any(take_median):
                    # TODO Maybe make evaluate_generation_* methods return values in np.ndarray - then it would be
                    #  easier to manipulate the values array
                    correct_values = []
                    for val in values:
                        if not np.isnan(val):
                            correct_values.append(val)
                    median_value = np.sort(correct_values)[len(correct_values) // 2]
                    self.logger.warning(msg="Phenotype candidates' no. {} values"
                                            " are set to the median of other candidates' values"
                                            " equal to {}".format(", ".join(map(str, np.nonzero(take_median)[0])),
                                                                  str(median_value)))
                    values = [value if not take_med else median_value for value, take_med in zip(values, take_median)]
            else:
                # TODO Implement checking results in serial computing case
                values = self.evaluate_generation_serial(pheno_candidates)
            # TODO Check, what happens in case when e.g. None is returned as candidate value
            # TODO Maybe add checking if rsa simulation finished with success and successfully wrote a line to
            #  packing-fraction-vs-params.txt file. If it fails, in serial computing the previous packing fraction
            #  is assigned as the current value in values array without any warning, and in parallel - wrong value
            #  from np.zeros function is treated as a packing fraction.
            self.logger.info(msg="End of generation number {}".format(self.CMAES.countiter))
            self.logger.info(msg="Candidate values:")
            self.logger.info(msg=values)
            if self.log_generations:
                # Less costly solution: read not only packing fraction, but also standard deviation from
                # packing-fraction-vs-params.txt file in evaluate_generation_* function and return both values.
                # File wouldn't be read twice then. For distribution mean, best, median and worst candidate
                # self.arg_to_particle_attributes would be called. All data read from file by self.log_generation_data
                # would be available. Standard deviations could be written to optimization-output.log file, too.
                self.log_generation_data()
            # Pass the evaluated (swapped) candidates and their values back to CMA-ES
            self.CMAES.tell(swapped_pheno_candidates, values)
            self.CMAES.logger.add()
            # Pickling of the object
            # TODO Add jsonpickle pickling
            self.pickle()
            gen_end_time = datetime.datetime.now()
            self.logger.info("Generation time: {}".format(str(gen_end_time - gen_start_time)))
        self.logger.info(msg="")
        self.logger.info(msg="End of optimization")
        self.logger.info(msg="")
        self.CMAES.logger.disp_header()
        self.CMAES.logger.disp([-1])
        self.logger.info(msg=pprint.pformat(self.CMAES.result))
        self.CMAES.result_pretty()
        # Pickling of the object
        self.pickle()
        termination_condition = "-".join(["{}-{}".format(key, val) for key, val in self.CMAES.result.stop.items()]) \
            .replace(".", "_")
        self.pickle(name="gen-{}-term-{}".format(self.CMAES.countiter - 1, termination_condition))
        # TODO Add separate method for making graphs
        # TODO Maybe create another class for analyzing the results of optimization
        if self.show_graph:
            plot_cmaes_graph_in_background(self.CMAES.logger.name_prefix, self.signature)
        # if self.output_to_file:
        # Restore the standard streams redirected at the start of the optimization
        sys.stdout = self.stdout
        sys.stderr = self.stderr
def get_result(self) -> Tuple[str, float, float]:
"""Method returning tuple with particle attributes and packing fraction of the best particle in optimization"""
opt_data_filename = self.output_dir + "/optimization.dat"
# Prepare optimization data file if it does not exist
if not os.path.isfile(opt_data_filename):
self.save_optimization_data(signature=self.signature)
# Loading optimization data using pd.read_table
# Alternatively pass filepath_or_buffer=opt_data_filename to pd.read_table
with open(opt_data_filename) as opt_data_file:
optimization_data = pd.read_table(filepath_or_buffer=opt_data_file,
index_col="generationnum",
dtype=self.optimization_data_columns)
best_generation_num: int = optimization_data["bestpfrac"].idxmax()
best_generation_data: pd.Series = optimization_data.loc[best_generation_num]
return best_generation_data["bestpartattrs"], \
best_generation_data["bestpfrac"], \
best_generation_data["bestpfracstddev"]
class PolydiskRSACMAESOpt(RSACMAESOptimization, metaclass=abc.ABCMeta):
    """
    Abstract base class for CMA-ES optimization of the packing fraction of RSA packings built of
    polydisks (unions of disks) on a 2D surface. Concrete subclasses define how an optimization
    argument vector is translated into disks' coordinates and radii.
    """
    # mode_rsa_parameters: dict = dict(super().mode_rsa_parameters, particleType="Polydisk")
    # TODO Check, if it is a right way of overriding class attributes (how to get parent class' attribute)
    mode_rsa_parameters: dict = dict(RSACMAESOptimization.mode_rsa_parameters,
                                     surfaceDimension="2", particleType="Polydisk")

    # Counter used to generate unique Wolfram Kernel output file names; -1 means no evaluation yet
    wolfram_polydisk_area_eval_num: int = -1

    @classmethod
    @abc.abstractmethod
    def arg_to_polydisk_attributes(cls, arg: np.ndarray) -> Tuple[str, np.ndarray]:
        """
        Function returning part of Polydisk's particleAttributes in a tuple, which first element is \"xy\" or \"rt\"
        string indicating type of coordinates and the second is a numpy ndarray with c01 c02 r0 c11 c12 r1 ... floats
        (disks' coordinates and radii)
        """
        pass

    @classmethod
    def arg_in_domain(cls, arg: np.ndarray) -> bool:
        """
        Check whether arg encodes a valid polydisk, i.e. whether the union of disks is connected.

        :param arg: optimization argument vector
        :return: True if the polydisk is connected, False otherwise
        :raises NotImplementedError: if the coordinates type is not supported
        """
        coordinates_type, disks_params = cls.arg_to_polydisk_attributes(arg)
        # TODO Maybe do it better
        # Predicates telling whether two disks given as (c1, c2, r) triples overlap, per coordinates type
        intersection_tests = {
            "xy": lambda first_disk, second_disk: np.sqrt((first_disk[0] - second_disk[0]) ** 2
                                                          + (first_disk[1] - second_disk[1]) ** 2)
                                                  <= first_disk[2] + second_disk[2],
            "rt": lambda first_disk, second_disk: np.sqrt((first_disk[0] * np.cos(first_disk[1])
                                                           - second_disk[0] * np.cos(second_disk[1])) ** 2
                                                          + (first_disk[0] * np.sin(first_disk[1])
                                                             - second_disk[0] * np.sin(second_disk[1])) ** 2)
                                                  <= first_disk[2] + second_disk[2]
        }
        if coordinates_type in intersection_tests:
            disks_intersect = intersection_tests[coordinates_type]
            disks_args = np.reshape(disks_params, (-1, 3))
            # Check, if the polydisk is connected by checking if one iteration of DFS of the corresponding
            # undirected graph visits all of the graph's vertices. Check the vertex corresponding to a free disk in
            # constrained versions of polydisks.
            disks_visited = np.full(shape=disks_args.shape[0], fill_value=False)

            # TODO Check if it is correct and sufficiently optimal
            def polydisk_dfs_visit(disk_index: int) -> None:
                current_disk_args = disks_args[disk_index]
                disks_visited[disk_index] = True
                for checked_disk_index, checked_disk_args in enumerate(disks_args):
                    if not disks_visited[checked_disk_index] \
                            and disks_intersect(checked_disk_args, current_disk_args):
                        polydisk_dfs_visit(disk_index=checked_disk_index)

            polydisk_dfs_visit(disk_index=0)
            return np.all(disks_visited)
        else:
            raise NotImplementedError("Checking if a polydisk with attributes given in {} coordinates is connected"
                                      " is not implemented yet.".format(coordinates_type))

    @classmethod
    def draw_particle(cls, particle_attributes: str, scaling_factor: float, color: str,
                      arg: Optional[np.ndarray] = None, std_devs: Optional[np.ndarray] = None,
                      covariance_matrix: Optional[np.ndarray] = None, part_std_devs: Optional[np.ndarray] = None) \
            -> matplotlib.offsetbox.DrawingArea:
        """
        Draw the polydisk described by particle_attributes into a matplotlib DrawingArea.
        If part_std_devs is given, disks' coordinates standard deviations are drawn as arrows.
        """
        # Extract particle data
        # Scale polydisks so that they have unitary area
        # NOTE: np.float was removed in NumPy 1.24, so the builtin float is used instead
        part_data = np.array(particle_attributes.split(" ")[2:-1], dtype=float).reshape(-1, 3) \
            / np.sqrt(float(particle_attributes.rpartition(" ")[2]))
        if part_std_devs is not None:
            std_devs_data = part_std_devs.reshape(-1, 3) / np.sqrt(float(particle_attributes.rpartition(" ")[2]))
        # Draw particle
        # Get polydisk drawing's width and height
        if part_std_devs is None:
            x_min = np.min(part_data[:, 0] - part_data[:, 2])
            x_max = np.max(part_data[:, 0] + part_data[:, 2])
            y_min = np.min(part_data[:, 1] - part_data[:, 2])
            y_max = np.max(part_data[:, 1] + part_data[:, 2])
        else:
            x_min = np.min(np.concatenate((part_data[:, 0] - part_data[:, 2], part_data[:, 0] - std_devs_data[:, 0])))
            x_max = np.max(np.concatenate((part_data[:, 0] + part_data[:, 2], part_data[:, 0] + std_devs_data[:, 0])))
            y_min = np.min(np.concatenate((part_data[:, 1] - part_data[:, 2], part_data[:, 1] - std_devs_data[:, 1])))
            y_max = np.max(np.concatenate((part_data[:, 1] + part_data[:, 2], part_data[:, 1] + std_devs_data[:, 1])))
            # TODO Take into account also radii standard deviations
        drawing_area = matplotlib.offsetbox.DrawingArea(scaling_factor * (x_max - x_min),
                                                        scaling_factor * (y_max - y_min),
                                                        scaling_factor * -x_min,
                                                        scaling_factor * -y_min)
        for disk_args in part_data:
            disk = matplotlib.patches.Circle((scaling_factor * disk_args[0], scaling_factor * disk_args[1]),
                                             scaling_factor * disk_args[2],
                                             color=color)
            # transform=matplotlib.transforms.IdentityTransform() - same as transform=None, probably different
            # than transform used when this argument is not passed.
            drawing_area.add_artist(disk)
        for disk_num, disk_args in enumerate(part_data):
            if part_std_devs is None:
                disk_label = matplotlib.text.Text(x=scaling_factor * disk_args[0], y=scaling_factor * disk_args[1],
                                                  text=str(disk_num),
                                                  horizontalalignment="center",
                                                  verticalalignment="center",
                                                  fontsize=11)
                drawing_area.add_artist(disk_label)
            else:
                # With standard deviations drawn, labels are shifted off-center and made smaller
                disk_label = matplotlib.text.Text(x=scaling_factor * disk_args[0] + scaling_factor * disk_args[2] / 2,
                                                  y=scaling_factor * disk_args[1] + scaling_factor * disk_args[2] / 2,
                                                  text=str(disk_num),
                                                  horizontalalignment="center",
                                                  verticalalignment="center",
                                                  fontsize=9)
                drawing_area.add_artist(disk_label)
                # arrow_style = matplotlib.patches.ArrowStyle("simple", head_width=1.2)  # Causes a bug in matplotlib
                # Head lengths are not scaled and for small standard deviations heads are longer than arrow, so one
                # solution is to make them not visible
                # TODO Make arrows lengths correct while using arrows without heads
                arrow_style = matplotlib.patches.ArrowStyle("->", head_length=0.)
                center = (scaling_factor * disk_args[0], scaling_factor * disk_args[1])
                ticks = [(center[0] + scaling_factor * std_devs_data[disk_num][0], center[1]),
                         (center[0] - scaling_factor * std_devs_data[disk_num][0], center[1]),
                         (center[0], center[1] + scaling_factor * std_devs_data[disk_num][1]),
                         (center[0], center[1] - scaling_factor * std_devs_data[disk_num][1])]
                for tick in ticks:
                    std_dev_arrow = matplotlib.patches.FancyArrowPatch(
                        center,
                        tick,
                        arrowstyle=arrow_style,
                        shrinkA=0,
                        shrinkB=0)
                    drawing_area.add_artist(std_dev_arrow)
                # TODO Take into account also radii standard deviations
        return drawing_area

    # TODO Make it a class method?
    def wolfram_polydisk_area(self, disks_params: np.ndarray) -> float:
        """Calculate the area of a polydisk using Wolfram Kernel script"""
        disks_arg = np.reshape(disks_params, (-1, 3))
        wolfram_disks_list = ["Disk[{{{},{}}},{}]".format(*disk) for disk in disks_arg]
        wolfram_disks_str = "{" + ",".join(wolfram_disks_list) + "}"
        wolfram_code = "N[Area[Region[Apply[RegionUnion,{}]]]]".format(wolfram_disks_str)
        # Worked wrong together with ThreadPool parallelization, even when using Python 3.8.1:
        # wolfram_proc_arguments = [_wolfram_path, "-code", wolfram_code]
        # area_str = subprocess.check_output(wolfram_proc_arguments, stderr=subprocess.STDOUT)
        self.wolfram_polydisk_area_eval_num += 1
        output_file_name = self.output_dir + "/wolfram-polydisk-area-" + str(self.wolfram_polydisk_area_eval_num)\
            + ".txt"
        # TODO Check, if output file necessarily has to be created when using system with ">" redirection
        with open(output_file_name, "w+") as output_file:
            pass
        # The solution below works correctly (NOTE: os.system builds a shell command from internal,
        # trusted data only):
        os.system(" ".join([_wolfram_path, "-code", "'" + wolfram_code + "'", ">", output_file_name]))
        area_str = ""
        with open(output_file_name, "r") as output_file:
            for line in output_file:
                area_str += line
        os.remove(output_file_name)
        return float(area_str)

    @staticmethod
    def wolframclient_polydisk_area(disks_params: np.ndarray) -> float:
        """Calculate the area of a polydisk using wolframclient"""
        disks_arg = np.reshape(disks_params, (-1, 3))
        wl_session = WolframLanguageSession()
        w_disks = [wl.Disk([disk[0], disk[1]], disk[2]) for disk in disks_arg]
        area = wl_session.evaluate(wl.N(wl.Area(wl.Region(wl.Apply(wl.RegionUnion, w_disks)))))
        # A way to export image with polydisk to file:
        # wl_session.evaluate(
        #     wl.Export("/path/to/file/polydisk.pdf",
        #               wl.Graphics([wl.Darker(wl.Green, 0.45), w_disks])))
        wl_session.terminate()
        return area

    @staticmethod
    def analytical_polydisk_area(disks_params: np.ndarray) -> float:
        """Calculate the area of a connected polydisk analytically"""
        # TODO Check, if it works for 1-disk, 2-disk and 3-disk
        accuracy = 10 ** -10
        disks_arg = np.reshape(disks_params, (-1, 3))
        disks = pd.DataFrame(data={"origin": list(disks_arg[:, [0, 1]]), "radius": disks_arg[:, 2]})
        disks_number = disks.shape[0]
        # Get the set of disks intersection points and discard the disks entirely contained in other disks
        # NOTE: np.int and np.float were removed in NumPy 1.24, so the builtin types are used instead
        intersection_points = pd.DataFrame(index=pd.MultiIndex(levels=[[], [], []],
                                                               codes=[[], [], []],
                                                               names=["first_disk", "second_disk", "point_num"],
                                                               dtype=int),
                                           columns=["point_x", "point_y"],
                                           dtype=float)
        inner_disks_indices = set()
        for i in range(0, disks_number):
            first_disk = disks.iloc[i]
            for j in range(i + 1, disks_number):
                second_disk = disks.iloc[j]
                distance = np.linalg.norm(second_disk.at["origin"] - first_disk.at["origin"])
                inclusion_distance = np.abs(first_disk.at["radius"] - second_disk.at["radius"])
                disjointness_distance = first_disk.at["radius"] + second_disk.at["radius"]
                # Check, if the disks intersect in two points or one disk is contained in the other
                if inclusion_distance < distance < disjointness_distance:
                    # The disks intersect in two points
                    # Calculate intersection points
                    # See https://math.stackexchange.com/questions/256100/how-can-i-find-the-points-at-which-two-circles
                    # -intersect
                    middle_radical_point = (first_disk.at["origin"] + second_disk.at["origin"]) / 2 \
                        + (np.power(first_disk.at["radius"], 2)
                           - np.power(second_disk.at["radius"], 2)) / (2 * np.power(distance, 2)) \
                        * (second_disk.at["origin"] - first_disk.at["origin"])
                    radical_vector = np.sqrt(2 * (np.power(first_disk.at["radius"], 2)
                                                  + np.power(second_disk.at["radius"], 2)) / np.power(distance, 2)
                                             - np.power(np.power(first_disk.at["radius"], 2)
                                                        - np.power(second_disk.at["radius"], 2), 2)
                                             / np.power(distance, 4) - 1) / 2 \
                        * np.array([second_disk.at["origin"][1] - first_disk.at["origin"][1],
                                    first_disk.at["origin"][0] - second_disk.at["origin"][0]])
                    intersection_points.loc[(i, j, 0)] = middle_radical_point + radical_vector
                    intersection_points.loc[(i, j, 1)] = middle_radical_point - radical_vector
                elif distance <= inclusion_distance:
                    # One disk is contained in the other
                    if first_disk.at["radius"] <= second_disk.at["radius"]:
                        inner_disks_indices.add(i)
                    else:
                        inner_disks_indices.add(j)

        def disks_intersection_area(disks_indices: Tuple[int, ...]) -> float:
            """Calculate the area of intersection of disks collection analytically"""
            # Choose the "vertices" of the intersection region:
            intersecting_disks_number = len(disks_indices)
            if intersecting_disks_number == 1:
                # Return the area of the disk:
                radius = disks.iloc[disks_indices[0]].at["radius"]
                return np.pi * radius * radius
            intersection_points_indices = []
            for i in range(0, intersecting_disks_number):
                for j in range(i + 1, intersecting_disks_number):
                    # Check, if the disks intersect in two points:
                    if (disks_indices[i], disks_indices[j], 0) not in intersection_points.index:
                        continue
                    for k in [0, 1]:
                        point_index = (disks_indices[i], disks_indices[j], k)
                        # Check, if the point belongs to all of the intersecting disks:
                        inner_point = True
                        for disk_index in disks_indices:
                            if np.linalg.norm(intersection_points.loc[point_index].values
                                              - disks.iloc[disk_index].at["origin"]) \
                                    > disks.iloc[disk_index].at["radius"] + accuracy:
                                inner_point = False
                                break
                        # If it does, it is a "vertex" of the intersection region:
                        if inner_point:
                            intersection_points_indices.append(point_index)
            # Check, if disks are disjoint:
            if len(intersection_points_indices) == 0:
                return 0
            # Calculate the area of the intersection region:
            # TODO Check, what to do in the cases when the intersection is an intersection of two disks or a disk
            # TODO Implement this for unconnected polydisks
            # intersection_points.loc[intersection_points_indices] - pd.DataFrame with intersection points
            # Find the order in which the intersection points form a convex polygon
            vertices = intersection_points.loc[intersection_points_indices]
            center_point = vertices.mean()
            vertices -= center_point

            def to_polar_coordinates(point: np.ndarray) -> np.ndarray:
                # Convert a Cartesian (x, y) point into (r, t) with t in [0, 2*pi)
                x, y = point
                r = np.sqrt(x * x + y * y)
                if r == 0:
                    t = 0
                else:
                    angle = np.arccos(x / r)
                    if y >= 0:
                        t = angle
                    else:
                        t = 2 * np.pi - angle
                return np.array([r, t])

            vertices_polar = np.apply_along_axis(func1d=to_polar_coordinates, axis=1, arr=vertices)
            sorted_vertices_indices = np.argsort(vertices_polar[:, 1])
            vertices_polar = vertices_polar[sorted_vertices_indices]
            # I assume that there are at least 3 vertices
            # Intersection region is a sum of a convex polygon and "caps"
            area: float = 0
            vertices_number = len(intersection_points_indices)
            phase = 0
            for i in range(vertices_number):
                j = i + 1
                if j == vertices_number:
                    j = 0
                    phase = 2 * np.pi
                # Add area of the triangle
                triangle_area = vertices_polar[i, 0] * vertices_polar[j, 0] \
                    * np.sin(vertices_polar[j, 1] + phase - vertices_polar[i, 1]) / 2
                area += triangle_area
                # Find index of the disk to which the arc belongs to
                first_int_point_index = intersection_points_indices[sorted_vertices_indices[i]]
                second_int_point_index = intersection_points_indices[sorted_vertices_indices[j]]
                if first_int_point_index[0] == second_int_point_index[0] \
                        or first_int_point_index[0] == second_int_point_index[1]:
                    if first_int_point_index[1] == second_int_point_index[1]:
                        disk_index = first_int_point_index[first_int_point_index[2]]
                        # (first_int_point_index[2] + 1) % 2
                    else:
                        disk_index = first_int_point_index[0]
                else:
                    disk_index = first_int_point_index[1]
                # Calculate area of the "cap"
                disk = disks.iloc[disk_index]
                origin = disk.at["origin"]
                radius = disk.at["radius"]
                first_int_point = intersection_points.loc[first_int_point_index]
                second_int_point = intersection_points.loc[second_int_point_index]
                int_point_vectors_product = np.cross(first_int_point - origin, second_int_point - origin)
                smaller_part_of_disk = int_point_vectors_product >= 0
                if not smaller_part_of_disk:
                    int_point_vectors_product = -int_point_vectors_product
                # arc_area = np.arcsin(int_point_vectors_product / (radius * radius)) * radius * radius / 2
                arc_area = np.arccos(np.dot(first_int_point - origin, second_int_point - origin)
                                     / (radius * radius)) * radius * radius / 2
                cap_triangle_area = int_point_vectors_product / 2
                small_cap_area: float = arc_area - cap_triangle_area
                cap_area: float = small_cap_area if smaller_part_of_disk else np.pi * radius * radius - small_cap_area
                # Add area of the "cap"
                area += cap_area
            return area

        # Calculate the area of the polydisk using inclusion-exclusion principle
        area: float = 0
        all_disks_indices: Set[int] = set(range(disks_number))
        disks_indices: Set[int] = all_disks_indices.difference(inner_disks_indices)
        sign = 1
        # See https://stackoverflow.com/questions/1482308/how-to-get-all-subsets-of-a-set-powerset ,
        # https://docs.python.org/3/library/itertools.html#itertools-recipes
        for n in range(1, len(disks_indices) + 1):
            for disks_indices_subset in combinations(disks_indices, n):
                area += sign * disks_intersection_area(disks_indices_subset)
            sign *= -1
        # BUGFIX: return only the area - previously the intersection points DataFrame was returned
        # as well, which violated the declared float return type and made arg_to_particle_attributes
        # embed a stringified tuple into particleAttributes
        return area

    @classmethod
    def arg_to_particle_attributes(cls, arg: np.ndarray) -> str:
        """Function returning rsa3d program's parameter particleAttributes based on arg"""
        coordinates_type, disks_params = cls.arg_to_polydisk_attributes(arg)
        disks_num = disks_params.size // 3
        # area = cls.wolfram_polydisk_area(disks_params)
        area = cls.analytical_polydisk_area(disks_params)
        particle_attributes_list = [str(disks_num), coordinates_type]
        # TODO Maybe do it in a more simple way
        # NOTE: np.unicode was removed in NumPy 1.24, so the builtin str is used instead
        particle_attributes_list.extend(disks_params.astype(str).tolist())
        particle_attributes_list.append(str(area))
        return " ".join(particle_attributes_list)
@opt_class("Frpd")
class FixedRadiiXYPolydiskRSACMAESOpt(PolydiskRSACMAESOpt):
    """
    CMA-ES optimization of the packing fraction of RSA packings built of unions of unit-radius
    disks. Every disk center's coordinates are free optimization parameters.
    """
    default_rsa_parameters = dict(
        PolydiskRSACMAESOpt.default_rsa_parameters,  # super().default_rsa_parameters,
        **{
            "maxVoxels": "4000000",
            "requestedAngularVoxelSize": "0.3",
            "minDx": "0.0",
            "from": "0",
            "collectors": "5",
            "split": "100000",
            "boundaryConditions": "periodic",
        })

    def get_arg_signature(self) -> str:
        """Return a signature encoding the number of disks and the initial standard deviations."""
        disks_count = self.initial_mean.size // 2
        return "disks-{}-initstds-{}".format(disks_count, self.initial_stddevs)

    # TODO Check, if constructor has to be overwritten
    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        return initial_mean_fixed_radii_disks(opt_mode_args)

    @classmethod
    def arg_to_particle_parameters(cls, arg: np.ndarray) -> np.ndarray:
        """
        Return the polydisk's parameters as a NumPy ndarray of c01 c02 r0 c11 c12 r1 ... floats
        (disks' coordinates and radii).
        """
        # A unit radius is inserted after each (x, y) coordinate pair
        radius_positions = np.arange(2, arg.size + 1, 2)
        return np.insert(arg, radius_positions, 1.)

    @classmethod
    def arg_to_polydisk_attributes(cls, arg: np.ndarray) -> Tuple[str, np.ndarray]:
        """
        Return part of Polydisk's particleAttributes as a tuple whose first element is the "xy" or
        "rt" coordinates-type string and whose second element is a NumPy ndarray of
        c01 c02 r0 c11 c12 r1 ... floats (disks' coordinates and radii).
        """
        return "xy", cls.arg_to_particle_parameters(arg)

    @classmethod
    def stddevs_to_particle_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        # Radii are fixed, so zeros are inserted as their standard deviations
        radius_positions = np.arange(2, stddevs.size + 1, 2)
        return np.insert(stddevs, radius_positions, 0.)
@opt_class("Cfrpd")
class ConstrFixedRadiiXYPolydiskRSACMAESOpt(PolydiskRSACMAESOpt):
    """
    Class for performing CMA-ES optimization of packing fraction of RSA packings built of unions of disks
    with unit radius. The last disk is placed at (0, 0), the last but one at (x, 0) and others are free.
    """
    default_rsa_parameters = dict(PolydiskRSACMAESOpt.default_rsa_parameters,  # super().default_rsa_parameters,
                                  **{"maxVoxels": "4000000",
                                     "requestedAngularVoxelSize": "0.3",
                                     "minDx": "0.0",
                                     "from": "0",
                                     "collectors": "5",
                                     "split": "100000",
                                     "boundaryConditions": "periodic"})

    def get_arg_signature(self) -> str:
        """Return a signature encoding the number of disks and the initial standard deviations"""
        # arg holds 2 coordinates per free disk plus a single x coordinate of the (x, 0) disk;
        # the two constrained disks are added on top of the free ones
        disks_num = (self.initial_mean.size - 1) // 2 + 2
        return "disks-" + str(disks_num) + "-initstds-" + str(self.initial_stddevs)

    # TODO Check, if constructor has to be overwritten
    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        return initial_mean_constr_fixed_radii_disks(opt_mode_args)

    @classmethod
    def arg_to_particle_parameters(cls, arg: np.ndarray) -> np.ndarray:
        """
        Function returning polydisk's parameters in a numpy ndarray with c01 c02 r0 c11 c12 r1 ... floats (disks'
        coordinates and radii)
        """
        # Insert unit radii after the free disks' coordinate pairs (the trailing lone value of arg
        # is the x coordinate of the constrained (x, 0) disk, hence np.arange stops before arg.size)
        arg_with_standard_disks_radii = np.insert(arg, np.arange(2, arg.size, 2), 1.)
        # Append "0., 1." to complete the (x, 0) disk with y = 0 and r = 1, and "0., 0., 1." for the
        # unit disk fixed at the origin
        arg_with_all_disks = np.concatenate((arg_with_standard_disks_radii, np.array([0., 1., 0., 0., 1.])))
        return arg_with_all_disks

    @classmethod
    def arg_to_polydisk_attributes(cls, arg: np.ndarray) -> Tuple[str, np.ndarray]:
        """
        Function returning part of Polydisk's particleAttributes in a tuple, which first element is \"xy\" or \"rt\"
        string indicating type of coordinates and the second is a numpy ndarray with c01 c02 r0 c11 c12 r1 ... floats
        (disks' coordinates and radii)
        """
        arg_with_all_disks = cls.arg_to_particle_parameters(arg)
        return "xy", arg_with_all_disks

    @classmethod
    def stddevs_to_particle_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        # Radii are fixed, so zeros are inserted as their standard deviations
        stddevs_with_standard_disks_radii = np.insert(stddevs, np.arange(2, stddevs.size, 2), 0.)
        # The five appended parameters of the constrained disks are not optimized - zero deviations.
        # NOTE: np.float was removed in NumPy 1.24, so the builtin float is used instead
        stddevs_with_all_disks = np.concatenate((stddevs_with_standard_disks_radii, np.zeros(5, dtype=float)))
        return stddevs_with_all_disks
class PolygonRSACMAESOpt(RSACMAESOptimization, metaclass=abc.ABCMeta):
    """
    Abstract base class for CMA-ES optimization of the packing fraction of RSA packings built of
    polygons. Concrete subclasses define the candidate points and how the polygon's vertices are
    selected from them.
    """
    mode_rsa_parameters: dict = dict(RSACMAESOptimization.mode_rsa_parameters,
                                     surfaceDimension="2", particleType="Polygon")

    # string indicating type of coordinates, e.g. "xy" or "rt"; set by concrete subclasses
    coordinates_type: Optional[str] = None

    @classmethod
    @abc.abstractmethod
    def select_vertices(cls, points: np.ndarray) -> np.ndarray:
        """
        Function selecting polygon's vertices from points returned by the arg_to_points_coordinates method. Given
        a NumPy ndarray of shape (n, 2) with points' coordinates, it returns indices of the subsequent polygon's
        vertices in a NumPy ndarray of shape (n,) with integers
        """
        pass

    @classmethod
    @abc.abstractmethod
    def arg_to_points_coordinates(cls, arg: np.ndarray) -> np.ndarray:
        """
        Function returning coordinates of points being candidates for becoming polygon's vertices in a NumPy ndarray of
        shape (n, 2) with floats
        """
        pass

    @classmethod
    @abc.abstractmethod
    def points_coordinates_to_arg(cls, points: np.ndarray) -> np.ndarray:
        """
        Inverse of the arg_to_points_coordinates function
        """
        pass

    @classmethod
    def arg_to_particle_parameters(cls, arg: np.ndarray) -> np.ndarray:
        """
        Function returning points' (not polygon's) parameters based on arg
        """
        points_parameters = cls.arg_to_points_coordinates(arg).flatten()
        return points_parameters

    @classmethod
    def arg_to_particle_attributes(cls, arg: np.ndarray) -> str:
        """Function returning rsa3d program's parameter particleAttributes based on arg"""
        points_parameters = cls.arg_to_particle_parameters(arg)
        points_coordinates = points_parameters.reshape(-1, 2)
        vertices = points_coordinates[cls.select_vertices(points_coordinates)].flatten()
        vertices_num = vertices.size // 2
        particle_attributes_list = [str(vertices_num), cls.coordinates_type]
        # NOTE: np.unicode was removed in NumPy 1.24, so the builtin str is used instead
        particle_attributes_list.extend(vertices.astype(str).tolist())
        # Vertices are followed by their count and the 0..n-1 indices defining the polygon's sides
        particle_attributes_list.append(str(vertices_num))
        particle_attributes_list.extend(np.arange(vertices_num).astype(str).tolist())
        return " ".join(particle_attributes_list)

    @classmethod
    def stddevs_to_points_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        """
        Function returning standard deviations of points being candidates for becoming polygon's vertices in a form of
        a NumPy ndarray of shape (n, 2) with floats representing points' standard deviations. By default it calculates
        points' standard deviations by sampling.
        """
        return super().stddevs_to_particle_stddevs(arg, stddevs, covariance_matrix).reshape(-1, 2)

    @classmethod
    def stddevs_to_particle_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        # Report only the standard deviations of the points that became the polygon's vertices
        points_stddevs = cls.stddevs_to_points_stddevs(arg, stddevs, covariance_matrix)
        points_coordinates = cls.arg_to_points_coordinates(arg)
        return points_stddevs[cls.select_vertices(points_coordinates)].flatten()

    @classmethod
    def draw_particle(cls, particle_attributes: str, scaling_factor: float, color: str,
                      arg: Optional[np.ndarray] = None, std_devs: Optional[np.ndarray] = None,
                      covariance_matrix: Optional[np.ndarray] = None, part_std_devs: Optional[np.ndarray] = None) \
            -> matplotlib.offsetbox.DrawingArea:
        # TODO Implement it
        pass
class ConvexPolygonRSACMAESOpt(PolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """
    Abstract base class for CMA-ES optimization of RSA packings built of convex polygons.
    The polygon's vertices are selected as the convex hull of the candidate points.
    """
    @classmethod
    def select_vertices(cls, points: np.ndarray) -> np.ndarray:
        """
        Select the polygon's vertices as the convex hull of points; returns indices of the
        subsequent vertices (counterclockwise, as given by scipy.spatial.ConvexHull).
        """
        if cls.coordinates_type != "xy":
            # ConvexHull constructor requires Cartesian coordinates, so a conversion has to be made
            conversions = {
                "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
            }
            if cls.coordinates_type in conversions:
                # TODO Test this conversion
                points = np.apply_along_axis(func1d=conversions[cls.coordinates_type], axis=1, arr=points)
            else:
                # BUGFIX: restored the missing space before "yet." in the concatenated message
                raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not implemented"
                                          " yet.".format(cls.coordinates_type))
        if np.all(points == points[0, :]):
            # Degenerate case of initializing mean of the distribution with a sequence of equal points
            # TODO Check, if this is the right thing to do
            return np.array([0])
        # TODO Maybe deal with other degenerate cases
        convex_hull = ConvexHull(points)
        return convex_hull.vertices

    @classmethod
    def swap_arg(cls, arg: np.ndarray) -> np.ndarray:
        """
        Project every candidate point that fell strictly inside the convex hull onto the hull's
        boundary, along the ray from the hull's center through the point, and return the argument
        vector corresponding to the modified points.
        """
        points = cls.arg_to_points_coordinates(arg)
        if cls.coordinates_type != "xy":
            conversions = {
                "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
            }
            if cls.coordinates_type in conversions:
                points_xy = np.apply_along_axis(func1d=conversions[cls.coordinates_type], axis=1, arr=points)
            else:
                # BUGFIX: restored the missing space before "yet." in the concatenated message
                raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not implemented"
                                          " yet.".format(cls.coordinates_type))
        else:
            points_xy = points
        points_num = points_xy.shape[0]
        vertices_indices = cls.select_vertices(points_xy)
        vertices_num = vertices_indices.size
        vertices = points_xy[vertices_indices]
        center = np.mean(vertices, axis=0)
        inner_points_indices = np.setdiff1d(np.arange(points_num), vertices_indices, assume_unique=True)
        # NOTE: np.float was removed in NumPy 1.24, so the builtin float is used instead
        transformed_inner_points_xy = np.empty((0, 2), dtype=float)
        for inner_point_index in inner_points_indices:
            inner_point = points_xy[inner_point_index]
            inner_point_vector = inner_point - center
            # Find the hull side crossed by the ray from the center through the inner point: the ray
            # lies between vertices[i] and the next vertex iff the cross products differ in sign.
            # BUGFIX: the closing side (last vertex -> first vertex) was previously never selected,
            # so the fall-through case read vertices[vertices_num] and raised IndexError; the modulo
            # wrap makes the fall-through use the closing side while behaving identically otherwise.
            side_start = vertices_num - 1
            for i in range(vertices_num - 1):
                if np.cross(vertices[i] - center, inner_point_vector) \
                        * np.cross(vertices[i + 1] - center, inner_point_vector) < 0:
                    side_start = i
                    break
            side_end = (side_start + 1) % vertices_num
            # TODO Handle case in which vertices[side_end, 0] == vertices[side_start, 0]
            side_slope = (vertices[side_end, 1] - vertices[side_start, 1]) \
                / (vertices[side_end, 0] - vertices[side_start, 0])
            transf_line_slope = inner_point_vector[1] / inner_point_vector[0]
            # Intersection of the ray's line with the side's line
            transf_inner_point_x = (transf_line_slope * center[0] - center[1]
                                    - side_slope * vertices[side_start, 0] + vertices[side_start, 1]) \
                / (transf_line_slope - side_slope)
            transf_inner_point_y = transf_line_slope * (transf_inner_point_x - center[0]) + center[1]
            transformed_inner_point_xy = np.array([transf_inner_point_x, transf_inner_point_y])
            transformed_inner_points_xy = np.vstack((transformed_inner_points_xy, transformed_inner_point_xy))
        if cls.coordinates_type != "xy":
            # Convert the projected points back into the subclass' coordinates type
            conversions = {
                "rt": lambda point: np.array([np.sqrt(point[0] ** 2 + point[1] ** 2),
                                              np.arctan(point[1] / point[0]) if point[0] != 0
                                              else np.pi / 2 if point[1] > 0
                                              else 3 * np.pi / 2 if point[1] < 0 else 0])
            }
            if cls.coordinates_type in conversions:
                transformed_inner_points = np.apply_along_axis(func1d=conversions[cls.coordinates_type],
                                                               axis=1,
                                                               arr=transformed_inner_points_xy)
            else:
                # BUGFIX: restored the missing space before "yet." in the concatenated message
                raise NotImplementedError("Conversion of Cartesian coordinates into {} coordinates is not implemented"
                                          " yet.".format(cls.coordinates_type))
        else:
            transformed_inner_points = transformed_inner_points_xy
        points[inner_points_indices] = transformed_inner_points
        swapped_arg = cls.points_coordinates_to_arg(points)
        return swapped_arg
class StarShapedPolygonRSACMAESOpt(PolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Polygon optimization in which the polygon is built by connecting the points in the order of their
    azimuthal angle around the points' center of weight, which yields a star-shaped polygon."""
    @classmethod
    def select_vertices(cls, points: np.ndarray) -> np.ndarray:
        """Return the indices of ``points`` ordered by azimuthal angle around the points' mean.

        Connecting the points in the returned order yields a simple polygon, star-shaped with respect
        to the center of weight of the points. Duplicated points and points that would create collinear
        neighboring sides are removed from the result.

        :param points: 2D array of point coordinates, one point per row (in ``cls.coordinates_type``
            coordinates)
        :return: 1D array of indices into ``points`` (the polygonization order)
        """
        points = np.copy(points)  # NumPy arrays are passed by reference, so this prevents modifying passed object
        if cls.coordinates_type != "xy":
            # Polygonization algorithm converts coordinates to radial with respect to the mean (center of weight) of the
            # points. A conversion is made to make calculations simple. In case of the radial coordinates ("rt"), the
            # center point may be used as the reference, but then it might happen that it is placed outside the convex
            # hull of the points, so the polygonization algorithm would not work. Apart from that, this point might not
            # be near to the center of weight, which would cause that the algorithm would create a non-optimal (highly
            # concave) polygon.
            conversions = {
                "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
            }
            if cls.coordinates_type in conversions:
                # TODO Test this conversion
                points = np.apply_along_axis(func1d=conversions[cls.coordinates_type], axis=1, arr=points)
            else:
                raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not implemented"
                                          "yet.".format(cls.coordinates_type))
        if np.all(points == points[0, :]):
            # Degenerate case of initializing mean of the distribution with a sequence of equal points
            # TODO Check, if this is the right thing to do
            return np.array([0])
        # TODO Maybe deal with other degenerate cases
        mean_point = np.mean(points, axis=0)
        points -= mean_point
        def to_polar_coordinates(point: np.ndarray) -> np.ndarray:
            # Convert a Cartesian point (relative to the mean) to (r, t) with t in [0, 2 * pi)
            x, y = point
            r = np.sqrt(x * x + y * y)
            if r == 0:
                t = 0
            else:
                angle = np.arccos(x / r)
                if y >= 0:
                    t = angle
                else:
                    t = 2 * np.pi - angle
            return np.array([r, t])
        points_polar = np.apply_along_axis(func1d=to_polar_coordinates, axis=1, arr=points)
        # Sort the points by their azimuthal angle; sorted_indices maps sorted positions to input rows
        sorted_indices = np.argsort(points_polar[:, 1])
        points_polar = points_polar[sorted_indices]
        # TODO Maybe test better the following management of exotic cases
        # Remove duplicates (equal points)
        i = 0
        while i < sorted_indices.size - 1:
            j = i + 1
            while j < sorted_indices.size and points_polar[i, 1] == points_polar[j, 1]:
                j += 1
            j -= 1  # j is the index of the last point with the same t coordinate
            if j > i:
                # Some points have the same azimuthal angle
                k = i
                while k < j:
                    l = k + 1
                    while l <= j:
                        # Same t and same r means the points are identical - keep only the first one
                        if points_polar[k, 0] == points_polar[l, 0]:
                            print("Warning: two points are equal, so one point is deleted")
                            sorted_indices = np.delete(arr=sorted_indices, obj=l)
                            points_polar = np.delete(arr=points_polar, obj=l, axis=0)
                            j -= 1
                        else:
                            l += 1
                    k += 1
                i = j + 1
            else:
                i += 1
        # Manage situations when multiple points have the same azimuthal angle
        i = 0
        while i < sorted_indices.size - 1:
            if points_polar[i, 1] == points_polar[i + 1, 1]:  # Maybe use np.isclose instead
                print("Information: some points have the same azimuthal angle")
                j = i + 2  # Finally j will be equal 1 + (the index of the last point with the same t coordinate)
                while j < sorted_indices.size and points_polar[i, 1] == points_polar[j, 1]:
                    j += 1
                if j - i > 2:
                    print("Warning: more than two points have the same azimuthal angle, so at least one point will not"
                          " be included in the polygonization, because then subsequent sides would be collinear")
                radii = points_polar[i:j, 0]
                min_radii_index = np.argmin(radii) + i
                max_radii_index = np.argmax(radii) + i
                if i == 0:
                    # From the point with the minimal r to the point with the maximal r (other option: compare somehow
                    # with the last point, but the last point may also have the same azimuthal angle as another point)
                    min_radii_point_index = sorted_indices[min_radii_index]
                    max_radii_point_index = sorted_indices[max_radii_index]
                    min_radii_point_polar = points_polar[min_radii_index]
                    max_radii_point_polar = points_polar[max_radii_index]
                    sorted_indices = np.delete(arr=sorted_indices, obj=np.s_[i:j])
                    points_polar = np.delete(arr=points_polar, obj=np.s_[i:j], axis=0)
                    sorted_indices = np.insert(arr=sorted_indices, obj=i, values=[min_radii_point_index,
                                                                                  max_radii_point_index])
                    points_polar = np.insert(arr=points_polar, obj=i, values=[min_radii_point_polar,
                                                                              max_radii_point_polar], axis=0)
                else:
                    # Choose a point from two points with minimal and maximal r that is closer to the previous point
                    # (with smaller t). Connect the previous point with this point, and this point with the other point
                    # of this two points
                    previous_point = points[sorted_indices[i - 1]]
                    indices = np.array([min_radii_index, max_radii_index])
                    # min_radii_point_index = sorted_indices[min_radii_index]
                    # max_radii_point_index = sorted_indices[max_radii_index]
                    # min_radii_point_polar = points_polar[min_radii_index]
                    # max_radii_point_polar = points_polar[max_radii_index]
                    # squared_distances = np.array([np.power(points[min_radii_point_index] - previous_point, 2),
                    #                               np.power(points[max_radii_point_index] - previous_point, 2)])
                    points_indices = sorted_indices[indices]
                    polars = points_polar[indices]
                    squared_distances = np.sum(np.power(points[points_indices] - previous_point, 2), axis=1)
                    sorted_indices = np.delete(arr=sorted_indices, obj=np.s_[i:j])
                    points_polar = np.delete(arr=points_polar, obj=np.s_[i:j], axis=0)
                    order = np.argsort(squared_distances)
                    sorted_indices = np.insert(arr=sorted_indices, obj=i, values=points_indices[order])
                    points_polar = np.insert(arr=points_polar, obj=i, values=polars[order], axis=0)
                # The next point (i + 1)-th need not to be checked, because it must have different t than the (i + 2)-th
                i += 2
            else:
                i += 1
        if points_polar[sorted_indices.size - 1, 1] == points_polar[0, 1]:
            print("Warning: all points are collinear")
        # Can logger be set as a class attribute, so that it would be accessible in the class methods?
        # Removing collinear points on the sides
        i = 1
        while i < sorted_indices.size - 1:
            # Zero cross product of consecutive side vectors means the middle point is redundant
            if np.cross(points[sorted_indices[i]] - points[sorted_indices[i - 1]],
                        points[sorted_indices[i + 1]] - points[sorted_indices[i]]) == 0:
                print("Warning: two sides collinear, so one point is deleted")
                sorted_indices = np.delete(arr=sorted_indices, obj=i)
            else:
                i += 1
        # The loop above does not check the sides wrapping around the end of the list - do it explicitly
        if np.cross(points[sorted_indices[sorted_indices.size - 1]] - points[sorted_indices[sorted_indices.size - 2]],
                    points[sorted_indices[0]] - points[sorted_indices[sorted_indices.size - 1]]) == 0:
            print("Warning: two sides collinear, so one point is deleted")
            sorted_indices = np.delete(arr=sorted_indices, obj=sorted_indices.size - 1)
        if np.cross(points[sorted_indices[0]] - points[sorted_indices[sorted_indices.size - 1]],
                    points[sorted_indices[1]] - points[sorted_indices[0]]) == 0:
            print("Warning: two sides collinear, so one point is deleted")
            sorted_indices = np.delete(arr=sorted_indices, obj=0)
        return sorted_indices
class ConstrXYPolygonRSACMAESOpt(PolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Polygon optimization in Cartesian ("xy") coordinates with three coordinates constrained to zero.

    The last three coordinates of the full vertex list are always zero (the last vertex lies at the
    origin and the preceding vertex on the x axis), so the argument vector has 2 * vertices_num - 3
    elements.
    """
    # Inherited defaults merged with the parameters specific to this optimization mode
    default_rsa_parameters = {**PolygonRSACMAESOpt.default_rsa_parameters,
                              "maxVoxels": "4000000",
                              "requestedAngularVoxelSize": "0.3",
                              "minDx": "0.0",
                              "from": "0",
                              "collectors": "5",
                              "split": "100000",
                              "boundaryConditions": "periodic"}
    coordinates_type: str = "xy"

    def get_arg_signature(self) -> str:
        """Return a short identifier encoding the number of vertices and the initial standard deviations."""
        # arg has 2 * vertices_num - 3 elements, hence this inverse formula
        num_vertices = (self.initial_mean.size - 1) // 2 + 2
        return "vertices-" + str(num_vertices) + "-initstds-" + str(self.initial_stddevs)

    @classmethod
    def arg_to_points_coordinates(cls, arg: np.ndarray) -> np.ndarray:
        """Append the three constrained (zero) coordinates and reshape into one point per row."""
        full_coordinates = np.concatenate((arg, np.zeros(3)))
        return full_coordinates.reshape(-1, 2)

    @classmethod
    def points_coordinates_to_arg(cls, points: np.ndarray) -> np.ndarray:
        """Flatten the points and drop the three constrained coordinates at the end."""
        flat_coordinates = points.flatten()
        return flat_coordinates[:-3]

    @classmethod
    def stddevs_to_points_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        """Append zero standard deviations for the three constrained coordinates, one point per row."""
        full_stddevs = np.concatenate((stddevs, np.zeros(3)))
        return full_stddevs.reshape(-1, 2)
class ConstrXYConvexPolygonRSACMAESOpt(ConstrXYPolygonRSACMAESOpt, ConvexPolygonRSACMAESOpt):
    """Convex polygon optimization in constrained "xy" coordinates; behavior comes from the bases."""
    pass
class ConstrXYStarShapedPolygonRSACMAESOpt(ConstrXYPolygonRSACMAESOpt, StarShapedPolygonRSACMAESOpt):
    """Star-shaped polygon optimization in constrained "xy" coordinates; behavior comes from the bases."""
    pass
class UniformTPolygonRSACMAESOpt(PolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Polygon optimization in radial ("rt") coordinates with fixed, uniformly spread azimuthal angles.

    Only the radial coordinates are optimized; they are obtained from the argument through a logistic
    transformation bounded by the class attributes below.
    """
    # Bounds and steepness of the logistic transformation of the radial coordinates;
    # set on concrete subclasses (None here)
    min_radial_coordinate_optclattr: float = None
    max_radial_coordinate_optclattr: float = None
    rad_coord_trans_steepness_optclattr: float = None
    coordinates_type: str = "rt"

    def get_arg_signature(self) -> str:
        """Return a short identifier with vertex count, initial standard deviations and radial bounds."""
        signature_parts = ["vertices-", str(self.initial_mean.size - 1),
                           "-initstds-", str(self.initial_stddevs),
                           "-inr-", str(self.min_radial_coordinate_optclattr),
                           "-outr-", str(self.max_radial_coordinate_optclattr)]
        return "".join(signature_parts)

    @classmethod
    def select_vertices(cls, points: np.ndarray) -> np.ndarray:
        """Every point is a vertex, kept in its original order."""
        return np.arange(len(points))

    @classmethod
    def arg_to_points_coordinates(cls, arg: np.ndarray) -> np.ndarray:
        """Map the argument through the logistic function to radii and pair them with uniform angles."""
        radii = logistic(arg,
                         cls.min_radial_coordinate_optclattr,
                         cls.max_radial_coordinate_optclattr,
                         cls.rad_coord_trans_steepness_optclattr)
        angles = np.linspace(start=0, stop=2 * np.pi, num=arg.size, endpoint=False)
        return np.column_stack((radii, angles))

    @classmethod
    def points_coordinates_to_arg(cls, points: np.ndarray) -> np.ndarray:
        # TODO Implement it
        pass
class RoundedPolygonRSACMAESOpt(PolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Optimization of polygons whose sides and corners are rounded with disks of a common radius.

    The particle is described by the rounding radius together with an underlying polygon; concrete
    subclasses define how the CMA-ES argument vector splits into these two parts.
    """
    mode_rsa_parameters: dict = dict(RSACMAESOptimization.mode_rsa_parameters,
                                     particleType="RoundedPolygon")

    @classmethod
    @abc.abstractmethod
    def arg_to_radius_and_polygon_arg(cls, arg: np.ndarray) -> Tuple[float, np.ndarray]:
        """
        Function returning tuple containing radius of rounding of the polygon and the polygon argument based on the
        rounded polygon argument
        TODO Redact docstring
        """
        pass

    @classmethod
    @abc.abstractmethod
    def stddevs_to_radius_and_polygon_stddevs(cls, stddevs: np.ndarray) -> Tuple[float, np.ndarray]:
        """
        Function returning tuple containing rounding radius' standard deviation and the polygon argument's standard
        deviations based on the rounded polygon argument's standard deviations
        TODO Redact docstring
        """
        pass

    @classmethod
    def arg_to_particle_attributes(cls, arg: np.ndarray) -> str:
        """Function returning rsa3d program's parameter particleAttributes based on arg.

        The rounding radius is prepended to the polygon's attributes; for a concave polygon the
        rounded particle's area is additionally appended at the end.
        """
        radius, polygon_arg = cls.arg_to_radius_and_polygon_arg(arg)
        polygon_particle_attributes = super().arg_to_particle_attributes(polygon_arg)
        if issubclass(cls, ConvexPolygonRSACMAESOpt):
            # If the polygon is convex, then don't pass its area
            return str(radius) + " " + polygon_particle_attributes
        # Extract particle data
        particle_attributes_list = polygon_particle_attributes.split(" ")
        vertices_num = int(particle_attributes_list[0])
        coordinates_type = particle_attributes_list[1]
        # dtype=float: np.float was a deprecated alias of the builtin float, removed in NumPy 1.24
        part_data = np.array(particle_attributes_list[2:2 + 2 * vertices_num],
                             dtype=float).reshape(-1, 2)
        if coordinates_type != "xy":
            conversions = {
                "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
            }
            if coordinates_type in conversions:
                # TODO Test this conversion
                part_data = np.apply_along_axis(func1d=conversions[coordinates_type], axis=1, arr=part_data)
            else:
                raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not implemented"
                                          "yet.".format(coordinates_type))
        if ConvexHull(part_data).vertices.size == vertices_num:
            # If the polygon is convex, then don't pass its area
            return str(radius) + " " + polygon_particle_attributes
        else:
            # Polygon is concave, calculate and pass its area
            # Method of calculation valid for simple polygons
            polygon = shapely.geometry.Polygon(shell=part_data)
            rounded_polygon = polygon.buffer(distance=radius, resolution=10 ** 6)
            # For the resolution of 10^6, the relative error for calculation of the unitary disk area approximately
            # equals 4.0 * 10^-13 and the time of this calculation is a few seconds
            area = rounded_polygon.area
            return " ".join([str(radius), polygon_particle_attributes, str(area)])

    @classmethod
    def stddevs_to_particle_stddevs(cls, arg: np.ndarray, stddevs: np.ndarray, covariance_matrix: np.ndarray) \
            -> np.ndarray:
        """Prepend the rounding radius' standard deviation to the polygon's particle standard deviations."""
        radius_stddev, polygon_stddevs = cls.stddevs_to_radius_and_polygon_stddevs(stddevs)
        polygon_arg = cls.arg_to_radius_and_polygon_arg(arg)[1]
        polygon_covariance_matrix = covariance_matrix[1:, 1:]  # TODO Maybe create a method for that
        polygon_particle_stddevs = super().stddevs_to_particle_stddevs(polygon_arg,
                                                                       polygon_stddevs,
                                                                       polygon_covariance_matrix)
        return np.insert(polygon_particle_stddevs, 0, radius_stddev)

    @classmethod
    def draw_particle(cls, particle_attributes: str, scaling_factor: float, color: str,
                      arg: Optional[np.ndarray] = None, std_devs: Optional[np.ndarray] = None,
                      covariance_matrix: Optional[np.ndarray] = None, part_std_devs: Optional[np.ndarray] = None) \
            -> matplotlib.offsetbox.DrawingArea:
        """Return a matplotlib DrawingArea with the rounded polygon drawn, normalized to unit area.

        A degenerate particle in which all points coincide is drawn as a disk. If standard deviations
        are given, they are drawn as bars anchored at the points' positions.

        :param particle_attributes: rsa3d particleAttributes string: rounding radius, vertex count,
            coordinates type, vertex coordinates and (for concave polygons) the particle's area
        :param scaling_factor: scale between particle coordinates and drawing units
        :param color: matplotlib color of the particle
        :param arg: optimization argument; if given, points decoded from it are used for drawing
        :param std_devs: standard deviations of the argument
        :param covariance_matrix: covariance matrix of the argument
        :param part_std_devs: particle standard deviations; if given, standard deviation bars are drawn
        """
        # Extract particle data
        particle_attributes_list = particle_attributes.split(" ")
        radius = float(particle_attributes_list[0])
        vertices_num = int(particle_attributes_list[1])
        coordinates_type = particle_attributes_list[2]
        # dtype=float: np.float was a deprecated alias of the builtin float, removed in NumPy 1.24
        part_data = np.array(particle_attributes_list[3:3 + 2 * vertices_num],
                             dtype=float).reshape(-1, 2)
        if coordinates_type != "xy":
            conversions = {
                "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
            }
            if coordinates_type in conversions:
                # TODO Test this conversion
                part_data = np.apply_along_axis(func1d=conversions[coordinates_type], axis=1, arr=part_data)
            else:
                raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not implemented"
                                          "yet.".format(coordinates_type))
        if np.all(part_data == part_data[0, :]):
            # Degenerate case of initializing mean of the distribution with a sequence of equal points
            sqrt_area = np.sqrt(np.pi) * radius
            disk_center = part_data[0, :] / sqrt_area
            radius /= sqrt_area
            if part_std_devs is None:
                drawing_area = matplotlib.offsetbox.DrawingArea(scaling_factor * 2 * radius,
                                                                scaling_factor * 2 * radius,
                                                                scaling_factor * -(disk_center[0] - radius),
                                                                scaling_factor * -(disk_center[1] - radius))
            else:
                polygon_arg = cls.arg_to_radius_and_polygon_arg(arg)[1]
                radius_std_dev, polygon_std_devs = cls.stddevs_to_radius_and_polygon_stddevs(std_devs)
                polygon_covariance_matrix = covariance_matrix[1:, 1:]  # TODO Maybe create a method for that
                points_std_devs = cls.stddevs_to_points_stddevs(polygon_arg,
                                                                polygon_std_devs,
                                                                polygon_covariance_matrix)
                # TODO t coordinate's (angle's) standard deviation should not be scaled
                points_std_devs_data = points_std_devs.reshape(-1, 2) / sqrt_area
                radius_std_dev /= sqrt_area
                # TODO Maybe add drawing rounding radius' standard deviation
                max_x = np.max(np.append(points_std_devs_data[:, 0], radius))
                max_y = np.max(np.append(points_std_devs_data[:, 1], radius))
                drawing_area = matplotlib.offsetbox.DrawingArea(scaling_factor * 2 * max_x,
                                                                scaling_factor * 2 * max_y,
                                                                scaling_factor * -(disk_center[0] - max_x),
                                                                scaling_factor * -(disk_center[1] - max_y))
            disk = matplotlib.patches.Circle((scaling_factor * disk_center[0], scaling_factor * disk_center[1]),
                                             scaling_factor * radius,
                                             color=color)
            drawing_area.add_artist(disk)
            if part_std_devs is not None:
                arrow_style = matplotlib.patches.ArrowStyle("|-|", widthA=0, widthB=1.0)
                center = (scaling_factor * disk_center[0], scaling_factor * disk_center[1])
                if coordinates_type == "xy":
                    for point_std_dev in points_std_devs_data:
                        ticks = [(center[0] + scaling_factor * point_std_dev[0], center[1]),
                                 (center[0] - scaling_factor * point_std_dev[0], center[1]),
                                 (center[0], center[1] + scaling_factor * point_std_dev[1]),
                                 (center[0], center[1] - scaling_factor * point_std_dev[1])]
                        for tick in ticks:
                            std_dev_arrow = matplotlib.patches.FancyArrowPatch(
                                center,
                                tick,
                                arrowstyle=arrow_style,
                                shrinkA=0,
                                shrinkB=0)
                            drawing_area.add_artist(std_dev_arrow)
                elif coordinates_type == "rt":
                    # Standard deviation bars along the radial and tangential directions
                    center_r = np.sqrt(center[0] * center[0] + center[1] * center[1])
                    for point_std_dev in points_std_devs_data:
                        arrow_r = (scaling_factor * point_std_dev[0] * center[0] / center_r,
                                   scaling_factor * point_std_dev[0] * center[1] / center_r)
                        arrow_t = (scaling_factor * point_std_dev[1] * center[1] / center_r,
                                   -scaling_factor * point_std_dev[1] * center[0] / center_r)
                        ticks = [(center[0] + arrow_r[0], center[1] + arrow_r[1]),
                                 (center[0] - arrow_r[0], center[1] - arrow_r[1]),
                                 (center[0] + arrow_t[0], center[1] + arrow_t[1]),
                                 (center[0] - arrow_t[0], center[1] - arrow_t[1])]
                        for tick in ticks:
                            std_dev_arrow = matplotlib.patches.FancyArrowPatch(
                                center,
                                tick,
                                arrowstyle=arrow_style,
                                shrinkA=0,
                                shrinkB=0)
                            drawing_area.add_artist(std_dev_arrow)
            return drawing_area
        # Calculate particle area
        if issubclass(cls, ConvexPolygonRSACMAESOpt) or ConvexHull(part_data).vertices.size == vertices_num:
            # Method of calculation valid for convex polygons
            # TODO Maybe add a method calculating particle's area
            center_of_mass = np.mean(part_data, axis=0)
            area = 0.
            for vert_num in range(vertices_num):
                prev_vert_num = vert_num - 1 if vert_num > 0 else vertices_num - 1
                next_vert_num = (vert_num + 1) % vertices_num
                # Triangle (center of mass, current vertex, next vertex)
                area += np.abs(np.cross(part_data[vert_num] - center_of_mass,
                                        part_data[next_vert_num] - center_of_mass)) / 2
                first_segment_vec = part_data[prev_vert_num] - part_data[vert_num]
                second_segment_vec = part_data[next_vert_num] - part_data[vert_num]
                triangle_side_vec = part_data[next_vert_num] - part_data[prev_vert_num]
                triangle_height = np.abs(np.cross(first_segment_vec, second_segment_vec)) \
                                  / np.linalg.norm(triangle_side_vec)
                angle = np.arccos(triangle_height / np.linalg.norm(first_segment_vec)) \
                        + np.arccos(triangle_height / np.linalg.norm(second_segment_vec))
                # Rectangle along the side plus the circular sector at the (convex) corner
                area += radius * (np.linalg.norm(first_segment_vec) + radius * (np.pi - angle) / 2.)
        else:
            # Area is given in the particleAttributes parameter
            area = float(particle_attributes_list[-1])
        # Normalize the drawing to unit particle area
        sqrt_area = np.sqrt(area)
        part_data /= sqrt_area
        radius /= sqrt_area
        if arg is not None:
            polygon_arg = cls.arg_to_radius_and_polygon_arg(arg)[1]
            points_coordinates = cls.arg_to_points_coordinates(polygon_arg)
            # coordinates_type is overwritten, although it should be the same
            if coordinates_type != "xy":
                conversions = {
                    "rt": lambda point: np.array([point[0] * np.cos(point[1]), point[0] * np.sin(point[1])])
                }
                if coordinates_type in conversions:
                    # TODO Test this conversion
                    points_coordinates = np.apply_along_axis(func1d=conversions[coordinates_type],
                                                             axis=1,
                                                             arr=points_coordinates)
                else:
                    raise NotImplementedError("Conversion of {} coordinates into Cartesian coordinates is not"
                                              "implemented yet.".format(coordinates_type))
            points_data = points_coordinates.reshape(-1, 2) / sqrt_area
        if part_std_devs is not None:
            polygon_arg = cls.arg_to_radius_and_polygon_arg(arg)[1]
            radius_std_dev, polygon_std_devs = cls.stddevs_to_radius_and_polygon_stddevs(std_devs)
            polygon_covariance_matrix = covariance_matrix[1:, 1:]  # TODO Maybe create a method for that
            points_std_devs = cls.stddevs_to_points_stddevs(polygon_arg, polygon_std_devs, polygon_covariance_matrix)
            points_std_devs_data = points_std_devs.reshape(-1, 2) / sqrt_area
            # std_devs_data = part_std_devs.reshape(-1, 2) / sqrt_area
            radius_std_dev /= sqrt_area
            # TODO Maybe add drawing rounding radius' standard deviation
        # Draw particle
        # Get polygon drawing's width and height
        if part_std_devs is None:
            shown_points_data = points_data if arg is not None else part_data
            x_min = np.min(shown_points_data[:, 0] - radius)
            x_max = np.max(shown_points_data[:, 0] + radius)
            y_min = np.min(shown_points_data[:, 1] - radius)
            y_max = np.max(shown_points_data[:, 1] + radius)
        else:
            # If part_std_devs are given, arg and std_devs should also be given
            if coordinates_type == "xy":
                x_min = np.min(np.concatenate((points_data[:, 0] - radius,
                                               points_data[:, 0] - points_std_devs_data[:, 0])))
                x_max = np.max(np.concatenate((points_data[:, 0] + radius,
                                               points_data[:, 0] + points_std_devs_data[:, 0])))
                y_min = np.min(np.concatenate((points_data[:, 1] - radius,
                                               points_data[:, 1] - points_std_devs_data[:, 1])))
                y_max = np.max(np.concatenate((points_data[:, 1] + radius,
                                               points_data[:, 1] + points_std_devs_data[:, 1])))
            elif coordinates_type == "rt":
                # Endpoints of the radial and tangential standard deviation bars for each point
                arrows_list = []
                for point_num, point in enumerate(points_data):
                    point_r = np.sqrt(point[0] * point[0] + point[1] * point[1])
                    arrow_r = (points_std_devs_data[point_num][0] * point[0] / point_r,
                               points_std_devs_data[point_num][0] * point[1] / point_r)
                    arrow_t = (points_std_devs_data[point_num][1] * point[1] / point_r,
                               -points_std_devs_data[point_num][1] * point[0] / point_r)
                    ticks = [(point[0] + arrow_r[0], point[1] + arrow_r[1]),
                             (point[0] - arrow_r[0], point[1] - arrow_r[1]),
                             (point[0] + arrow_t[0], point[1] + arrow_t[1]),
                             (point[0] - arrow_t[0], point[1] - arrow_t[1])]
                    arrows_list.extend(ticks)
                arrows = np.array(arrows_list)
                x_min = np.min(np.concatenate((points_data[:, 0] - radius, arrows[:, 0])))
                x_max = np.max(np.concatenate((points_data[:, 0] + radius, arrows[:, 0])))
                y_min = np.min(np.concatenate((points_data[:, 1] - radius, arrows[:, 1])))
                y_max = np.max(np.concatenate((points_data[:, 1] + radius, arrows[:, 1])))
        drawing_area = matplotlib.offsetbox.DrawingArea(scaling_factor * (x_max - x_min),
                                                        scaling_factor * (y_max - y_min),
                                                        scaling_factor * -x_min,
                                                        scaling_factor * -y_min)
        # TODO Check if the scale of the radius is correct - rather yes
        # TODO Check why a strange artefact appeared
        # The rounding is drawn as a thick line with round joins along the polygon's boundary
        polygon = matplotlib.patches.Polygon(scaling_factor * part_data, linewidth=scaling_factor * 2 * radius,
                                             joinstyle="round", capstyle="round", color=color)
        drawing_area.add_artist(polygon)
        if part_std_devs is None:
            pass
            # for point_num, point_args in enumerate(shown_points_data):
            #     is_vertex = np.any([np.allclose(point_args, vertex_args) for vertex_args in part_data])
            #     point_label = matplotlib.text.Text(x=scaling_factor * point_args[0],
            #                                        y=scaling_factor * point_args[1],
            #                                        text=str(point_num),
            #                                        horizontalalignment="center",
            #                                        verticalalignment="center",
            #                                        fontsize=11 if is_vertex else 9,
            #                                        fontweight="normal" if is_vertex else "bold")
            #     drawing_area.add_artist(point_label)
        else:
            if coordinates_type == "xy":
                for point_num, point_args in enumerate(points_data):
                    # point_label = matplotlib.text.Text(x=scaling_factor * point_args[0] + scaling_factor / 10,
                    #                                    y=scaling_factor * point_args[1] + scaling_factor / 10,
                    #                                    text=str(point_num),
                    #                                    horizontalalignment="center",
                    #                                    verticalalignment="center",
                    #                                    fontsize=9)
                    # drawing_area.add_artist(point_label)
                    # TODO Maybe add dots marking the positions of the points, especially the point(s) with 0 standard
                    #  deviations
                    # arrow_style = matplotlib.patches.ArrowStyle("->", head_length=0.)
                    arrow_style = matplotlib.patches.ArrowStyle("|-|", widthA=0, widthB=1.0)
                    # arrow_style = matplotlib.patches.ArrowStyle("simple", head_width=1.2) # Causes a bug in matplotlib
                    # arrow_style = matplotlib.patches.ArrowStyle("->", head_width=0.8)
                    # Head lengths are not scaled and for small standard deviations heads are longer than arrow, so one
                    # solution is to make them not visible
                    # TODO Make arrows lengths correct while using arrows without heads
                    center = (scaling_factor * point_args[0], scaling_factor * point_args[1])
                    ticks = [(center[0] + scaling_factor * points_std_devs_data[point_num][0], center[1]),
                             (center[0] - scaling_factor * points_std_devs_data[point_num][0], center[1]),
                             (center[0], center[1] + scaling_factor * points_std_devs_data[point_num][1]),
                             (center[0], center[1] - scaling_factor * points_std_devs_data[point_num][1])]
                    for tick in ticks:
                        std_dev_arrow = matplotlib.patches.FancyArrowPatch(
                            center,
                            tick,
                            arrowstyle=arrow_style,
                            shrinkA=0,
                            shrinkB=0)
                        drawing_area.add_artist(std_dev_arrow)
            elif coordinates_type == "rt":
                # TODO Make arrows lengths correct while using arrows without heads
                for point_num, point_args in enumerate(points_data):
                    # point_label = matplotlib.text.Text(x=scaling_factor * point_args[0] + scaling_factor / 10,
                    #                                    y=scaling_factor * point_args[1] + scaling_factor / 10,
                    #                                    text=str(point_num),
                    #                                    horizontalalignment="center",
                    #                                    verticalalignment="center",
                    #                                    fontsize=9)
                    # drawing_area.add_artist(point_label)
                    # TODO Maybe add dots marking the positions of the points, especially the point(s) with 0 standard
                    #  deviations
                    disk = matplotlib.patches.Circle(
                        (scaling_factor * point_args[0], scaling_factor * point_args[1]),
                        scaling_factor * 0.03,
                        color="k")
                    drawing_area.add_artist(disk)
                    arrow_style = matplotlib.patches.ArrowStyle("|-|", widthA=0, widthB=1.0)
                    center = (scaling_factor * point_args[0], scaling_factor * point_args[1])
                    # Reuse the bar endpoints computed for the bounding box above (4 per point)
                    for tick in scaling_factor * arrows[4 * point_num:4 * point_num + 4]:
                        std_dev_arrow = matplotlib.patches.FancyArrowPatch(
                            center,
                            tick,
                            arrowstyle=arrow_style,
                            shrinkA=0,
                            shrinkB=0)
                        drawing_area.add_artist(std_dev_arrow)
        return drawing_area
class FixedRadiiRoundedPolygonRSACMAESOpt(RoundedPolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Rounded polygon optimization with the rounding radius fixed to 1; the whole argument vector
    parameterizes the polygon."""
    @classmethod
    def arg_to_radius_and_polygon_arg(cls, arg: np.ndarray) -> Tuple[float, np.ndarray]:
        # The radius is constant, so the whole arg is the polygon argument
        return 1, arg
    @classmethod
    def stddevs_to_radius_and_polygon_stddevs(cls, stddevs: np.ndarray) -> Tuple[float, np.ndarray]:
        # The fixed radius has zero standard deviation
        return 0, stddevs
class VariableRadiiRoundedPolygonRSACMAESOpt(RoundedPolygonRSACMAESOpt, metaclass=abc.ABCMeta):
    """Rounded polygon optimization in which the first argument coordinate parameterizes the rounding
    radius through the softplus transformation and the remaining coordinates parameterize the
    polygon."""
    @classmethod
    def arg_to_radius_and_polygon_arg(cls, arg: np.ndarray) -> Tuple[float, np.ndarray]:
        # softplus presumably keeps the rounding radius positive (softplus defined elsewhere in the module)
        return softplus(arg[0]), arg[1:]
    @classmethod
    def stddevs_to_radius_and_polygon_stddevs(cls, stddevs: np.ndarray) -> Tuple[float, np.ndarray]:
        # TODO Correct this transformation
        # NOTE(review): applying softplus directly to the standard deviation is not the linearized
        #  (delta-method) transformation of the radius' standard deviation - confirm intended behavior
        return softplus(stddevs[0]), stddevs[1:]
@opt_class("Cfrcpg")
class ConstrXYFixedRadiiRoundedConvexPolygonRSACMAESOpt(FixedRadiiRoundedPolygonRSACMAESOpt,
                                                        ConstrXYConvexPolygonRSACMAESOpt):
    """Fixed-rounding-radius, convex rounded polygon in constrained "xy" coordinates."""

    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        """Initial mean comes directly from the constrained-vertices initializer."""
        return initial_mean_constr_vertices(opt_mode_args)

    @classmethod
    def swap_arg(cls, arg: np.ndarray) -> np.ndarray:
        """Swap the polygon part of the argument, keeping the rounding radius coordinate in front."""
        rounding_radius, polygon_part = cls.arg_to_radius_and_polygon_arg(arg)
        swapped_polygon_part = super().swap_arg(polygon_part)
        return np.insert(swapped_polygon_part, 0, rounding_radius)
@opt_class("Cfrsspg")
class ConstrXYFixedRadiiRoundedStarShapedPolygonRSACMAESOpt(FixedRadiiRoundedPolygonRSACMAESOpt,
                                                            ConstrXYStarShapedPolygonRSACMAESOpt):
    """Fixed-rounding-radius, star-shaped rounded polygon in constrained "xy" coordinates."""
    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        # Initial mean comes directly from the constrained-vertices initializer
        return initial_mean_constr_vertices(opt_mode_args)
@opt_class("Cvrcpg")
class ConstrXYVariableRadiiRoundedConvexPolygonRSACMAESOpt(VariableRadiiRoundedPolygonRSACMAESOpt,
                                                           ConstrXYConvexPolygonRSACMAESOpt):
    """Variable-rounding-radius, convex rounded polygon in constrained "xy" coordinates."""

    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        """Initial mean of the polygon vertices with the rounding radius' initial mean prepended."""
        polygon_mean = initial_mean_constr_vertices(opt_mode_args)
        return initial_mean_polygon_rounding(polygon_mean, opt_mode_args)

    @classmethod
    def swap_arg(cls, arg: np.ndarray) -> np.ndarray:
        """Swap the polygon part of the argument, keeping the rounding radius coordinate in front."""
        rounding_radius, polygon_part = cls.arg_to_radius_and_polygon_arg(arg)
        swapped_polygon_part = super().swap_arg(polygon_part)
        return np.insert(swapped_polygon_part, 0, rounding_radius)
@opt_class("Cvrsspg")
class ConstrXYVariableRadiiRoundedStarShapedPolygonRSACMAESOpt(VariableRadiiRoundedPolygonRSACMAESOpt,
                                                               ConstrXYStarShapedPolygonRSACMAESOpt):
    """Variable-rounding-radius, star-shaped rounded polygon in constrained "xy" coordinates."""

    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        """Initial mean of the polygon vertices with the rounding radius' initial mean prepended."""
        polygon_mean = initial_mean_constr_vertices(opt_mode_args)
        return initial_mean_polygon_rounding(polygon_mean, opt_mode_args)
@opt_class("Vrutpg")
class VariableRadiiRoundedUniformTPolygonRSACMAESOpt(VariableRadiiRoundedPolygonRSACMAESOpt,
                                                     UniformTPolygonRSACMAESOpt):
    """Variable-rounding-radius rounded polygon with uniformly distributed azimuthal vertex angles."""

    @staticmethod
    def get_initial_mean(opt_mode_args: dict) -> np.ndarray:
        """Initial mean of the radial coordinates with the rounding radius' initial mean prepended."""
        radii_mean = initial_mean_uniform_t_vertices(opt_mode_args)
        return initial_mean_polygon_rounding(radii_mean, opt_mode_args)
def initial_mean_fixed_radii_disks(opt_mode_args: dict) -> np.ndarray:
    """Return a zero initial mean with two coordinates per disk."""
    disks_num = opt_mode_args["disks_num"]
    return np.zeros(2 * disks_num)
def initial_mean_constr_fixed_radii_disks(opt_mode_args: dict) -> np.ndarray:
    """Return a zero initial mean with two coordinates per disk, minus the three constrained ones."""
    disks_num = opt_mode_args["disks_num"]
    return np.zeros(2 * disks_num - 3)
def initial_mean_constr_vertices(opt_mode_args: dict) -> np.ndarray:
    """Return the initial mean of the distribution for constrained-"xy" polygon vertices.

    Two initialization modes are supported, selected by ``opt_mode_args["polygon_initial_mean"]``:

    - ``"origin"``: all free coordinates start at zero.
    - ``"regular_polygon"``: vertices of a regular polygon with circumradius
      ``opt_mode_args["initial_mean_params"]["polygon_radius"]``, shifted so that the last vertex lies
      at the origin and the second-to-last vertex on the x axis, which matches the three constrained
      coordinates that are dropped from the argument vector.

    :param opt_mode_args: optimization mode arguments; has to contain the "polygon_initial_mean" and
        "vertices_num" keys, and for the "regular_polygon" mode also "initial_mean_params"
    :return: 1D array of 2 * vertices_num - 3 initial mean coordinates
    :raises ValueError: if the "polygon_initial_mean" mode is not recognized (the original code
        silently returned None in that case)
    """
    initial_mean_mode = opt_mode_args["polygon_initial_mean"]
    if initial_mean_mode == "origin":
        return np.zeros(2 * opt_mode_args["vertices_num"] - 3)
    if initial_mean_mode == "regular_polygon":
        vertices_num = opt_mode_args["vertices_num"]
        polygon_radius = opt_mode_args["initial_mean_params"]["polygon_radius"]
        # Vertex angles of the regular polygon, chosen so that after the shift below the last vertex
        # lies at the origin and the second-to-last vertex on the x axis
        angles = np.pi * (3 / 2 - np.arange(start=3, stop=2 * vertices_num + 2, step=2) / vertices_num)
        vertices_centered = polygon_radius * np.stack((np.cos(angles), np.sin(angles)), axis=1)
        shift_angle = np.pi * (1 / 2 - 1 / vertices_num)
        vertices = vertices_centered + np.array([polygon_radius * np.cos(shift_angle),
                                                 polygon_radius * np.sin(shift_angle)])
        # Drop the three constrained coordinates
        return vertices.flatten()[:-3]
    raise ValueError("Unknown polygon_initial_mean mode: {}".format(initial_mean_mode))
def initial_mean_uniform_t_vertices(opt_mode_args: dict) -> np.ndarray:
    """Return the initial mean for uniform-angle ("rt") polygon vertices.

    Only the "regular_polygon" mode is supported: every radial coordinate starts at
    ``opt_mode_args["initial_mean_params"]["polygon_radius"]``.

    :param opt_mode_args: optimization mode arguments; has to contain the "polygon_initial_mean",
        "vertices_num" and "initial_mean_params" keys
    :return: 1D array of vertices_num initial radial coordinates
    :raises ValueError: if the "polygon_initial_mean" mode is not recognized (the original code
        silently returned None in that case)
    """
    if opt_mode_args["polygon_initial_mean"] == "regular_polygon":
        return np.full(shape=opt_mode_args["vertices_num"],
                       fill_value=opt_mode_args["initial_mean_params"]["polygon_radius"])
    raise ValueError("Unknown polygon_initial_mean mode: {}".format(opt_mode_args["polygon_initial_mean"]))
def initial_mean_polygon_rounding(polygon_initial_mean: np.ndarray, opt_mode_args: dict) -> np.ndarray:
    """Prepend the rounding radius' initial mean to the polygon's initial mean vector."""
    rounding_mean = opt_mode_args["rounding_initial_mean"]
    return np.insert(polygon_initial_mean, 0, rounding_mean)
def load_optimization_input(file: str) -> dict:
    """Load an optimization input YAML file from the module's input directory.

    :param file: name of the YAML file inside ``_input_dir``
    :return: parsed contents of the file
    """
    with open(_input_dir + "/" + file, "r") as opt_input_file:
        # yaml.load without an explicit Loader has been deprecated since PyYAML 5.1 and raises a
        # TypeError in PyYAML >= 6. FullLoader keeps the old default behavior except for arbitrary
        # Python object construction.
        return yaml.load(opt_input_file, Loader=yaml.FullLoader)
@mod_arg_parser.command(parsers=["opt_input_file"])
def optimize(file: str) -> None:
    """Run optimization"""
    # Build the optimization object from the input file and run it
    opt_input = load_optimization_input(file)
    opt = RSACMAESOptimization.create_optimization(opt_input)
    opt.run()
@mod_arg_parser.argument("optimization_link",
                         help="file with optimization signature from ./input directory")
@mod_arg_parser.command("initializeopt", parsers=["opt_input_file"])
def initialize_optimization(file: str, optimization_link: str) -> None:
    """Initialize optimization"""
    # Create and pickle the optimization, then record its signature in the link file
    opt_input = load_optimization_input(file)
    opt = RSACMAESOptimization.create_optimization(opt_input)
    opt.pickle()
    print("Optimization signature: {}".format(opt.signature), file=opt.stdout)
    with open(_input_dir + "/" + optimization_link, "w+") as opt_signature_file:
        opt_signature_file.write(opt.signature)
# TODO Maybe do it in another way
@mod_arg_parser.argument("-c", "--config",
                         help="name of graph configuration YAML file from optimization directory")
@mod_arg_parser.command("plotcmaesoptdata", parsers=["opt_signature"])
def plot_cmaes_optimization_data(signature: str, config: Optional[str] = None) -> None:
    """Plot CMA-ES optimization data"""
    # The optimization class name is the sixth dash-separated token of the signature
    class_name = signature.split("-")[5]
    # Get optimization class from current module.
    # If the class is not in current module, module's name has to be passed as sys.modules dictionary's key,
    # so such classes should put the module name to optimization signature.
    optimization_class = getattr(sys.modules[__name__], class_name)
    optimization_class.set_optimization_class_attributes(signature=signature)
    if config is None:
        config_file_name = "graph_config.yaml"
    else:
        config_file_name = config
    optimization_class.plot_optimization_data(signature=signature, config_file_name=config_file_name)
# TODO Test and improve it
@mod_arg_parser.argument("-f", "--file",
                         help="YAML resume-optimization input file from ./input directory")
@mod_arg_parser.command("resumeoptimization", parsers=["opt_signature"])
def resume_optimization(signature: str, file: Optional[str] = None) -> None:
    """Resume a previously pickled, interrupted optimization.

    Args:
        signature: optimization signature (optionally path-prefixed); the
            optimization class name is taken from its sixth dash-separated token.
        file: optional YAML file from ./input overriding selected optimization
            options (CMA-ES options, RSA parameters, parallelization settings).
    """
    opt_class_name = signature.rpartition("/")[2].split("-")[5]
    # Get optimization class from current module.
    # If the class is not in current module, module's name has to be passed as sys.modules dictionary's key,
    # so such classes should put the module name to optimization signature. Such a class also needs to be explicitly
    # imported before unpickling.
    opt_class = getattr(sys.modules[__name__], opt_class_name)
    # Optimization directory has to be prepared - e.g. by duplicating original optimization directory and adding
    # "-restart-1" suffix at the end of the directory name, then (maybe it is necessary - check it) removing directories
    # in outrsa subdirectory corresponding to simulations in interrupted generation, maybe also removing some entries in
    # files in outcmaes subdirectory (check, if the call to self.CMAES.logger.add at the beginning of run method won't
    # spoil anything (it will cause duplicated CMA-ES generation data records)).
    # But many classes' attributes depend on optimization directory - that's why it is better currently to duplicate
    # original optimization directory and add "-original" suffix at the end of the copied directory name and resume
    # optimization in original directory
    optimization = opt_class.unpickle(signature)
    optimization.logger.info(msg="")
    optimization.logger.info(msg="")
    optimization.logger.info(msg="Resuming optimization")
    # Move the directories of simulations belonging to the interrupted generation out of the way.
    interrupted_simulations_dirs = glob.glob(optimization.rsa_output_dir
                                             + "/{:03d}_*".format(optimization.CMAES.countiter))
    if len(interrupted_simulations_dirs) > 0:
        optimization.logger.info(msg="Moving interrupted generation simulations' directories {} to the unused"
                                     " simulations folder".format(", ".join(map(os.path.basename,
                                                                                interrupted_simulations_dirs))))
        try:
            unused_simulations_dir = optimization.rsa_output_dir + "/unused_simulations"
            if not os.path.exists(unused_simulations_dir):
                os.makedirs(unused_simulations_dir)
            for directory in interrupted_simulations_dirs:
                shutil.move(directory, unused_simulations_dir)
        except Exception as exception:
            optimization.logger.warning(msg="Exception raised when moving interrupted generation simulations'"
                                            " directories; {}: {}\n{}".format(type(exception).__name__, exception,
                                                                              traceback.format_exc(limit=6).strip()))
            # TODO Deal with this error, check what would happen if the directories weren't (re)moved, maybe use another
            # way to move directories
    # Set optimization class attributes
    optimization.set_optimization_class_attributes(optimization_input=optimization.optimization_input)
    # Overwrite optimization options, if the file argument was given
    if file is not None:
        resume_signature = datetime.datetime.now().isoformat(timespec="milliseconds").replace(":", "-") \
            .replace(".", "_")
        resume_signature += "-optimization-resume-gen-{}".format(optimization.CMAES.countiter)
        with open(_input_dir + "/" + file, "r") as opt_input_file:
            # NOTE(review): yaml.load is called without an explicit Loader, which is
            # deprecated in PyYAML 5+ and unsafe for untrusted input - consider yaml.safe_load.
            resume_input = yaml.load(opt_input_file)
        if resume_input is not None:
            # TODO Test it
            if "cma_options" in resume_input:
                # After unpickling output is redirected to logger, so CMAEvolutionStrategy classes' errors and warnings
                # as e.g. "UserWarning: key popsize ignored (not recognized as versatile) ..." will be logged
                optimization.CMAES.opts.set(resume_input["cma_options"])
                # optimization.cma_options is not updated
            if "rsa_parameters" in resume_input:
                for param in list(optimization.mode_rsa_parameters) + ["particleAttributes"]:
                    if param in resume_input["rsa_parameters"]:
                        del resume_input["rsa_parameters"][param]
                        optimization.logger.warning(msg="Resume RSA parameter {} ignored".format(param))
                optimization.rsa_parameters.update(resume_input["rsa_parameters"])
                if not optimization.input_given:
                    optimization.all_rsa_parameters.update(resume_input["rsa_parameters"])
            for attr in ["accuracy", "parallel", "particle_attributes_parallel", "okeanos", "max_nodes_number",
                         "okeanos_parallel", "nodes_number", "collectors_per_task"]:
                if attr in resume_input:
                    setattr(optimization, attr, resume_input[attr])
            if "min_collectors_number" in resume_input:
                optimization.min_collectors_number = max(resume_input["min_collectors_number"], 2)
            if "threads" in resume_input:
                optimization.parallel_threads_number = resume_input["threads"]
            if any(attr in resume_input for attr in ["threads", "okeanos", "max_nodes_number"]):
                if not optimization.okeanos:
                    optimization.parallel_simulations_number = min(optimization.parallel_threads_number,
                                                                   optimization.CMAES.popsize)
                else:
                    optimization.parallel_simulations_number = min(optimization.max_nodes_number - 1,
                                                                   optimization.CMAES.popsize) \
                        if optimization.max_nodes_number is not None else optimization.CMAES.popsize
            # TODO Set (and maybe check) other attributes, if needed
            if ("rsa_parameters" in resume_input and len(resume_input["rsa_parameters"]) > 0) \
                    or "accuracy" in resume_input or "okeanos_parallel" in resume_input:
                # All attributes that are used in optimization.rsa_proc_arguments have to be set already, if present
                optimization.set_rsa_proc_arguments()
            if "rsa_parameters" in resume_input and len(resume_input["rsa_parameters"]) > 0:
                rsa_input_filename = optimization.output_dir + "/" + resume_signature + "-rsa-input.txt"
                with open(rsa_input_filename, "w+") as rsa_input_file:
                    # TODO Maybe use resume_input["rsa_parameters"] instead
                    rsa_parameters = optimization.rsa_parameters if optimization.input_given \
                        else optimization.all_rsa_parameters
                    rsa_input_file.writelines(["{} = {}\n".format(param_name, param_value)
                                               for param_name, param_value in rsa_parameters.items()])
                optimization.logger.info(msg="Resume RSA input file: {}-rsa-input.txt".format(resume_signature))
        # Generate used optimization input file in output directory
        opt_input_filename = optimization.output_dir + "/" + resume_signature + "-input.yaml"
        with open(opt_input_filename, "w+") as opt_input_file:
            yaml.dump(resume_input, opt_input_file)
        optimization.logger.info(msg="Optimization resume input file: {}-input.yaml".format(resume_signature))
    # If optimization is to be run on Okeanos in parallel mode, try to set nodes_number attribute to the number of nodes
    # actually allocated to the SLURM job, unless nodes_number was given in resume input file
    if optimization.okeanos_parallel and (file is None or (resume_input is not None
                                                           and "nodes_number" not in resume_input)):
        slurm_job_num_nodes = os.getenv("SLURM_JOB_NUM_NODES")
        if slurm_job_num_nodes is not None:
            optimization.nodes_number = int(slurm_job_num_nodes)
        else:
            optimization.logger.warning(msg="Unable to get number of nodes allocated to the job; SLURM_JOB_NUM_NODES"
                                            " environment variable is not set")
            if optimization.nodes_number is not None:
                optimization.logger.warning(msg="Using the previously set value of the nodes_number attribute")
            else:
                optimization.logger.warning(msg="Setting the value of the nodes_number attribute to 1 + (population"
                                                " size)")
                optimization.nodes_number = 1 + optimization.CMAES.popsize
    # Run optimization
    optimization.run()
# TODO Maybe read optimization directories' and files' names from a YAML file and use pathlib module
# TODO Add managing errors of the rsa3d program and forcing recalculation (or max n recalculations) or resampling of
# the parameter point (maybe return None immediately after rsa_process.wait() in case of a failure) (old note)
# TODO Maybe prepare a Makefile like in https://docs.python-guide.org/writing/structure/ and with creating
# virtual environment (check how PyCharm creates virtualenvs)
# TODO Maybe other methods for plotting and visualization of the saved data
# TODO Maybe add a method calling rsa3d program in wolfram mode on specified .bin file
# - particleAttributes may be taken from rsa-simulation-input.txt file
# TODO Adding box constraints based on feasible anisotropy (probably)
# TODO Does writing to the single packing-fraction-vs-params.txt file by paralleled processes pose a problem?
# TODO Maybe decreasing packing fraction error in subsequent generations by accuracy mode
# or increasing the number of collectors. Maybe combine that with decreasing the population.
# TODO Algorithms for noisy optimization: UH-CMA-ES (cma.NoiseHandler)? DX-NES-UE?
# TODO Maybe single, big collectors and uncertainty handling (variable numbers of reevaluations,
# thus variable numbers of collectors - UH-CMA-ES)?
# TODO Does storing data affect performance?
# TODO Think, if demanding given accuracy (rsa3d accuracy mode) is the right thing to do
# TODO Check, if rsa3d options are well chosen (especially split) and wonder, if they should be somehow
# automatically adjusted during optimization
if __name__ == "__main__":
    # Parse the command line and dispatch to the selected subcommand.
    mod_arg_parser()
|
#!/usr/bin/env python
#
#
# Tutorial 10. Demonstration that the views are linked, but the
# data is not.
from tutorial9b import PlotFrame2
from enthought.chaco.tools.api import LineInspector
class PlotFrame3(PlotFrame2):
    """Frame that attaches linked LineInspector overlays to both plots."""

    def _create_plot(self):
        # Build the plots exactly as the parent class does, then add a
        # LineInspector overlay to each plot (left first, then right).
        container = super(PlotFrame3, self)._create_plot()
        for plot in (self.left_plot, self.right_plot):
            inspector = LineInspector(component=plot,
                                      write_metadata=True, is_listener=True)
            plot.overlays.append(inspector)
        return container
if __name__ == "__main__":
    import wx
    # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython (use wx.App).
    app = wx.PySimpleApp()
    frame = PlotFrame3(None)
    # NOTE(review): the frame is never shown explicitly here - presumably
    # PlotFrame2 shows itself during construction; verify against tutorial9b.
    app.MainLoop()
|
#
# abc006 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Checks resolve() against the ABC006-C sample cases via stream redirection."""
    def assertIO(self, input, output):
        # Swap the real streams for in-memory buffers, run the solver, then
        # compare everything it printed (minus the trailing newline).
        saved_stdout, saved_stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        captured = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = saved_stdout, saved_stdin
        self.assertEqual(captured, output)
    def test_入力例_1(self):
        self.assertIO("3 9", "1 1 1")
    def test_入力例_2(self):
        self.assertIO("7 23", "1 3 3")
    def test_入力例_3(self):
        self.assertIO("10 41", "-1 -1 -1")
def resolve():
    """ABC006 C: split N people with M total legs into 2-, 3- and 4-legged groups."""
    n, m = map(int, input().split())
    # Any split needs between 2 legs (all x) and 4 legs (all z) per person.
    if not (2 * n <= m <= 4 * n):
        print("-1 -1 -1")
        return
    # With x + y + z = n and 2x + 3y + 4z = m, x and y follow from z.
    for z in range(n + 1):
        x = 3 * n + z - m
        y = m - 2 * n - 2 * z
        if 0 <= x <= n and 0 <= y <= n:
            print(x, y, z)
            return
if __name__ == "__main__":
    # Run the solver directly; switch to unittest.main() to run the sample tests.
    # unittest.main()
    resolve()
|
"""Iterable functions"""
import typing
def it_erable(val: typing.Any) -> bool:
    """Determine if `val` is a non-string iterable

    Arguments:
        val {typing.Any} -- any value

    Returns:
        bool -- value is non-string iterable
    """
    # Strings are iterable but are deliberately treated as scalars here.
    return not isinstance(val, str) and hasattr(val, '__iter__')


def it_erate(val: typing.Any) -> typing.Iterable:
    """Cast `val` as an iterable

    Non-string iterables are returned unchanged; any other value (including
    strings) is wrapped in a one-element tuple.

    Arguments:
        val {typing.Any} -- any value

    Returns:
        typing.Iterable -- value as iterable
    """
    # Original annotation typing.Tuple[typing.Any] was wrong: the input is
    # returned as-is when it is already a non-string iterable.
    return val if it_erable(val) else (val, )
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that is used to support Cloud Pub/Sub commands."""
import abc
import re
from googlecloudsdk.api_lib.util import exceptions as sdk_ex
from googlecloudsdk.core import properties
from googlecloudsdk.core.resource import resource_projector
# Maximum number of results that can be passed in pageSize to list operations.
MAX_LIST_RESULTS = 10000
# Regular expression to match full paths for Cloud Pub/Sub resource identifiers.
# TODO(user): These are going away, since we are moving to
# collection paths in CL/125390647.
PROJECT_PATH_RE = re.compile(r'^projects/(?P<Project>[^/]+)$')
SNAPSHOTS_PATH_RE = re.compile(
r'^projects/(?P<Project>[^/]+)/snapshots/(?P<Resource>[^/]+)$')
SUBSCRIPTIONS_PATH_RE = re.compile(
r'^projects/(?P<Project>[^/]+)/subscriptions/(?P<Resource>[^/]+)$')
TOPICS_PATH_RE = re.compile(
r'^projects/(?P<Project>[^/]+)/topics/(?P<Resource>[^/]+)$')
# Collection for various subcommands.
TOPICS_COLLECTION = 'pubsub.projects.topics'
TOPICS_PUBLISH_COLLECTION = 'pubsub.topics.publish'
SNAPSHOTS_COLLECTION = 'pubsub.projects.snapshots'
SNAPSHOTS_LIST_COLLECTION = 'pubsub.snapshots.list'
SUBSCRIPTIONS_COLLECTION = 'pubsub.projects.subscriptions'
SUBSCRIPTIONS_ACK_COLLECTION = 'pubsub.subscriptions.ack'
SUBSCRIPTIONS_LIST_COLLECTION = 'pubsub.subscriptions.list'
SUBSCRIPTIONS_MOD_ACK_COLLECTION = 'pubsub.subscriptions.mod_ack'
SUBSCRIPTIONS_MOD_CONFIG_COLLECTION = 'pubsub.subscriptions.mod_config'
SUBSCRIPTIONS_PULL_COLLECTION = 'pubsub.subscriptions.pull'
SUBSCRIPTIONS_SEEK_COLLECTION = 'pubsub.subscriptions.seek'
# TODO(b/32275946): Use core.resources.Resource instead of this custom class.
class ResourceIdentifier(object):
  """Base class to build resource identifiers."""
  # NOTE(review): __metaclass__ only takes effect on Python 2; on Python 3 the
  # abstract methods below are not enforced - confirm the intended interpreter.
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def _RegexMatch(self, resource_path):
    """Return a match object from applying a regexp to this resource identifier.
    This function needs to be overriden in subclasses to use the appropriate
    regular expression for a resource identifier type (subscriptions, topics).
    Args:
      resource_path: (string) Full (ie. projects/my-proj/topics/my-topic)
        or partial (my-topic) project or resource path.
    """
    pass
  @abc.abstractmethod
  def _ResourceType(self):
    """Returns the valid resource identifier type for this instance.
    This function needs to be overriden in subclasses to return a valid
    resource identifier type (subscriptions, topics, or snapshots).
    """
    pass
  def __init__(self, *args, **kwargs):
    # Construction simply delegates to Parse, so subclasses only override Parse.
    self.Parse(*args, **kwargs)
  def Parse(self, resource_path, project_path=''):
    """Initializes a new ResourceIdentifier.
    Args:
      resource_path: (string) Full (e.g., projects/my-proj/topics/my-topic)
        or partial (my-topic) resource path.
      project_path: (string) Full (projects/my-project) or
        partial (my-project) project path.
        If empty, the SDK environment default
        (gcloud config set project) will be used.
    Returns:
      A ResourceIdentifier instance that captures the subcomponents of the
      resource identifier.
    Raises:
      HttpException if the provided resource path is not a valid resource
      path/name.
    """
    if '/' in resource_path:
      # Full path: it must match the subclass's resource regex exactly.
      match = self._RegexMatch(resource_path)
      if match is None:
        raise sdk_ex.HttpException(
            'Invalid {0} Identifier'.format(self._ResourceType().capitalize()))
      self.project = ProjectIdentifier(match.groupdict()['Project'])
      self.resource_name = match.groupdict()['Resource']
      return
    # Partial path: resolve the project separately (possibly from SDK config).
    self.project = ProjectIdentifier(project_path)
    self.resource_name = resource_path
  def GetFullPath(self):
    # projects/<project>/<resource type>/<resource name>
    return '{0}/{1}/{2}'.format(self.project.GetFullPath(),
                                self._ResourceType(),
                                self.resource_name)
class ProjectIdentifier(ResourceIdentifier):
  """Represents a Cloud project identifier.

  Accepts a full path (projects/my-proj), a bare project name (my-proj), or an
  empty string (falls back to the SDK default project).
  """

  def Parse(self, project_path=''):
    """Initializes a new ProjectIdentifier.

    Args:
      project_path: (string) Full (projects/my-proj) or partial (my-proj)
        project path. If empty, the SDK environment default
        (gcloud config set project) will be used.

    Raises:
      HttpException: if the provided project path is not a valid project
        path/name or if a default project has not been set.
    """
    if not project_path:
      # No path given: defer to the gcloud configuration default.
      self.project_name = properties.VALUES.core.project.Get(required=True)
    elif '/' in project_path:
      # Full path: it must match the projects/<name> pattern.
      match = self._RegexMatch(project_path)
      if match is None:
        raise sdk_ex.HttpException('Invalid Project Identifier')
      self.project_name = match.groupdict()['Project']
    else:
      # Bare project name.
      self.project_name = project_path

  def _RegexMatch(self, resource_path):
    return PROJECT_PATH_RE.match(resource_path)

  def _ResourceType(self):
    return 'projects'

  def GetFullPath(self):
    """Returns a valid full project path."""
    return '{0}/{1}'.format(self._ResourceType(), self.project_name)
class SnapshotIdentifier(ResourceIdentifier):
  """Represents a Cloud Pub/Sub snapshot identifier."""

  def _ResourceType(self):
    # Path segment used when building full snapshot paths.
    return 'snapshots'

  def _RegexMatch(self, resource_path):
    # Full snapshot paths look like projects/<project>/snapshots/<name>.
    return SNAPSHOTS_PATH_RE.match(resource_path)
class SubscriptionIdentifier(ResourceIdentifier):
  """Represents a Cloud Pub/Sub subscription identifier."""

  def _ResourceType(self):
    # Path segment used when building full subscription paths.
    return 'subscriptions'

  def _RegexMatch(self, resource_path):
    # Full subscription paths look like projects/<project>/subscriptions/<name>.
    return SUBSCRIPTIONS_PATH_RE.match(resource_path)
class TopicIdentifier(ResourceIdentifier):
  """Represents a Cloud Pub/Sub topic identifier."""

  def _ResourceType(self):
    # Path segment used when building full topic paths.
    return 'topics'

  def _RegexMatch(self, resource_path):
    # Full topic paths look like projects/<project>/topics/<name>.
    return TOPICS_PATH_RE.match(resource_path)
def ProjectFormat(project_name=''):
  """Formats a project name as a fully qualified project path."""
  identifier = ProjectIdentifier(project_name)
  return identifier.GetFullPath()
def TopicFormat(topic_name, topic_project=''):
  """Formats a topic name as a fully qualified topic path.

  Args:
    topic_name: (string) Name of the topic to convert.
    topic_project: (string) Name of the project the given topic belongs to.
      If not given, then the project defaults to the currently selected
      cloud project.

  Returns:
    A fully qualified topic path of the form projects/foo/topics/topic_name.
  """
  identifier = TopicIdentifier(topic_name, topic_project)
  return identifier.GetFullPath()
def SubscriptionFormat(subscription_name, project_name=''):
  """Formats a subscription name as a fully qualified subscription path.

  Args:
    subscription_name: (string) Name of the subscription to convert.
    project_name: (string) Name of the project the given subscription belongs
      to. If not given, then the project defaults to the currently selected
      cloud project.

  Returns:
    A fully qualified subscription path of the form
    projects/foo/subscriptions/subscription_name.
  """
  identifier = SubscriptionIdentifier(subscription_name, project_name)
  return identifier.GetFullPath()
def SnapshotFormat(snapshot_name, project_name=''):
  """Formats a snapshot name as a fully qualified snapshot path.

  Args:
    snapshot_name: (string) Name of the snapshot to convert.
    project_name: (string) Name of the project the given snapshot belongs
      to. If not given, then the project defaults to the currently selected
      cloud project.

  Returns:
    A fully qualified snapshot path of the form
    projects/foo/snapshots/snapshot_name.
  """
  identifier = SnapshotIdentifier(snapshot_name, project_name)
  return identifier.GetFullPath()
# TODO(b/32276674): Remove the use of custom *DisplayDict's.
def TopicDisplayDict(topic, error_msg=''):
  """Creates a serializable from a Cloud Pub/Sub Topic operation for display.

  Args:
    topic: (Cloud Pub/Sub Topic) Topic to be serialized.
    error_msg: (string) An error message to be added to the serialized
      result, if any.

  Returns:
    A serialized object representing a Cloud Pub/Sub Topic
    operation (create, delete).
  """
  result = resource_projector.MakeSerializable(topic)
  # Expose the name under 'topicId' and attach the operation outcome.
  result['topicId'] = topic.name
  result['success'] = not error_msg
  result['reason'] = error_msg or ''
  del result['name']
  return result
def SubscriptionDisplayDict(subscription, error_msg=''):
  """Creates a serializable from a Cloud Pub/Sub Subscription op for display.

  Args:
    subscription: (Cloud Pub/Sub Subscription) Subscription to be serialized.
    error_msg: (string) An error message to be added to the serialized
      result, if any.

  Returns:
    A serialized object representing a Cloud Pub/Sub Subscription
    operation (create, delete, update).
  """
  # A subscription counts as 'push' only when it has a push config with a
  # non-empty endpoint; otherwise it is treated as a 'pull' subscription.
  endpoint = ''
  kind = 'pull'
  push_config = subscription.pushConfig
  if push_config and push_config.pushEndpoint:
    endpoint = push_config.pushEndpoint
    kind = 'push'
  return {
      'subscriptionId': subscription.name,
      'topic': subscription.topic,
      'type': kind,
      'pushEndpoint': endpoint,
      'ackDeadlineSeconds': subscription.ackDeadlineSeconds,
      'retainAckedMessages': bool(subscription.retainAckedMessages),
      'messageRetentionDuration': subscription.messageRetentionDuration,
      'success': not error_msg,
      'reason': error_msg or '',
  }
def SnapshotDisplayDict(snapshot, error_msg=''):
  """Creates a serializable from a Cloud Pub/Sub Snapshot operation for display.

  Args:
    snapshot: (Cloud Pub/Sub Snapshot) Snapshot to be serialized.
    error_msg: (string) An error message to be added to the serialized
      result, if any.

  Returns:
    A serialized object representing a Cloud Pub/Sub Snapshot operation (create,
    delete).
  """
  result = {
      'snapshotId': snapshot.name,
      'topic': snapshot.topic,
      'expirationTime': snapshot.expirationTime,
  }
  # Attach the operation outcome.
  result['success'] = not error_msg
  result['reason'] = error_msg or ''
  return result
|
# -*- coding: utf-8 -*-
'''
Created on 2020.05.19
@author: Jiahua Rao, Weiming Li, Hui Yang, Jiancong Xie
--no_parallel
'''
import os
import torch
import warnings
warnings.filterwarnings("ignore")
from pathlib import Path
if __name__ == "__main__":
    import argparse
    # Collect experiment configuration from the command line and dispatch to
    # the end-to-end training routine.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', dest='config_file')
    parser.add_argument('--experiment', dest='experiment', default='endtoend')
    parser.add_argument('--result_folder', dest='result_folder', default='MolRep/Outputs/')
    parser.add_argument('--dataset_name', dest='dataset_name', default='none')
    parser.add_argument('--outer_folds', dest='outer_folds', default=10)
    parser.add_argument('--outer_processes', dest='outer_processes', type=int, default=3)
    parser.add_argument('--inner_folds', dest='inner_folds', default=5)
    parser.add_argument('--inner_processes', dest='inner_processes', type=int, default=1)
    parser.add_argument('--test_size', dest='test_size', default=0.1)
    parser.add_argument('--validation_size', dest='validation_size', default=0.1)
    parser.add_argument('--no_parallel', action="store_true", dest='no_parallel')
    parser.add_argument('--dataset_path', dest='dataset_path', default=None)
    parser.add_argument('--smiles_column', dest='smiles_column', default=None)
    parser.add_argument('--target_columns', dest='target_columns', default=[])
    parser.add_argument('--task_type', dest='task_type', default=None)
    parser.add_argument('--metric_type', dest='metric_type', default=None)
    parser.add_argument('--split_type', dest='split_type', default=None)
    args = parser.parse_args()
    config_file = args.config_file
    dataset_name = args.dataset_name
    experiment = args.experiment
    # NOTE(review): endtoend is not defined or imported anywhere in this file -
    # presumably an import is missing; confirm against the MolRep package.
    # NOTE(review): --outer_folds/--inner_folds have no type=int, so values given
    # on the CLI reach the first branch as strings while defaults stay ints.
    if args.dataset_name in ['QM7b', 'QM8', 'QM9', 'ESOL', 'FreeSolv', 'Lipophilicity', 'PCBA', 'MUV', \
                             'HIV', 'PDBbind', 'BACE', 'BBBP', 'Tox21', 'SIDER', 'ClinTox']:
        # Built-in benchmark datasets: dataset statistics are known internally.
        endtoend(config_file, dataset_name,
                 outer_k=args.outer_folds, outer_processes=args.outer_processes,
                 inner_k=args.inner_folds, inner_processes=args.inner_processes,
                 test_size=args.test_size, validation_size=args.validation_size,
                 output_dir=args.result_folder, no_parallel=args.no_parallel)
    else:
        # Custom dataset: pass the user-supplied data description through.
        data_dict = {
            'name': args.dataset_name,
            'path': args.dataset_path,
            'smiles_column': args.smiles_column,
            'target_columns': args.target_columns,
            'task_type': args.task_type,
            'metric_type': args.metric_type,
            'split_type': args.split_type
        }
        endtoend(config_file, dataset_name,
                 outer_k=int(args.outer_folds), outer_processes=int(args.outer_processes),
                 inner_k=int(args.inner_folds), inner_processes=int(args.inner_processes),
                 output_dir=args.result_folder, no_parallel=args.no_parallel, data_stats=data_dict)
|
# -*- coding: utf-8 -*-
from sys import version_info
def get_encodings():
    """Return rows of [codec name, aliases, target languages] for Python codecs.

    On Python 2 the codecs that only exist in Python 3 are filtered out of the
    returned list.
    """
    encoding_list = [
        ['ascii', '646, us-ascii', 'English'],
        ['big5', 'big5-tw, csbig5', 'Traditional Chinese'],
        ['big5hkscs', 'big5-hkscs, hkscs', 'Traditional Chinese'],
        ['cp037', 'IBM037, IBM039', 'English'],
        ['cp273', '273, IBM273, csIBM273', 'German'],
        ['cp424', 'EBCDIC-CP-HE, IBM424', 'Hebrew'],
        ['cp437', '437, IBM437', 'English'],
        ['cp500', 'EBCDIC-CP-BE, EBCDIC-CP-CH, IBM500', 'Western Europe'],
        ['cp720', '', 'Arabic'],
        ['cp737', '', 'Greek'],
        ['cp775', 'IBM775', 'Baltic languages'],
        ['cp850', '850, IBM850', 'Western Europe'],
        ['cp852', '852, IBM852', 'Central and Eastern Europe'],
        ['cp855', '855, IBM855', 'Bulgarian, Byelorussian, Macedonian, Russian, Serbian'],
        ['cp856', '', 'Hebrew'],
        ['cp857', '857, IBM857', 'Turkish'],
        ['cp858', '858, IBM858', 'Western Europe'],
        ['cp860', '860, IBM860', 'Portuguese'],
        ['cp861', '861, CP-IS, IBM861', 'Icelandic'],
        ['cp862', '862, IBM862', 'Hebrew'],
        ['cp863', '863, IBM863', 'Canadian'],
        ['cp864', 'IBM864', 'Arabic'],
        ['cp865', '865, IBM865', 'Danish, Norwegian'],
        ['cp866', '866, IBM866', 'Russian'],
        ['cp869', '869, CP-GR, IBM869', 'Greek'],
        ['cp874', '', 'Thai'],
        ['cp875', '', 'Greek'],
        ['cp932', '932, ms932, mskanji, ms-kanji', 'Japanese'],
        ['cp949', '949, ms949, uhc', 'Korean'],
        ['cp950', '950, ms950', 'Traditional Chinese'],
        ['cp1006', '', 'Urdu'],
        ['cp1026', 'ibm1026', 'Turkish'],
        ['cp1125', '1125, ibm1125, cp866u, ruscii', 'Ukrainian'],
        ['cp1140', 'ibm1140', 'Western Europe'],
        ['cp1250', 'windows-1250', 'Central and Eastern Europe'],
        ['cp1251', 'windows-1251', 'Bulgarian, Byelorussian, Macedonian, Russian, Serbian'],
        ['cp1252', 'windows-1252', 'Western Europe'],
        ['cp1253', 'windows-1253', 'Greek'],
        ['cp1254', 'windows-1254', 'Turkish'],
        ['cp1255', 'windows-1255', 'Hebrew'],
        ['cp1256', 'windows-1256', 'Arabic'],
        ['cp1257', 'windows-1257', 'Baltic languages'],
        ['cp1258', 'windows-1258', 'Vietnamese'],
        ['cp65001', 'Windows UTF-8', 'Windows only'],
        ['euc_jp', 'eucjp, ujis, u-jis', 'Japanese'],
        ['euc_jis_2004', 'jisx0213, eucjis2004', 'Japanese'],
        ['euc_jisx0213', 'eucjisx0213', 'Japanese'],
        ['euc_kr', 'euckr, korean, ksc5601, ks_c-5601, ks_c-5601-1987, ksx1001, ks_x-1001', 'Korean'],
        ['gb2312', 'chinese, csiso58gb231280, euc-cn, euccn, eucgb2312-cn, gb2312-1980, gb2312-80, iso-ir-58', 'Simplified Chinese'],
        ['gbk', '936, cp936, ms936', 'Unified Chinese'],
        ['gb18030', 'gb18030-2000', 'Unified Chinese'],
        ['hz', 'hzgb, hz-gb, hz-gb-2312', 'Simplified Chinese'],
        ['iso2022_jp', 'csiso2022jp, iso2022jp, iso-2022-jp', 'Japanese'],
        ['iso2022_jp_1', 'iso2022jp-1, iso-2022-jp-1', 'Japanese'],
        ['iso2022_jp_2', 'iso2022jp-2, iso-2022-jp-2', 'Japanese, Korean, Simplified Chinese, Western Europe, Greek'],
        ['iso2022_jp_2004', 'iso2022jp-2004, iso-2022-jp-2004', 'Japanese'],
        ['iso2022_jp_3', 'iso2022jp-3, iso-2022-jp-3', 'Japanese'],
        ['iso2022_jp_ext', 'iso2022jp-ext, iso-2022-jp-ext', 'Japanese'],
        ['iso2022_kr', 'csiso2022kr, iso2022kr, iso-2022-kr', 'Korean'],
        ['latin', 'iso-8859-1, iso8859-1, 8859, cp819, latin_1, latin1, L1', 'West Europe'],
        ['iso8859-2', 'iso-8859-2, latin2, L2', 'Central and Eastern Europe'],
        ['iso8859-3', 'iso-8859-3, latin3, L3', 'Esperanto, Maltese'],
        ['iso8859-4', 'iso-8859-4, latin4, L4', 'Baltic languages'],
        ['iso8859-5', 'iso-8859-5, cyrillic', 'Bulgarian, Byelorussian, Macedonian, Russian, Serbian'],
        ['iso8859-6', 'iso-8859-6, arabic', 'Arabic'],
        ['iso8859-7', 'iso-8859-7, greek, greek8', 'Greek'],
        ['iso8859-8', 'iso-8859-8, hebrew', 'Hebrew'],
        ['iso8859-9', 'iso-8859-9, latin5, L5', 'Turkish'],
        ['iso8859-10', 'iso-8859-10, latin6, L6', 'Nordic languages'],
        ['iso8859-11', 'iso-8859-11, thai', 'Thai languages'],
        ['iso8859-13', 'iso-8859-13, latin7, L7', 'Baltic languages'],
        ['iso8859-14', 'iso-8859-14, latin8, L8', 'Celtic languages'],
        ['iso8859-15', 'iso-8859-15, latin9, L9', 'Western Europe'],
        ['iso8859-16', 'iso-8859-16, latin10, L10', 'South-Eastern Europe'],
        ['johab', 'cp1361, ms1361', 'Korean'],
        ['koi8_r', '', 'Russian'],
        ['koi8_t', '', 'Tajik'],
        ['koi8_u', '', 'Ukrainian'],
        ['kz1048', 'kz_1048, strk1048_2002, rk1048', 'Kazakh'],
        ['mac_cyrillic', 'maccyrillic', 'Bulgarian, Byelorussian, Macedonian, Russian, Serbian'],
        ['mac_greek', 'macgreek', 'Greek'],
        ['mac_iceland', 'maciceland', 'Icelandic'],
        ['mac_latin2', 'maclatin2, maccentraleurope', 'Central and Eastern Europe'],
        ['mac_roman', 'macroman, macintosh', 'Western Europe'],
        ['mac_turkish', 'macturkish', 'Turkish'],
        ['ptcp154', 'csptcp154, pt154, cp154, cyrillic-asian', 'Kazakh'],
        ['shift_jis', 'csshiftjis, shiftjis, sjis, s_jis', 'Japanese'],
        ['shift_jis_2004', 'shiftjis2004, sjis_2004, sjis2004', 'Japanese'],
        ['shift_jisx0213', 'shiftjisx0213, sjisx0213, s_jisx0213', 'Japanese'],
        ['utf_32', 'U32, utf32', 'All languages'],
        ['utf_32_be', 'UTF-32BE', 'All languages'],
        ['utf_32_le', 'UTF-32LE', 'All languages'],
        ['utf_16', 'U16, utf16', 'All languages'],
        ['utf_16_be', 'UTF-16BE', 'All languages'],
        ['utf_16_le', 'UTF-16LE', 'All languages'],
        ['utf_7', 'utf-7, U7, unicode-1-1-utf-7', 'All languages'],
        ['utf-8', 'U8, UTF, utf_8', 'All languages'],
        ['utf_8_sig', '', 'All languages'],
    ]
    # These codecs were added in Python 3; drop them when running on Python 2.
    if version_info < (3, 0):
        for del_it in ('cp273', 'cp1125', 'cp65001', 'koi8_t', 'kz1048'):
            for item in encoding_list:
                if item[0] == del_it:
                    encoding_list.remove(item)
                    break
    return encoding_list
|
import draw
import argparse
import math
import copy
import sys
# a dictionary specifying required codeword counts. Ordered by version:correction:codewords
# (i.e. data codewords available at each version / error-correction level)
ERROR_CORRECTION_DICT = {
    1:{'L':19,'M':16,'Q':13,'H':9},
    2:{'L':34, 'M':28, 'Q':22, 'H':16},
    3:{'L':55, 'M':44, 'Q':34, 'H':26},
    4:{'L':80, 'M':64, 'Q':48, 'H':36},
    5:{'L':108, 'M':86, 'Q':62, 'H':46},
    6:{'L':136, 'M':108, 'Q':76, 'H':60},
    7:{'L':156, 'M':124, 'Q':88, 'H':66},
    8:{'L':194, 'M':154, 'Q':110, 'H':86},
    9:{'L':232, 'M':182, 'Q':132, 'H':100},
    10:{'L':274, 'M':216, 'Q':154, 'H':122}
}
# maximum character counts are stored as a dict
# MAX_CHAR_COUNTS[error_level][version-1]
MAX_CHAR_COUNTS = {
    'L': [25, 47, 77, 114, 154,195, 224, 279, 335, 395],
    'M': [20, 38, 61, 90, 122, 154, 178, 221, 262, 311],
    'Q': [16, 29, 47, 67, 87, 108, 125, 157, 189,221],
    'H': [10, 20, 35, 50, 64, 84, 93, 122, 143, 174]
}
# 4-bit QR mode indicator; '0010' selects alphanumeric mode (matches the
# alphanumeric table used by encode_message below).
MODE_INDICATOR = '0010'
def encode_message(msg):
    """Encode `msg` in QR alphanumeric mode and return the resulting bit string."""
    table = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
             'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
             'V', 'W', 'X', 'Y', 'Z', ' ', '$', '%', '*', '+', '-', '.', '/', ':']
    bits = ""
    # Characters are consumed in pairs; each pair becomes the 11-bit value
    # 45 * index(first) + index(second).
    for pair_start in range(0, len(msg) - 1, 2):
        value = 45 * table.index(msg[pair_start]) + table.index(msg[pair_start + 1])
        bits += bin(value)[2:].zfill(11)
    # An odd trailing character is encoded on its own in 6 bits.
    if len(msg) % 2 != 0:
        bits += bin(table.index(msg[-1]))[2:].zfill(6)
    return bits
def get_version(msg_len, error_level="L"):
    """Return the smallest supported QR version whose capacity fits the message.

    Args:
        msg_len: number of characters in the (alphanumeric) message.
        error_level: error-correction level key: 'L', 'M', 'Q' or 'H'.

    Returns:
        The smallest version in 1..4 whose capacity is >= msg_len.

    Raises:
        RuntimeError: if the message does not fit in version 4 at this level.
    """
    # The capacity tables go up to version 10, but this generator only supports
    # versions 1-4, so the search is capped there. (The original loop scanned
    # up to version 6 and then rejected anything above 4 - same observable
    # behavior, inconsistent bounds.)
    for version in range(1, 5):
        if msg_len <= MAX_CHAR_COUNTS[error_level][version - 1]:
            return version
    raise RuntimeError("message is too long for largest supported version size!")
def format_char_count(msg_len, version=1):
    """Return the character-count indicator bit string for `msg_len`.

    Alphanumeric mode uses a 9-bit count indicator for versions below 10 and
    an 11-bit indicator for version 10.
    """
    width = 11 if version == 10 else 9
    return bin(msg_len)[2:].zfill(width)
def get_formatted_data(msg, version=1, error_level="L"):
    """Build the complete data-codeword bit string for `msg`.

    Concatenates the mode indicator, character-count indicator, encoded
    message, terminator, byte-alignment zero bits and the alternating pad
    bytes needed to fill the capacity for the given version/error level.
    The returned string has a space inserted before every 8-bit byte.
    """
    msg = msg.upper()
    char_count = format_char_count(len(msg), version=version)
    required_words = ERROR_CORRECTION_DICT[version][error_level]
    required_bits = 8 * required_words
    encoded = encode_message(msg)
    # Terminator: up to four zero bits, fewer if capacity is nearly full.
    terminator_bits = min(4, required_bits - len(encoded))
    encoded += "0" * terminator_bits
    bit_string = MODE_INDICATOR + char_count + encoded
    # Pad with zeros to the next byte boundary.
    while len(bit_string) % 8 != 0:
        bit_string += "0"
    # Fill remaining capacity with the alternating QR pad bytes.
    # (A dead no-op statement "temporaryString" was removed here.)
    if len(bit_string) != required_bits:
        pad_bytes_needed = int((required_bits - len(bit_string)) / 8)
        for i in range(pad_bytes_needed):
            bit_string += "11101100" if i % 2 == 0 else "00010001"
    # Insert a space before every byte for readability downstream.
    return_string = ""
    for i, bit in enumerate(bit_string):
        if i % 8 == 0:
            return_string += " "
        return_string += bit
    return return_string
# BLOCKING_DICT[error_level][version] -> (group1, group2-or-None), where each
# group is a list of per-block data-codeword counts.
BLOCKING_DICT = {
    'L':{1:([19],None), 2:([34],None), 3:([55],None), 4:([80],None), 5:([108],None), 6:([68, 68],None), 7:([78, 78],None), 8:([97, 97],None), 9:([116, 116],None), 10:([68, 68],[69, 69])},
    'M':{1:([16],None), 2:([28],None), 3:([44],None), 4:([32, 32],None), 5:([43, 43],None), 6:([27, 27, 27, 27],None), 7:([31, 31, 31, 31],None), 8:([38, 38],[39, 39]), 9:([36, 36, 36],[37, 37]), 10:([43, 43, 43, 43],[44])},
    'Q':{1:([13],None), 2:([22],None), 3:([17, 17],None), 4:([24, 24],None), 5:([15, 15],[16, 16]), 6:([19, 19, 19, 19],None), 7:([14, 14],[15, 15, 15, 15]), 8:([18, 18, 18, 18],[19, 19]), 9:([16, 16, 16, 16],[17, 17, 17, 17]), 10:([19, 19, 19, 19, 19, 19],[20, 20])},
    'H':{1:([9],None), 2:([16],None), 3:([13, 13],None), 4:([9, 9, 9, 9],None), 5:([11, 11],[12, 12]), 6:([15, 15, 15, 15],None), 7:([13, 13, 13, 13],[14]), 8:([14, 14, 14, 14],[15, 15]), 9:([12, 12, 12, 12],[13, 13, 13, 13]), 10:([15, 15, 15, 15, 15, 15],[16, 16])}
}
# returns list of groups [(num_blocks_in_group, num_codewords_per_block)]
def get_blocking_counts(error_level="L", version=1):
    """Return [(num_blocks, codewords_per_block)] for each block group.

    Group 2 is absent (None in BLOCKING_DICT) for smaller version/level
    combinations, in which case only one group is returned.
    """
    (g1, g2) = BLOCKING_DICT[error_level][version]
    # Use identity comparison for None (PEP 8) instead of "== None".
    if g2 is None:
        return [(len(g1), g1[0])]
    return [(len(g1), g1[0]), (len(g2), g2[0])]
# table of error correcting codewords per block for various versions and error levels
# Keyed by error level ('L','M','Q','H'), then version (1-10); the value is
# the number of Reed-Solomon error-correction codewords generated per block.
EC_TABLE = {
    'L':{1:7, 2:10, 3:15, 4:20, 5:26, 6:18, 7:20, 8:26, 9:30, 10:18},
    'M':{1:10, 2:16, 3:26, 4:18, 5:24, 6:16, 7:18, 8:22, 9:22, 10:26},
    'Q':{1:13, 2:22, 3:18, 4:26, 5:18, 6:24, 7:18, 8:22, 9:20, 10:24},
    'H':{1:17, 2:28, 3:22, 4:16, 5:22, 6:28, 7:26, 8:26, 9:24, 10:28}
}
def error_correction(encoded_msg, error_level="L", version=1):
    # Compute the Reed-Solomon error-correction codewords for the data
    # codewords in `encoded_msg` (a string of space-separated 8-bit binary
    # groups), interleave data and error codewords, and append the version's
    # remainder bits. Returns the final bit string for the QR matrix.
    error_cw_per_block = EC_TABLE[error_level][version]
    gen_poly_template = find_generator_poly(error_cw_per_block)
    group_blockings = get_blocking_counts(error_level=error_level, version=version)
    msg_poly = list(filter(len, encoded_msg.split(" "))) # filter out empty strings
    # convert each binary string into a number, which become coefficients of the gen polynomial
    msg_poly = list(map(lambda i: int(i,2), msg_poly))
    group_data = []  # data codewords, nested as [group][block][codeword]
    group_err = []   # error codewords, same nesting
    for (num_blocks, codewords_per_block) in group_blockings:
        # pull numbers from global message polynomial into groupings
        # NOTE(review): this index restarts at 0 for the second group, so
        # multi-group versions appear to re-read group 1's codewords instead
        # of continuing after them -- verify against a multi-group vector
        blocks = []
        for block_num in range(num_blocks):
            block_data = []
            for codeword_num in range(codewords_per_block):
                block_data.append(msg_poly[block_num*codewords_per_block + codeword_num])
            blocks.append(block_data)
        group_data.append(blocks)
        # multiply gen poly so degree is same as eventual error correction poly
        err_codewords = []
        for block in blocks:
            # copy block into result polynomial, multiply by number of error codewords per block so that
            # the lead term's exponent doesn't run out during division
            # this is called the message polynomial too, but it has this name to distinguish from the
            # global msg_poly
            result_poly = block[:]
            for _ in range(error_cw_per_block):
                result_poly.append(0)
            # division loop: number of division steps is equal to number of terms in message polynomial
            for j in range(codewords_per_block):
                # copy gen poly base, multiply so degree is same as result poly
                gen_poly = gen_poly_template[:]
                while len(gen_poly) < len(result_poly):
                    gen_poly.append(0)
                # multiply generator poly by lead term of msg poly
                for i in range(len(gen_poly)):
                    gen_poly[i] = field_mult(result_poly[0], gen_poly[i])
                # xor result with msg polynomial (GF(2^8) subtraction)
                for i in range(len(result_poly)):
                    result_poly[i] = gen_poly[i]^result_poly[i]
                # slice away the now-zero leading term
                result_poly = result_poly[1:]
            err_codewords.append(result_poly)
        group_err.append(err_codewords)
    # convert every codeword back to an 8-bit binary string for interleaving
    for a in [group_data, group_err]:
        for b in a:
            for c in b:
                for i in range(len(c)):
                    c[i] = bin(c[i])[2:].zfill(8)
    max_data_cw = max(map(lambda i: i[1], group_blockings))
    max_group = group_blockings[0][0]
    interleaved_msg = ""
    interleaved_err = ""
    # interleave codeword j of block i, alternating groups per block index.
    # NOTE(review): the standard order is all blocks of group 1 then group 2
    # for each j; here groups alternate per block index -- confirm intended.
    for j in range(max_data_cw):
        for i in range(max_group):
            for group in group_data:
                if i < len(group) and j < len(group[i]):
                    interleaved_msg += str(group[i][j])
    for j in range(error_cw_per_block):
        for i in range(max_group):
            for group in group_err:
                if i < len(group) and j < len(group[i]):
                    interleaved_err += str(group[i][j])
    finalMessage = interleaved_msg+interleaved_err
    # remainder bits appended after the final codeword, per version (1-10)
    remainderBits = [0, 7, 7, 7, 7, 7, 0, 0, 0, 0]
    numRemainderBits = remainderBits[version - 1]
    for bit in range(numRemainderBits):
        finalMessage = finalMessage + str(0)
    return finalMessage
# Alpha-to-int: GF(2^8) antilog table for the QR polynomial (x^8+x^4+x^3+x^2+1).
# AI[e] is the field element alpha**e, for e in 0..254.
AI = [
    1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38,
    76, 152, 45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96,
    192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181, 119, 238,
    193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210,
    185, 111, 222, 161, 95, 190, 97, 194, 153, 47, 94, 188, 101, 202,
    137, 15, 30, 60, 120, 240, 253, 231, 211, 187, 107, 214, 177, 127,
    254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34,
    68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124,
    248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133, 23,
    46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168,
    77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183,
    115, 230, 209, 191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123,
    246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149, 55, 110,
    220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56,
    112, 224, 221, 167, 83, 166, 81, 162, 89, 178, 121, 242, 249, 239,
    195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72, 144, 61, 122, 244,
    245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250,
    233, 207, 131, 27, 54, 108, 216, 173, 71, 142
]
# Inverse (log) table: LOG[AI[e]] == e. Precomputed once so field_mult does an
# O(1) lookup instead of the original O(256) AI.index() scan per call.
LOG = {value: exponent for exponent, value in enumerate(AI)}

def field_mult(a, b):
    """Multiply two elements of GF(2^8) using the log/antilog tables.

    Zero has no logarithm, so it is handled as a special case.
    """
    if a == 0 or b == 0:
        return 0
    return AI[(LOG[a] + LOG[b]) % 255]
def find_generator_poly(numberECWords):
    """Return the Reed-Solomon generator polynomial for `numberECWords`
    error-correction codewords, as a list of GF(2^8) coefficients (leading
    term first), looked up from the published alpha-exponent tables.

    Only the degrees used by QR versions 1-10 (7-28) are present; a missing
    degree raises KeyError.
    """
    # Fixed two transcription errors against the published generator
    # polynomial tables (cross-checked with the recursive derivation that was
    # previously left commented out here):
    #   7:  AI[146] (was AI[147])
    #   25: AI[15]  (was the bare integer 215)
    generatorPolynomials = {
        7: [AI[0], AI[87], AI[229], AI[146], AI[149], AI[238], AI[102], AI[21]],
        8: [AI[0], AI[175], AI[238], AI[208], AI[249], AI[215], AI[252], AI[196], AI[28]],
        9: [AI[0], AI[95], AI[246], AI[137], AI[231], AI[235], AI[149], AI[11], AI[123], AI[36]],
        10: [AI[0], AI[251], AI[67], AI[46], AI[61], AI[118], AI[70], AI[64], AI[94], AI[32], AI[45]],
        11: [AI[0], AI[220], AI[192], AI[91], AI[194], AI[172], AI[177], AI[209], AI[116], AI[227], AI[10], AI[55]],
        12: [AI[0], AI[102], AI[43], AI[98], AI[121], AI[187], AI[113], AI[198], AI[143], AI[131], AI[87], AI[157], AI[66]],
        13: [AI[0], AI[74], AI[152], AI[176], AI[100], AI[86], AI[100], AI[106], AI[104], AI[130], AI[218], AI[206], AI[140], AI[78]],
        14: [AI[0], AI[199], AI[249], AI[155], AI[48], AI[190], AI[124], AI[218], AI[137], AI[216], AI[87], AI[207], AI[59], AI[22], AI[91]],
        15: [AI[0], AI[8], AI[183], AI[61], AI[91], AI[202], AI[37], AI[51], AI[58], AI[58], AI[237], AI[140], AI[124], AI[5], AI[99], AI[105]],
        16: [AI[0], AI[120], AI[104], AI[107], AI[109], AI[102], AI[161], AI[76], AI[3], AI[191], AI[147], AI[169], AI[182], AI[194], AI[225], AI[120]],
        17: [AI[0], AI[43], AI[139], AI[206], AI[78], AI[43], AI[239], AI[123], AI[206], AI[214], AI[147], AI[24], AI[99], AI[150], AI[39], AI[243], AI[163], AI[136]],
        18: [AI[0], AI[215], AI[234], AI[158], AI[94], AI[184], AI[97], AI[118], AI[170], AI[79], AI[187], AI[152], AI[148], AI[252], AI[179], AI[5], AI[98], AI[96], AI[153]],
        19: [AI[0], AI[57], AI[3], AI[105], AI[153], AI[52], AI[90], AI[83], AI[17], AI[150], AI[159], AI[44], AI[128], AI[153], AI[133], AI[252], AI[222], AI[138], AI[220], AI[171]],
        20: [AI[0], AI[17], AI[60], AI[79], AI[50], AI[61], AI[163], AI[26], AI[187], AI[202], AI[180], AI[221], AI[225], AI[83], AI[239], AI[156], AI[164], AI[212], AI[212], AI[188], AI[190]],
        21: [AI[0], AI[240], AI[233], AI[104], AI[247], AI[181], AI[140], AI[67], AI[98], AI[85], AI[200], AI[210], AI[115], AI[148], AI[137], AI[230], AI[36], AI[122], AI[254], AI[148], AI[175], AI[210]],
        22: [AI[0], AI[210], AI[171], AI[247], AI[242], AI[93], AI[230], AI[14], AI[109], AI[221], AI[53], AI[200], AI[74], AI[8], AI[172], AI[98], AI[80], AI[219], AI[134], AI[160], AI[105], AI[165], AI[231]],
        23: [AI[0], AI[171], AI[102], AI[146], AI[91], AI[49], AI[103], AI[65], AI[17], AI[193], AI[150], AI[14], AI[25], AI[183], AI[248], AI[94], AI[164], AI[224], AI[192], AI[1], AI[78], AI[56], AI[147], AI[253]],
        24: [AI[0], AI[229], AI[121], AI[135], AI[48], AI[211], AI[117], AI[251], AI[126], AI[159], AI[180], AI[169], AI[152], AI[192], AI[226], AI[228], AI[218], AI[111], AI[0], AI[117], AI[232], AI[87], AI[96], AI[227], AI[21]],
        25: [AI[0], AI[231], AI[181], AI[156], AI[39], AI[170], AI[26], AI[12], AI[59], AI[15], AI[148], AI[201], AI[54], AI[66], AI[237], AI[208], AI[99], AI[167], AI[144], AI[182], AI[95], AI[243], AI[129], AI[178], AI[252], AI[45]],
        26: [AI[0], AI[173], AI[125], AI[158], AI[2], AI[103], AI[182], AI[118], AI[17], AI[145], AI[201], AI[111], AI[28], AI[165], AI[53], AI[161], AI[21], AI[245], AI[142], AI[13], AI[102], AI[48], AI[227], AI[153], AI[145], AI[218], AI[70]],
        27: [AI[0], AI[79], AI[228], AI[8], AI[165], AI[227], AI[21], AI[180], AI[29], AI[9], AI[237], AI[70], AI[99], AI[45], AI[58], AI[138], AI[135], AI[73], AI[126], AI[172], AI[94], AI[216], AI[193], AI[157], AI[26], AI[17], AI[149], AI[96]],
        28: [AI[0], AI[168], AI[223], AI[200], AI[104], AI[224], AI[234], AI[108], AI[180], AI[110], AI[190], AI[195], AI[147], AI[205], AI[27], AI[232], AI[201], AI[21], AI[43], AI[245], AI[87], AI[42], AI[195], AI[212], AI[119], AI[242], AI[37], AI[9], AI[123]]
    }
    return generatorPolynomials[numberECWords]
def get_qr(msg, error_level='M'):
    """Build a QR code for `msg` at the given error level and render it.

    Picks the smallest version that fits the message, encodes and pads the
    data bits, adds Reed-Solomon error correction, and hands the final bit
    stream to the drawing module.
    """
    qr_version = get_version(len(msg), error_level=error_level)
    data_bits = get_formatted_data(msg, version=qr_version, error_level=error_level)
    final_bits = error_correction(data_bits, version=qr_version, error_level=error_level)
    draw.generate_qr(qr_version, final_bits)
def main():
    """Command-line entry point: qr.py <msg> [error level].

    Without an explicit level the code defaults to 'Q'.
    """
    argc = len(sys.argv)
    if argc < 2:
        # no message supplied: show usage and bail out
        print("Usage: qr.py <msg> <opt error level>")
        return
    level = "Q" if argc == 2 else sys.argv[2]
    get_qr(sys.argv[1], error_level=level)

if __name__ == "__main__":
    main()
import numpy as np
import math
import random
import cv2
from tkinter import filedialog
import codecs
import shutil
import os
# Ask the user for an image file and a base directory; the target directory
# is a "wallpaper" sub-path derived from the chosen directory.
filename = filedialog.askopenfile()
dirold = filedialog.askdirectory()
dirnew = (dirold +"/wallpaper")
print(filename.name)
class img(object):
    """Checks whether the given image is landscape and, if so, renames the
    user-selected directory to the module-level 'wallpaper' directory."""
    def __init__(self, file):
        # `file` is the handle returned by filedialog.askopenfile(); its
        # .name attribute is the path on disk
        self.file = file
    def run(self):
        # fix: read the image passed to the constructor instead of the
        # module-level global `filename` (the parameter was ignored before)
        image = cv2.imread(self.file.name)
        # fix: "weight" was a misnomer for the image width
        width, height = image.shape[1], image.shape[0]  # (w, h)
        if width > height:
            # landscape image: move the chosen directory to .../wallpaper
            os.rename(dirold, dirnew)
        if not os.path.exists(dirnew):
            os.makedirs(dirnew)
if __name__ == "__main__":
    # fix: the original only constructed the object and never invoked run(),
    # so the script did nothing
    img(filename).run()
|
from __future__ import absolute_import
import os
from celery import shared_task
from django.core import management
from leonardo.decorators import catch_result
from django.conf import settings
@shared_task
@catch_result
def sync_search_indexes():
    """Rebuild all Haystack search indexes, then clear any stale Whoosh
    write lock the rebuild may have left behind.

    Returns a small status dict (consumed by @catch_result).
    """
    management.call_command('rebuild_index', interactive=False)
    # patch whoosh backend: remove the lock file the rebuild leaves around
    haystack = getattr(settings, 'HAYSTACK_CONNECTIONS', None)
    # fix: the original crashed with TypeError when HAYSTACK_CONNECTIONS is
    # undefined (None), and with KeyError when ENGINE is missing
    if haystack and 'default' in haystack \
            and 'whoosh' in haystack['default'].get('ENGINE', ''):
        try:
            os.remove(os.path.join(
                haystack['default']['PATH'], 'MAIN_WRITELOCK'))
        except OSError:
            # lock file absent -- nothing to clean up; this best-effort
            # cleanup replaces the original bare `except:`
            pass
    return {'result': 'Rebuild index OK'}
|
from django.conf import settings
from django.contrib.auth.models import User
from model_mommy import mommy
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from ...models import DOCUMENT_CLASSIFICATION, Example
from .utils import (TestUtilsMixin, assign_user_to_role, create_default_roles,
remove_all_role_mappings)
class TestStatisticsAPI(APITestCase, TestUtilsMixin):
    """Tests for the project statistics endpoint: progress totals, per-user
    and per-label breakdowns, and partial responses via ?include=."""

    @classmethod
    def setUpTestData(cls):
        cls.super_user_name = 'super_user_name'
        cls.super_user_pass = 'super_user_pass'
        cls.other_user_name = 'other_user_name'
        cls.other_user_pass = 'other_user_pass'
        create_default_roles()
        # Todo: change super_user to project_admin.
        super_user = User.objects.create_superuser(username=cls.super_user_name,
                                                   password=cls.super_user_pass,
                                                   email='fizz@buzz.com')
        other_user = User.objects.create_user(username=cls.other_user_name,
                                              password=cls.other_user_pass,
                                              email='bar@buzz.com')
        cls.project = mommy.make(
            _model='TextClassificationProject',
            project_type=DOCUMENT_CLASSIFICATION,
            users=[super_user]
        )
        # two examples, each annotated by a different user
        doc1 = mommy.make('Example', project=cls.project)
        doc2 = mommy.make('Example', project=cls.project)
        mommy.make('Category', example=doc1, user=super_user)
        mommy.make('Category', example=doc2, user=other_user)
        cls.url = reverse(viewname='statistics', args=[cls.project.id])
        cls.doc = Example.objects.filter(project=cls.project)
        assign_user_to_role(project_member=other_user, project=cls.project,
                            role_name=settings.ROLE_ANNOTATOR)

    @classmethod
    def doCleanups(cls):
        remove_all_role_mappings()

    def test_returns_exact_progress(self):
        """The super user annotated one of the two examples."""
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.data['total'], 2)
        self.assertEqual(response.data['remaining'], 1)

    def test_returns_exact_progress_with_collaborative_annotation(self):
        """With collaborative annotation enabled, all annotations count."""
        self._patch_project(self.project, 'collaborative_annotation', True)
        self.client.login(username=self.other_user_name,
                          password=self.other_user_pass)
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.data['total'], 2)
        self.assertEqual(response.data['remaining'], 0)

    def test_returns_user_count(self):
        """The response carries a per-user count mapping."""
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.get(self.url, format='json')
        # fix: this test asserted the 'label' key and its sibling asserted
        # 'user' -- the assertions were swapped relative to the test names
        self.assertIn('user', response.data)
        self.assertIsInstance(response.data['user'], dict)

    def test_returns_label_count(self):
        """The response carries a per-label count mapping."""
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.get(self.url, format='json')
        self.assertIn('label', response.data)
        self.assertIsInstance(response.data['label'], dict)

    def test_returns_partial_response(self):
        """?include=user restricts the payload to only the requested key."""
        self.client.login(username=self.super_user_name,
                          password=self.super_user_pass)
        response = self.client.get(f'{self.url}?include=user', format='json')
        self.assertEqual(list(response.data.keys()), ['user'])
|
# Consume space-separated "label: value" lines from the serial port until an
# empty read, logging each record (with a timestamp) to CSV and to data_list.
while ser_bytes:
    # drop the trailing CR/LF pair before decoding
    decoded_bytes = str(ser_bytes[0:len(ser_bytes)-2].decode("ascii"))
    # fix: split once instead of recomputing the split for every field
    fields = decoded_bytes.split(' ')
    t, p, b, g = fields[0], fields[1], fields[2], fields[3]
    current_time = datetime.datetime.now()
    datetime_now = current_time.strftime('%Y-%m-%d_%H:%M:%S.%f')
    # keep only the value part after the "label: " prefix
    t_ = t.split(': ')[1]
    p_ = p.split(': ')[1]
    b_ = b.split(': ')[1]
    g_ = g.split(': ')[1]
    data = [t, p, b, g, datetime_now]
    data_ = [t_, p_, b_, g_, datetime_now]
    convert_to_CSV(data_)
    data_list.append(data)
    ser_bytes = ser.readline()
__author__ = 'artemr'
# Arbitrarily nested sample list used to exercise get_flat below.
initList = [[1], [2, 3, [4, [5, 6, 7], 8], [9]], 10, [11, [12], 13], 14]
def get_flat(lVals):
    """Return a new, flat list with all nested lists in lVals expanded,
    preserving element order.

    Rewritten as a single recursive pass; the original repeatedly rescanned
    the entire list with any() and copied it until no nested list remained,
    which is quadratic in the flattened length.
    """
    result = []
    for val in lVals:
        if isinstance(val, list):
            result.extend(get_flat(val))
        else:
            result.append(val)
    return result
# Parenthesized call form prints the same result under both Python 2 and
# Python 3 (the bare `print` statement was Python 2 only).
print(get_flat(initList))
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Julien Danjou
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer import publish
from ceilometer.openstack.common import cfg
from ceilometer.openstack.common import log
# Module-level logger for the polling agent.
LOG = log.getLogger(__name__)
class AgentManager(object):
    # Drives the metering pollsters loaded through a stevedore
    # ExtensionManager and publishes the counters they produce.
    def __init__(self, extension_manager):
        self.ext_manager = extension_manager
    def publish_counters_from_one_pollster(self, ext, manager, context,
                                           *args, **kwargs):
        """Used to invoke the plugins loaded by the ExtensionManager.

        Polls one extension and publishes each counter it returns to the
        configured metering topic.
        """
        try:
            LOG.info('Polling %s', ext.name)
            for c in ext.obj.get_counters(manager, *args, **kwargs):
                LOG.debug('Publishing counter: %s', c)
                publish.publish_counter(context, c,
                                        cfg.CONF.metering_topic,
                                        cfg.CONF.metering_secret,
                                        cfg.CONF.counter_source,
                                        )
        # deliberately broad: a single failing pollster must not abort the
        # whole polling cycle -- log the error and continue
        except Exception as err:
            LOG.warning('Continuing after error from %s: %s',
                        ext.name, err)
            LOG.exception(err)
|
import cs50
import math
# Prompt until the user enters a positive amount of change owed.
cash = 0
while cash == 0:
    amount = cs50.get_float("Change owed: ")
    if amount > 0:
        cash = amount
# Coin denominations in cents.
pennies = 1
nickels = 5
dimes = 10
quarters = 25
# Work in whole cents so float rounding cannot skew the result.
cashLeft = round(cash * 100)
total = 0
# Greedy coin counting: largest denomination first.
for coin in (quarters, dimes, nickels, pennies):
    used, cashLeft = divmod(cashLeft, coin)
    total += used
print(total)
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import pandas as pd
import csv
import sys
import json
import os
class DynamicScrapper:
    """XPath-driven scraper: reads a parent XPath and a list of child XPaths
    from a JSON config file, collects the matching text with Selenium, and
    dumps the regrouped records to JSON (or CSV)."""

    def __init__(self, config_path):
        self.data = DynamicScrapper.load_config_file(config_path)
        self.parent_XPATH = self.data["parent"]
        self.childs_XPATHS = self.data["childs"]
        self.driver_path = self.data["driver_path"]
        self.url = self.data["web_url"]

    @staticmethod
    def load_config_file(path):
        """Load and return the JSON config at `path`.

        Raises FileNotFoundError when the file is missing (a subclass of the
        generic Exception the original raised, so existing handlers still
        catch it).
        """
        if os.path.exists(path):
            with open(path, "r") as f:
                return json.load(f)
        raise FileNotFoundError(f"config file not found: {path}")

    def set_driver(self):
        """Start a headless Firefox driver using the configured geckodriver."""
        options = Options()
        options.headless = True
        self.driver = webdriver.Firefox(
            executable_path=self.driver_path, options=options)

    def run(self):
        """Scrape the configured URL and write the records to data2.json."""
        self.set_driver()
        # fix: the original leaked the browser process when scraping or
        # serialization raised; quit() in a finally always shuts it down
        try:
            self.driver.get(self.url)
            row = []
            parents = self.driver.find_elements_by_xpath(self.parent_XPATH)
            for parent in parents:
                for i in range(0, len(self.childs_XPATHS)):
                    obj = parent.find_element_by_xpath(self.childs_XPATHS[i]).text
                    # one single-key dict per child, keyed "1".."n"
                    row.append({f"{i+1}": obj})
            self.convert_json(self.reformatter(row))
        finally:
            self.driver.quit()

    def reformatter(self, row):
        """Regroup the flat [{'1': v}, {'2': v}, ...] stream into one dict
        per parent, keyed 1..len(childs_XPATHS)."""
        print(row)
        r_row = []
        counter = 1
        values = []
        for i in row:
            if counter < len(self.childs_XPATHS):
                values.append(i[str(counter)])
                counter += 1
            else:
                # last child of this parent: emit one combined record
                values.append(i[str(counter)])
                counter = 1
                m = {}
                for j in range(0, len(self.childs_XPATHS)):
                    m[j+1] = values[j]
                r_row.append(m)
                values = []
        return r_row

    def convert_csv(self, row):
        """Write the records to data4.csv."""
        df = pd.DataFrame(row)
        df.to_csv("data4.csv", index=False)

    def convert_json(self, row):
        """Write the records to data2.json (UTF-8, characters unescaped)."""
        with open("data2.json", "w", encoding="utf-8") as f:
            json.dump(row, f, ensure_ascii=False)
if __name__ == "__main__":
    # Scrape using the IMDB config expected in the working directory.
    obj = DynamicScrapper("imdb.json")
    obj.run()
|
import basebrowser
import datetime
import json
class Scraper(object):
    """Scrapes bus arrival estimates for a stop from web.smsbus.cl."""

    def __init__(self):
        ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:31.0) Gecko/20100101 Firefox/31.0'
        self.reqheaders = [
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
            ('Accept-Encoding', 'gzip, deflate'),
            ('Accept-Language', 'en-US,en;q=0.5'),
            ('Connection', 'keep-alive'),
            ('Host', 'web.smsbus.cl'),
            ('Referer', 'http://web.smsbus.cl/web/buscarAction.do?d=cargarServicios'),
            ('User-Agent', ua)
        ]
        self.browser = basebrowser.WebBrowser(uAgent=ua)
        self.last_reload = datetime.datetime.now()
        self.reload_cookies()
        # refresh session cookies at most once every 300 seconds
        self.reload_quantum = 300

    def reload_cookies(self):
        """Hit the search page to obtain fresh session cookies."""
        return self.browser.fetch('http://web.smsbus.cl/web/buscarAction.do?d=cargarServicios', method='GET')

    def _parse(self, parse_tree):
        """Extract service/bus/eta/dist dicts from the response tree."""
        out = list()
        aux_k = ['service', 'bus','eta', 'dist']
        for div in parse_tree.find('//div[@id="contenido_respuesta_2"]').xpath('//div[@id="siguiente_respuesta"] | //div[@id="proximo_solo_paradero"]'):
            pt = filter(lambda x: len(x), [e.strip() for e in div.itertext()])
            out.append(dict(zip(aux_k, pt)))
        return out

    def json_output(f):
        # decorator (applied at class-creation time): serialize the wrapped
        # method's result as a JSON string
        return lambda *a, **k: json.dumps(f(*a, **k))

    @json_output
    def scrap(self, pid):
        """Query arrival estimates for stop `pid`; returns a JSON string."""
        #pid = 'PC616'
        # fix: the original computed last_reload - now, which is always
        # negative, so the cookie refresh below could never trigger
        deltatime = datetime.datetime.now() - self.last_reload
        if deltatime.total_seconds() > self.reload_quantum:
            self.reload_cookies()
            self.last_reload = datetime.datetime.now()
        params = (
            ('d', 'busquedaParadero'),
            ('destino_nombre', 'rrrrr'),
            ('servicio', '-1'),
            ('destino', '-1'),
            ('paradero', '-1'),
            ('busqueda_rapida', 'PC616 C08'),
            ('ingresar_paradero', pid)
        )
        response = self.browser.fetch('http://web.smsbus.cl/web/buscarAction.do', data=params, headers=self.reqheaders)
        return self._parse(response)
# Computes the average for a given sum of grades.
def calcularMedia(soma, qtdNotas=3):
    """Return the average of a sum of grades.

    soma: the sum of the grades.
    qtdNotas: how many grades the sum covers; defaults to 3, the value the
    original hard-coded, so existing callers are unaffected.
    """
    media = soma / qtdNotas
    return media
# Each sub-list holds a student's name followed by three grades.
notas = [['Pedro', 4.5, 7, 9], ['Tiago', 9.4, 10, 8.6], ['Maria', 0, 5, 3.3]]
# Index of the student's name inside each sub-list.
indiceNome = 0
# Tiago's record lives at index 1 of the notas list, so index it directly
# instead of scanning the whole list for it.
indiceTiago = 1
nomeAluno = notas[indiceTiago][indiceNome]
# Grades start at index 1 (index 0 holds the name).
somaNotas = sum(notas[indiceTiago][1:])
media = calcularMedia(somaNotas)
# "%.2f" % media limits the printed average to two decimal places.
print("O aluno " + nomeAluno + " possui a média igual a " + "%.2f" % media )
|
#!/usr/bin/python
import sys
import ply.yacc as yacc
# Make the scanner (lab 1) and AST node classes (lab 3) importable.
sys.path.insert(0, '../lab-1-scanner')
import scanner
sys.path.insert(0, '../lab-3-syntax-tree')
from AST import *
# Re-binding keeps the module-level name explicit; ply reads the token list
# from this module's `tokens`.
scanner = scanner
tokens = scanner.tokens
# Operator precedence, lowest first; IFX/ELSE resolve the dangling-else
# conflict, UMINUS gives unary minus a higher precedence than binary MINUS.
precedence = (
    ("nonassoc", "IFX"),
    ("nonassoc", "ELSE"),
    ("nonassoc", 'LESS_THAN', 'GREATER_THAN', 'LESS_OR_EQUAL_THAN', 'GREATER_OR_EQUAL_THAN', 'NOT_EQUAL', 'EQUAL'),
    ("left", 'PLUS', 'MINUS', 'DOTADD', 'DOTSUB'),
    ("left", 'TIMES', 'DIVIDE', 'DOTMUL', 'DOTDIV'),
    ("right", '='),
    ("right", 'ADDASSIGN', 'SUBASSIGN'),
    ("right", 'MULASSIGN', 'DIVASSIGN'),
    ("right", 'APOSTROPHE'),
    ("right", 'UMINUS'))
# NOTE: the docstrings below are ply grammar rules and must not be edited.
# grammar start symbol: a program is a list of instructions
def p_program(p):
    """program : instructions"""
    p[0] = Program(p[1], p.lineno(1))
# collect one-or-more instructions into a Python list
def p_instructions(p):
    """instructions : instruction
                    | instruction instructions"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = [p[1]] + p[2]
# every concrete statement kind reduces to `instruction`
def p_instruction(p):
    """instruction : assignment
                   | ifx
                   | if_else
                   | for
                   | while
                   | command
                   | return
                   | print
                   | compound"""
    p[0] = p[1]
# plain/augmented assignment to a variable, or assignment into an indexed
# reference (matrix element)
def p_assignment(p):
    """assignment : ID '=' expression ';'
                  | ID ADDASSIGN expression ';'
                  | ID SUBASSIGN expression ';'
                  | ID MULASSIGN expression ';'
                  | ID DIVASSIGN expression ';'
                  | ID '[' vector ']' '=' expression ';'"""
    if len(p) == 5:
        p[0] = Assignment(Variable(p[1],p.lineno(1)), p[2], p[3], p.lineno(1))
    else:
        p[0] = Assignment(
            ref = Refference(
                id = Variable(
                    name = p[1],
                    line = p.lineno(1)
                ),
                vector = p[3],
                line = p.lineno(1)
            ),
            op = p[5],
            value = p[6],
            line = p.lineno(1))
# assignment whose right-hand side is an indexed read: a = b[i, j];
def p_assignment_refference(p):
    """assignment : ID '=' ID '[' vector ']' ';'"""
    p[0] = Assignment(
        ref = Variable(
            p[1],
            p.lineno(1)
        ),
        op = p[2],
        value = Refference(
            id = Variable(
                p[3],
                p.lineno(1)
            ),
            vector = p[5],
            line = p.lineno(1)
        ),
        line = p.lineno(1)
    )
# if without else; %prec IFX resolves the dangling-else ambiguity
def p_ifx(p):
    """ifx : IF '(' condition ')' instruction %prec IFX"""
    p[0] = If(p[1], p[3], p[5])
# if with an else branch (p[6] is the ELSE token itself)
def p_if_else(p):
    """if_else : IF '(' condition ')' instruction ELSE instruction"""
    p[0] = IfElse(p[1], p[3], p[5], p[6], p[7])
# for loop over a range iterator
def p_for(p):
    """for : FOR iterator instruction"""
    p[0] = For(p[1], p[2], p[3])
# range iterator of the form ID = start : end
def p_iterator(p):
    """iterator : ID '=' factor ':' factor """
    p[0] = Iterator(Variable(p[1],p.lineno(1)), p[3], p[5], p.lineno(1))
# while loop
def p_while(p):
    """while : WHILE '(' condition ')' instruction"""
    p[0] = While(p[1], p[3], p[5])
# loop-control statements
def p_command(p):
    """command : BREAK ';'
               | CONTINUE ';'"""
    p[0] = Command(p[1], p.lineno(1))
# bare return
def p_return(p):
    """return : RETURN ';' """
    p[0] = Return(p[1], p.lineno(1))
# return with a value
def p_return_expression(p):
    """return : RETURN expression ';' """
    p[0] = Return(p[1], p.lineno(1), p[2])
# print of a quoted expression or of a value list
def p_print(p):
    """print : PRINT DOUBLEAPOSTROPHE expression DOUBLEAPOSTROPHE ';'
             | PRINT vector ';' """
    if len(p) == 4:
        p[0] = Print(p[1], p[2], p.lineno(1))
    else: p[0] = Print(p[1], p[3], p.lineno(1))
# braced block of instructions; may be empty
def p_compound(p):
    """compound : '{' program '}'
                | '{' '}' """
    if len(p) == 3: p[0] = Compound()
    else: p[0] = Compound(p[2])
# a condition is just a comparison expression
def p_condition(p):
    """condition : expression_comparison"""
    p[0] = p[1]
# bare identifier used as a value
def p_variable_factor(p):
    """factor : ID"""
    p[0] = Variable(p[1],p.lineno(1))
# numeric literal used as a value
def p_numerical_factor(p):
    """factor : number"""
    p[0] = p[1]
# integer literal
def p_int_number(p):
    """number : INTNUM"""
    p[0] = Integer(p[1], p.lineno(1))
# float literal
def p_float_number(p):
    """number : FLOATNUM"""
    p[0] = Float(p[1], p.lineno(1))
# all expression forms funnel through this rule
def p_expression(p):
    """expression : factor
                  | expression_bin_op
                  | expression_dot_bin_op
                  | expression_uminus
                  | expression_transposition
                  | expression_comparison
                  | expression_matrix
                  | expression_table"""
    p[0] = p[1]
# scalar arithmetic operators
def p_expression_bin_op(p):
    """expression_bin_op : expression PLUS expression
                         | expression MINUS expression
                         | expression TIMES expression
                         | expression DIVIDE expression"""
    p[0] = BinOp(p[1], p[2], p[3], p.lineno(1))
# element-wise (matrix) arithmetic operators
def p_expression_dot_bin_op(p):
    """expression_dot_bin_op : expression DOTADD expression
                             | expression DOTSUB expression
                             | expression DOTMUL expression
                             | expression DOTDIV expression"""
    p[0] = DotBinOp(p[1], p[2], p[3], p.lineno(1))
# unary minus, bound tighter via %prec UMINUS
def p_expression_uminus(p):
    """expression_uminus : MINUS expression %prec UMINUS"""
    p[0] = Uminus(p[2])
# matrix transposition ID' -- only identifiers may be transposed
def p_expression_transposition(p):
    """expression_transposition : ID APOSTROPHE"""
    p[0] = Transposition(Variable(p[1], p.lineno(1)), line = p.lineno(1))
# relational operators
def p_expression_comparison(p):
    """expression_comparison : expression LESS_THAN expression
                             | expression GREATER_THAN expression
                             | expression LESS_OR_EQUAL_THAN expression
                             | expression GREATER_OR_EQUAL_THAN expression
                             | expression NOT_EQUAL expression
                             | expression EQUAL expression"""
    p[0] = Comparision(p[1], p[2], p[3], p.lineno(1))
# built-in matrix constructors: zeros/eye/ones(n)
def p_expression_matrix(p):
    """expression_matrix : ZEROS '(' factor ')'
                         | EYE '(' factor ')'
                         | ONES '(' factor ')' """
    p[0] = SpecialMatrix(p[1], p[3], p.lineno(1))
# empty literal [] becomes an empty Vector node
def p_empty_list(p):
    """expression_table : '[' ']' """
    p[0] = Vector(p.lineno(1))
    #nothing will be append
# bracketed matrix literal; the inner `matrix` node is passed through
def p_non_empty_matrix(p):
    """expression_table : '[' matrix ']' """
    #matrix = Matrix(p.lineno(1))
    #matrix.vectors.append(p[2])
    p[0] = p[2]
# semicolon-separated rows, built right-recursively: the Matrix node is
# created at the deepest (single-row) reduction and rows are appended on the
# way back up. NOTE(review): appending puts earlier rows AFTER later ones --
# confirm consumers expect/compensate for the reversed order.
def p_matrix(p):
    """matrix : vector ';' matrix
              | vector """
    if len(p) == 2:
        p[0] = Matrix(p.lineno(1))
        p[0].vectors.append(p[1])
    else:
        p[3].vectors.append(p[1])
        p[0] = p[3]
# comma-separated elements of one row, built the same right-recursive way
# (same ordering caveat as p_matrix)
def p_vector(p):
    """vector : factor ',' vector
              | factor """
    if len(p) == 2:
        p[0] = Vector(p.lineno(1))
        p[0].elements.append(p[1])
    else:
        p[3].elements.append(p[1])
        p[0] = p[3]
# syntax-error reporting hook invoked by ply
def p_error(p):
    if p:
        print("{0} Parser: LexToken({1}, '{2}')".format(p.lineno, p.type, p.value))
        #error = Error("Syntax error at line {0}: LexToken({1}, '{2}')".format(p.lineno, p.type, p.value))
        #error.printTree()
    else:
        error = Error("Unexpected end of input")
        #error.printTree()
# build the LALR parser from the rules above
parser = yacc.yacc()
|
#!/usr/bin/env python
import os
import sys
def cmp3(a, b):
    """Three-way comparison, replacing Python 2's cmp(): -1, 0 or 1."""
    if a == b:
        return 0
    return 1 if a > b else -1
|
import math
from typing import Set, Optional
import gi
from graph_tool import VertexPropertyMap, Vertex
from zxopt.util import is_interactive
gi.require_version('Rsvg', '2.0')
from gi.repository import Rsvg
import cairo
from graph_tool.draw import graph_draw
from zxopt.data_structures.diagram.diagram import VERTEX_BOUNDARY, INPUT, VERTEX_SPIDER_GREEN, Diagram
from zxopt.visualization.render_util import to_cairo_color
from zxopt.visualization.renderer import Renderer
GREEN_SPIDER_COLOR = "#96FAAA"
RED_SPIDER_COLOR = "#FF9191"
BOUNDARY_COLOR = "#BBBBBB"
VERTEX_BORDER_COLOR = "#333333"
EDGE_COLOR = "#444444"
HADAMARD_EDGE_COLOR = "#4444DD"
TEMP_SVG_FILENAME = "diagram_render.svg"
DIAGRAM_RENDER_OFFSET = [30, 30]
DIAGRAM_RENDER_SPACING = 70
DIAGRAM_INPUT_GRAB_SCALE = 0.80
class DiagramRenderer(Renderer):
diagram: Diagram
diagram_width: int
diagram_height: int
disable_alignment: bool
vertex_pos: Optional[VertexPropertyMap]
grabbed_vertex: Optional[Vertex]
def __init__(self, diagram: Diagram, width: int = 500, height: int = 300, diagram_width=800, diagram_height=600, disable_alignment=False):
super().__init__(width, height)
self.diagram = diagram
self.diagram_width = diagram_width
self.diagram_height = diagram_height
self.disable_alignment = disable_alignment
self.vertex_pos = None
self.grabbed_vertex = None
# def on_resize(self):
# super().on_resize()
# self.diagram_width = self.drawing_area.get_allocated_width()
# self.diagram_height = self.drawing_area.get_allocated_height()
def render(self, ctx: cairo.Context):
self.render_to_image(TEMP_SVG_FILENAME)
# source_surface = cairo.SVGSurface.create_from_png(TEMP_PNG_FILENAME)
# ctx.set_source_surface(source_surface)
# ctx.paint()
handle = Rsvg.Handle()
svg = handle.new_from_file(TEMP_SVG_FILENAME)
svg.render_cairo(ctx)
# os.remove(TEMP_PNG_FILENAME)
# https://graph-tool.skewed.de/static/doc/draw.html#graph_tool.draw.graph_draw
def render_to_image(self, filename: str):
g = self.diagram.g
vertex_labels = g.new_vertex_property("string")
vertex_fill_colors = g.new_vertex_property("string")
for v in g.vertices():
if self.diagram.vertex_type_prop[v] == VERTEX_BOUNDARY:
vertex_labels[v] = "I" if self.diagram.boundary_type_prop[v] == INPUT else "O"
vertex_fill_colors[v] = BOUNDARY_COLOR
else:
phase = str(round(self.diagram.phase_prop[v] / math.pi * 100.0) / 100.0) if self.diagram.phase_prop[v] != 0.0 else ""
# if phase == "1.0":
# phase = "π"
vertex_labels[v] = phase
if self.diagram.vertex_type_prop[v] == VERTEX_SPIDER_GREEN:
vertex_fill_colors[v] = GREEN_SPIDER_COLOR
else:
vertex_fill_colors[v] = RED_SPIDER_COLOR
edge_colors = g.new_edge_property("string")
for e in g.edges():
edge_colors[e] = HADAMARD_EDGE_COLOR if self.diagram.hadamard_prop[e] else EDGE_COLOR
# vertex positions
if not self.disable_alignment:
if self.vertex_pos is None:
self.vertex_pos = self.calculate_vertex_positons()
vertex_pos = self.vertex_pos
else:
vertex_pos = None
graph_draw(g,
pos = vertex_pos,
vertex_text = vertex_labels,
vertex_fill_color = vertex_fill_colors,
vertex_color = VERTEX_BORDER_COLOR,
vertex_size = 20,
edge_color = edge_colors,
output_size = (self.diagram_width, self.diagram_height),
output = filename,
fmt = "svg",
bg_color = to_cairo_color("#FFFFFF") if is_interactive() else to_cairo_color("#FFFFFF"),
inline = False,
fit_view=False,
fit_view_ink=False,
adjust_aspect=False
)
def calculate_vertex_positons(self) -> VertexPropertyMap:
diagram = self.diagram
g = diagram.g
pos = g.new_vertex_property("vector<double>")
for v in g.vertices():
pos[v] = [400, 400]
inputs = diagram.get_inputs()
outputs = diagram.get_outputs()
spiders = diagram.get_spiders()
# Alignment algorithm see notes
# BFS
processed: Set[Vertex] = set()
to_process: Set[Vertex] = set()
for input in inputs:
pos[input] = [diagram.get_boundary_index(input) + DIAGRAM_RENDER_OFFSET[0], DIAGRAM_RENDER_SPACING * diagram.get_boundary_index(input) + DIAGRAM_RENDER_OFFSET[1]]
processed.add(input)
for n in input.all_neighbors():
to_process.add(n)
# alignment algorithm
current_step = 1
last_step_skipped = False
while len(to_process) > 0:
# if to process only contains outputs, place them
if all([diagram.is_output(v) for v in to_process]):
for output in to_process:
pos[output] = [DIAGRAM_RENDER_SPACING * current_step + DIAGRAM_RENDER_OFFSET[0], DIAGRAM_RENDER_SPACING * diagram.get_boundary_index(output) + DIAGRAM_RENDER_OFFSET[1]]
to_process.clear()
break
could_be_placed = set()
# for qubit_index in range(len(diagram.get_inputs())):
# for spider in to_process:
# if diagram.get_spider_qubit_index(spider) == qubit_index:
# could_be_placed.add(spider)
# break
# iterate over all canidates, only place if all other neighbors can also get placed
for vertex in [v for v in to_process if diagram.is_spider(v)]:
if vertex in could_be_placed: # skip vertex if already in placement due to neighbor getting placed
continue
vertex_qubit_index = diagram.get_spider_qubit_index(vertex)
if len([v for v in could_be_placed if diagram.get_spider_qubit_index(v) == vertex_qubit_index]) == 0: # ensure there is no spider getting placed on this qubit already
non_placed_neighbors = [v for v in vertex.all_neighbors() if v not in could_be_placed and v not in processed and diagram.is_spider(v)]
non_placed_other_qubit_neighbors = [v for v in non_placed_neighbors if diagram.get_spider_qubit_index(v) != vertex_qubit_index]
# those nodes could also have neighbors, but let's ignore them for now
all_placeable = True
for neighbor in non_placed_other_qubit_neighbors:
neighbor_qubit_index = diagram.get_spider_qubit_index(neighbor)
if len([v for v in could_be_placed if diagram.get_spider_qubit_index(v) == neighbor_qubit_index]) != 0: # ensure there is no spider getting placed on this qubit already
if neighbor not in to_process and not last_step_skipped: # this will block placement till this one has been discovered via a different route, gets disabled after a placement fail
all_placeable = False
break
if all_placeable:
could_be_placed.add(vertex)
for n in non_placed_other_qubit_neighbors:
could_be_placed.add(n)
if len(could_be_placed) == 0:
last_step_skipped = True
continue
else:
last_step_skipped = False
# place spiders
for spider in could_be_placed:
pos[spider] = [DIAGRAM_RENDER_SPACING * current_step + DIAGRAM_RENDER_OFFSET[0], DIAGRAM_RENDER_SPACING * diagram.get_spider_qubit_index(spider) + DIAGRAM_RENDER_OFFSET[1]]
if spider in to_process:
to_process.remove(spider)
processed.add(spider)
for neighbor in [v for v in spider.all_neighbors() if not v in processed and not v in to_process and not v in could_be_placed]:
to_process.add(neighbor)
# TODO: break loops in case something goes wrong
current_step += 1
return pos
def mouse_moved(self, x: int, y: int):
x *= DIAGRAM_INPUT_GRAB_SCALE
y *= DIAGRAM_INPUT_GRAB_SCALE
if self.grabbed_vertex is not None:
self.vertex_pos[self.grabbed_vertex] = [x - (x % 10), y - (y % 10)]
self.drawing_area.queue_draw()
def mouse_pressed(self, x: int, y: int, button: int):
x *= DIAGRAM_INPUT_GRAB_SCALE
y *= DIAGRAM_INPUT_GRAB_SCALE
# find closest vertex
closest_vertex, closest_dist_squared = (None, 1000000)
for v in self.diagram.g.vertices():
dist_squared = (self.vertex_pos[v][0] - x)**2 + (self.vertex_pos[v][1] - y)**2
if dist_squared < closest_dist_squared:
closest_dist_squared = dist_squared
closest_vertex = v
if closest_dist_squared < 150**2:
self.grabbed_vertex = closest_vertex
    def mouse_released(self, x: int, y: int, button: int):
        """Release handler: drop any grabbed vertex and request a repaint."""
        # NOTE(review): the scaled coordinates are not used here; kept for
        # symmetry with the other mouse handlers.
        x *= DIAGRAM_INPUT_GRAB_SCALE
        y *= DIAGRAM_INPUT_GRAB_SCALE
        self.grabbed_vertex = None
        self.drawing_area.queue_draw()
|
import requests
from bs4 import BeautifulSoup
# Fetch the sinoptik.ua front page and print a short forecast line per day.
source = requests.get('https://ua.sinoptik.ua').text
soup = BeautifulSoup(source, 'lxml')

# Page heading (location name); fetched but not printed.
location = soup.find('h1', class_='isMain').text.lstrip()

# One card per forecast day (note: the site's class name has a trailing space).
for day in soup.find_all('div', class_='main '):
    day_name = day.find('p', class_='day-link').text
    day_num = day.find('p', class_='date dateFree').text
    month = day.find('p', class_='month').text
    temperature = day.find('div', class_='temperature')
    temperature_min = temperature.find('div', class_='min').span.text
    temperature_max = temperature.find('div', class_='max').span.text
    print(f'{day_name} {day_num} {month}: температура повітря коливатиметься в межах від {temperature_min} до {temperature_max}')
#coding=utf-8
# Copy the lines of 'ecshop_url_txt' into rs.txt with duplicates removed.
# Output order is unspecified (set-based), matching the original behaviour.
# Fixes: use `with` so both files are closed even on error, and iterate the
# file directly instead of a manual readline()/sentinel loop.
with open('ecshop_url_txt') as src, open('rs.txt', 'w') as dst:
    dst.writelines(set(src))
import re
# a1 = '12.12.2000'
# a2 = '1.1'
# a3 = '1'
#
# print(re.match("[0-9]+.[0-9]+.[0-9]+", a1))
# print(re.match("[0-9].[0-9].[0-9]", a2))
# print(re.match("[0-9].[0-9].[0-9]", a3))
# Demo: extract every run of consecutive digits from the sample string.
text = "a123b45с6d"
exp = r'(\d+)'  # regular expression: one or more digits
matches = re.findall(exp, text)
print(matches)
|
def main():
    """Read wildcard patterns from domain.txt, then for each domain typed on
    stdin (blank line ends input) print the first matching '*.suffix' pattern."""
    with open('domain.txt', 'r') as fh:
        domains = fh.read().split('\n')[0:-1]
    while True:
        entered = input()
        if not entered:
            break
        # every dot position yields a candidate suffix pattern, shortest last
        dot_positions = find_str('.', entered)
        for pos in dot_positions[:entered.count('.')]:
            candidate = '*' + entered[pos:]
            if candidate in domains:
                print(entered, '\t', candidate)
                break
def find_str(s, string):
    """Return the indices of every character of *string* equal to *s*."""
    return [idx for idx, ch in enumerate(string) if ch == s]
main()
|
# #########################################################################
# Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2018. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENTn SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import numpy as np
import scipy.fftpack as spf
import tomopy
from PyQt5 import QtGui, QtCore, QtWidgets
from scipy import ndimage, optimize, signal
from scipy import stats
from skimage import filters
from skimage.feature import register_translation
from skimage.measure import regionprops
class SinogramActions(QtWidgets.QWidget):
def __init__(self):
super(SinogramActions, self).__init__()
self.x_shifts = None
self.y_shifts = None
self.original_data = None
# def runCenterOfMass(self, element, data, thetas):
# '''
# Center of mass alignment
# Variables
# -----------
# element: int
# element index
# data: ndarray
# 4D xrf dataset ndarray [elements, theta, y,x]
# thetas: ndarray
# sorted projection angle list
# '''
# num_projections = data.shape[1]
# com = zeros(num_projections)
# temp = zeros(data.shape[3])
# temp2 = zeros(data.shape[3])
# for i in arange(num_projections):
# temp = sum(data[element, i, :, :] - data[element, i, :10, :10].mean(), axis=0)
# numb2 = sum(temp)
# for j in arange(data.shape[3]):
# temp2[j] = temp[j] * j
# if numb2 <= 0:
# numb2 = 1
# numb = float(sum(temp2)) / numb2
# if numb == NaN:
# numb = 0.000
# com[i] = numb
# x=thetas
# fitfunc = lambda p, x: p[0] * sin(2 * pi / 360 * (x - p[1])) + p[2]
# errfunc = lambda p, x, y: fitfunc(p, x) - y
# p0 = [100, 100, 100]
# self.centers, success = optimize.leastsq(errfunc, p0, args=(x, com))
# centerOfMassDiff = fitfunc(self.centers, x) - com
# #set some label within the sinogram widget to the string defined in the line below
# # self.lbl.setText("Center of Mass: " + str(p1[2]))
# num_projections = data.shape[1]
# for i in arange(num_projections):
# self.x_shifts[i] += int(centerOfMassDiff[i])
# data[:, i, :, :] = np.roll(data[:, i, :, :], int(round(self.x_shifts[i])), axis=2)
# #set some status label
# self.alignmentDone()
# # return data, self.x_shifts, self.centers
# return data, self.x_shifts
def runCenterOfMass(self, element, data, thetas, weighted = True, shift_y = False):
# '''
# Center of mass alignment
# Variables
# -----------
# element: int
# element index
# data: ndarray
# 4D xrf dataset ndarray [elements, theta, y,x]
# thetas: ndarray
# sorted projection angle list
# weighted: bool
# run center of mass or weighted center of mass
# shift_y: bool
# align in y as well as x
# '''
num_projections = data.shape[1]
view_center_x = data.shape[3]//2
view_center_y = data.shape[2]//2
x_shifts = []
y_shifts = []
w_x_shifts = []
w_y_shifts = []
tmp_lst = []
if weighted:
for i in range(num_projections):
image = data[element, i]
threshold_value = filters.threshold_otsu(image)
labeled_foreground = (image > threshold_value).astype(int)
properties = regionprops(labeled_foreground, image)
weighted_center_of_mass = properties[0].weighted_centroid
w_x_shifts.append(int(round(view_center_x - weighted_center_of_mass[1])))
w_y_shifts.append(int(round(view_center_y - weighted_center_of_mass[0])))
data = self.shiftProjection(data, w_x_shifts[i], 0, i)
if shift_y:
data = self.shiftProjection(data, 0, w_y_shifts[i], i)
if not shift_y:
w_y_shifts = np.asarray(w_y_shifts)*0
return data, np.asarray(w_x_shifts), -np.asarray(w_y_shifts)
if not weighted:
for i in range(num_projections):
image = data[element, i]
threshold_value = filters.threshold_otsu(image)
labeled_foreground = (image > threshold_value).astype(int)
properties = regionprops(labeled_foreground, image)
center_of_mass = properties[0].centroid
x_shifts.append(int(round(view_center_x -center_of_mass[1])))
y_shifts.append(int(round(view_center_y - center_of_mass[0])))
data = self.shiftProjection(data, x_shifts[i], 0, i)
if shift_y:
data = self.shiftProjection(data, 0, y_shifts[i], i)
if not shift_y:
y_shifts = np.asarray(y_shifts)*0
return data, np.asarray(x_shifts), -np.asarray(y_shifts)
def shiftProjection(self, data, x, y, index):
X = int(x//1)
Y = int(y//1)
x = x - X
y = y - Y
if x > 0:
x_dir = 1
elif x < 0:
x_dir = -1
else:
x_dir = 0
if y > 0:
y_dir = 1
elif x < 0:
y_dir = -1
else:
y_dir = 0
data[:,index] = np.roll(data[:,index], Y, axis=1) #negative because image coordinates are flipped
data[:,index] = np.roll(data[:,index], X, axis=2)
if x_dir == 0 and y_dir == 0:
return data
else:
data_a = data*x
data_b = data*(1-x)
data_b = self.shiftProjection(data_b,x_dir,0, index)
data_c = data_a+data_b
data_a = data_c*y
data_b = data_c*(1-y)
data_b = self.shiftProjection(data_b,0,y_dir, index)
data = data_a+data_b
return data
def shiftStack(self, data, x, y):
X = int(x//1)
Y = int(y//1)
x = x - X
y = y - Y
if x > 0:
x_dir = 1
elif x < 0:
x_dir = -1
else:
x_dir = 0
if y > 0:
y_dir = 1
elif x < 0:
y_dir = -1
else:
y_dir = 0
for i in range(data.shape[1]):
data[:,i] = np.roll(data[:,i],Y,axis=1)
for i in range(data.shape[1]):
data[:,i] = np.roll(data[:,i],X, axis=2)
if x_dir == 0 and y_dir == 0:
return data
else:
data_a = data*x
data_b = data*(1-x)
data_b = self.shiftStack(data_b,x_dir,0)
data_c = data_a+data_b
data_a = data_c*y
data_b = data_c*(1-y)
data_b = self.shiftStack(data_b,0,y_dir)
data = data_a+data_b
return data
def shift(self, sinogramData, data, shift_number, col_number):
'''
shifts sinogram column of pixels up or down.
Variables
-----------
sinogramData: ndarray
3D array containing sinogram images for each row of data
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
shift_number: int
amount of pixel shifting done per column
col_number: int
'''
num_projections = data.shape[1]
regShift = np.zeros(sinogramData.shape[0], dtype=np.int)
sinogramData[col_number * 10:col_number * 10 + 10, :] = np.roll(sinogramData[col_number * 10:col_number * 10 + 10, :], shift_number, axis=1)
regShift[col_number] += shift_number
for i in range(num_projections):
# data[:,i,:,:] = np.roll(data[:,i,:,:], regShift[i], axis=2)
data = self.shiftProjection(data, regShift[i],0,i)
return data, sinogramData
def slope_adjust(self, sinogramData, data, shift, delta):
'''
Sinograms are oftwen skewed when using xcor alignment method. slope_adjust offsets the sinogram's slope by 'delta' pixels
Variables
-----------
sinogramData: ndarray
3D array containing sinogram images for each row of data
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
shift: int
number of pixel to shift sinogram up or down by.
delta: int
number of pixels to shift by at right-hand side of sinogram.
'''
num_projections = data.shape[1]
step = round(delta/num_projections)
lin_shift = [int(x) for x in np.linspace(0, delta, num_projections)]
#TODO: make this a continuou (subpixel/fractional) shift
lin_shift = [x + shift for x in lin_shift]
for i in range(num_projections):
data, sinogramData = self.shift(sinogramData, data, lin_shift[i], i)
# data[:,i] = np.roll(data[:,i],shift,axis=1)
# data = self.shiftProjection(data,shift,0,i)
return lin_shift, data, sinogramData
def crossCorrelate(self, element, data):
'''
cross correlate image registration
Variables
-----------
element: int
element index
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
num_projections = data.shape[1]
x_shifts = np.zeros(num_projections)
y_shifts = np.zeros(num_projections)
for i in range(num_projections - 1):
a = data[element, i, :, :]
b = data[element, i + 1, :, :]
fa = spf.fft2(a)
fb = spf.fft2(b)
shape = a.shape
c = abs(spf.ifft2(fa * fb.conjugate()))
t0, t1 = np.unravel_index(np.argmax(c), a.shape)
if t0 > shape[0] // 2:
t0 -= shape[0]
if t1 > shape[1] // 2:
t1 -= shape[1]
# data[:, i + 1] = np.roll(data[:, i + 1], t0, axis=1)
# data[:, i + 1] = np.roll(data[:, i + 1], t1, axis=2)
data = self.shiftProjection(data,t1,t0,i+1)
x_shifts[i + 1] += t1
y_shifts[i + 1] += t0
self.alignmentDone()
return data, x_shifts, -y_shifts
def crossCorrelate2(self, element, data):
'''
cross correlate image registration aplies to all loaded elements.
Variables
-----------
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
num_projections = data.shape[1]
x_shifts = np.zeros(num_projections)
y_shifts = np.zeros(num_projections)
for i in range(1, num_projections):
shift, error, diffphase = register_translation(data[element,i-1], data[element,i])
# shift, error, diffphase = register_translation(data[element,i-1], data[element,i], 100)
x_shifts[i] += round(shift[1],2)
y_shifts[i] += round(shift[0],2)
# data[:, i] = np.roll(data[:, i], y_shifts[i], axis=1)
# data[:, i] = np.roll(data[:, i], x_shifts[i], axis=2)
data = self.shiftProjection(data,x_shifts[i],y_shifts[i],i)
self.alignmentDone()
return data, x_shifts, -y_shifts
def phaseCorrelate(self, element, data):
'''
Phase correlate image registration
Variables
-----------
element: int
element index
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
num_projections = data.shape[1]
x_shifts = np.zeros(num_projections)
y_shifts = np.zeros(num_projections)
for i in range(num_projections - 1):
# onlyfilenameIndex=self.fileNames[i+1].rfind("/")
a = data[element, i, :, :]
b = data[element, i + 1, :, :]
fa = spf.fft2(a)
fb = spf.fft2(b)
shape = a.shape
c = abs(spf.ifft2(fa * fb.conjugate() / (abs(fa) * abs(fb))))
t0, t1 = np.unravel_index(np.argmax(c), a.shape)
if t0 > shape[0] // 2:
t0 -= shape[0]
if t1 > shape[1] // 2:
t1 -= shape[1]
# data[:, i + 1] = np.roll(data[:, i + 1], t0, axis=1)
# data[:, i + 1] = np.roll(data[:, i + 1], t1, axis=2)
data = self.shiftProjection(data,t1,t0,i+1)
x_shifts[i + 1] += t1
y_shifts[i + 1] += -t0
self.alignmentDone()
return data, x_shifts, y_shifts
# def align_y_top(self, element, data):
# '''
# This alingment method sets takes a hotspot or a relatively bright and isolated part of the projection and moves it to the
# top of the ROI boundary. It does this for all projections, effectively adjusting for vertical drift or stage wobble.
# Variables
# -----------
# element: int
# element index
# data: ndarray
# 4D xrf dataset ndarray [elements, theta, y,x]
# '''
# self.data = data
# num_projections = data.shape[1]
# tmp_data = data[element,:,:,:]
# bounds = self.get_boundaries(tmp_data,5)
# y_bot = np.asarray(bounds[3])
# translate = y_bot[0]-y_bot
# # self.data = np.roll(data, int(np.round(self.y_shifts)), axis=1)
# self.y_shifts -=translate
# for i in range(num_projections):
# self.data[:,i,:,:] = np.roll(data[:,i,:,:], int(np.round(translate[i])), axis=1)
# self.alignmentDone()
# return self.y_shifts, self.data
def align2edge(self, element, data, loc, threshold):
'''
This alingment method sets takes a hotspot or a relatively bright and isolated part of the projection and moves it to the
top of the ROI boundary. It does this for all projections, effectively adjusting for vertical drift or stage wobble.
Variables
-----------
element: int
element index
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
loc: bool
0 = bottom, 1 = top
'''
num_projections = data.shape[1]
y_shifts = np.zeros(num_projections)
tmp_data = data[element,:,:,:]
bounds = self.get_boundaries(tmp_data,threshold)
edge = np.asarray(bounds[2+loc])
translate = -edge
# self.data = np.roll(data, int(np.round(self.y_shifts)), axis=1)
y_shifts -= translate
for i in range(num_projections):
# data[:,i] = np.roll(data[:,i], int(np.round(translate[i])), axis=1)
data = self.shiftProjection(data, 0, np.round(translate[i],2), i)
self.alignmentDone()
return y_shifts, data
def get_boundaries(self, data, coeff):
'''
Identifies the saple's envelope and creates a rectangular boundary over each projection, then return a dictionary containing the
left, right, top, and bottom boundary positions.
Variables
-----------
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
coeff: int
element index
'''
bounds = {}
bounds[0] = [] # x_left
bounds[1] = [] # x_right
bounds[2] = [] # y_top
bounds[3] = [] # y_bottom
num_proj = len(data)
for i in range(num_proj):
col_sum = np.sum(data[i], axis=0) / data[i].shape[1]
row_sum = np.sum(data[i], axis=1) / data[i].shape[0]
noise_col = np.sort(col_sum[col_sum > 0])[:1]
noise_row = np.sort(row_sum[row_sum > 0])[:1]
if noise_col <= noise_row:
noise = noise_col
else:
noise = noise_row
upper_thresh_col = np.sort(col_sum)[::-1][:1]
diffcol = upper_thresh_col - noise
y_thresh = diffcol * coeff / 100 + noise
upper_thresh_row = np.sort(row_sum)[::-1][:1]
diffrow = upper_thresh_row - noise
x_thresh = diffrow * coeff / 100 + noise
for j in range(len(col_sum)):
if col_sum[j] >= y_thresh:
bounds[0].append(j)
break
for j in range(len(col_sum)):
if col_sum[len(col_sum) - j - 1] >= y_thresh:
bounds[1].append(len(col_sum) - j - 1)
break
for j in range(len(row_sum)):
if row_sum[len(row_sum) - j - 1] >= x_thresh:
bounds[2].append(len(row_sum) - j - 1)
break
for j in range(len(row_sum)):
if row_sum[j] >= x_thresh:
bounds[3].append(j)
break
return bounds
def iterative_align(self, element, data, thetas, pad, blur_bool, rin, rout, center, algorithm, upsample_factor, save_bool, debug_bool, iters=5):
'''
iterative alignment method from TomoPy
Variables
-----------
element: int
element index
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
thetas: ndarray
sorted projection angle list
iters: int
number of iterations
'''
num_projections = data.shape[1]
x_shifts = np.zeros(num_projections)
y_shifts = np.zeros(num_projections)
prj = data[element]
# prj = np.sum(data, axis=0)
prj = tomopy.remove_nan(prj, val=0.0)
prj[np.where(prj == np.inf)] = 0.0
# self.get_iter_paraeters()
prj, sx, sy, conv = tomopy.align_joint(prj, thetas, iters=iters, pad=pad,
blur=blur_bool, rin=rin, rout=rout, center=center, algorithm=algorithm,
upsample_factor=upsample_factor, save=save_bool, debug=debug_bool)
x_shifts = np.round(sx,2)
y_shifts = np.round(sy,2)
for i in range(num_projections):
# data[:,i,:,:] = np.roll(data[:,i,:,:], int(np.round(y_shifts[i])), axis=1)
# data[:,i,:,:] = np.roll(data[:,i,:,:], int(np.round(x_shifts[i])), axis=2)
data = self.shiftProjection(data, x_shifts[i], y_shifts[i], i)
return x_shifts, y_shifts, data
def alignFromText2(self, fileName, data):
'''
align by reading text file that saved prior image registration
alignment info is saved in following format: name of the file, xshift, yshift
by locating where the comma(,) is we can extract information:
name of the file(string before first comma),
yshift(string after first comma before second comma),
xshift(string after second comma)
Variables
-----------
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
try:
#unalign first, y_axis possibly inverted.
num_projections = data.shape[1]
x_shifts = self.x_shifts
y_shifts = self.y_shifts
for i in range(num_projections):
data = self.shiftProjection(data,-x_shifts[i],y_shifts[i], i)
#read alignment data
file = open(fileName[0], 'r')
read = file.readlines()
datacopy = np.zeros(data.shape)
datacopy[...] = data[...]
data[np.isnan(data)] = 1
num_projections = data.shape[1]
y_shifts = np.zeros(num_projections)
x_shifts = np.zeros(num_projections)
for i in range(num_projections):
j = i + 1
secondcol = round(float(read[j].split(",")[2]))
firstcol = round(float(read[j].split(",")[1]))
y_shifts[i] = secondcol
x_shifts[i] = firstcol
# data[:, i] = np.roll(data[:, i], x_shifts[i], axis=2)
# data[:, i] = np.roll(data[:, i], y_shifts[i],, axis=1)
# TODO: check padding amount and adjust alignment if necessary
x_shifts[i] = self.unwind(x_shifts[i], data.shape[3])
data = self.shiftProjection(data,x_shifts[i],-y_shifts[i],i)
file.close()
self.alignmentDone()
return data, x_shifts, y_shifts
except IndexError:
print("index missmatch between align file and current dataset ")
except IOError:
print("choose file please")
except TypeError:
print("choose file please")
return
def unwind(self, x_shift, x_range):
if x_shift >= x_range/2:
x_shift = x_shift - x_range
elif x_shift <= -x_range/2:
x_shift = x_shift + x_range
return x_shift
def alignmentDone(self):
'''send message that alignment has been done'''
print("Alignment has been completed")
def find_center(self, tomo, thetas, slice_index, init_center, tol, mask_bool, ratio):
center = tomopy.find_center(tomo, thetas, slice_index, init_center, tol, mask_bool, ratio)
return center[0]
def move_rot_axis(self, thetas, center, rAxis_pos, theta_pos):
#set 0th angle to
num_theas = thetas.shape[0]
pos_from_center = [rAxis_pos - center]
angle_offset = -180-theta_pos
thetas = thetas + angle_offset
rads = np.radians(thetas)
# angle_increments = rads[1:]-rads[:-1]
offsets = pos_from_center[0]*np.cos(rads)
# adjustment = pos_from_center[0]*-1 - offsets[0]
# offsets += adjustment
return offsets
def rot_center3(self, thetasum, ave_mode = None, limit = None, return_all = False):
# thetasum: 1d or 2d array of summed projections. (z,x)
if thetasum.ndim == 1:
thetasum = thetasum[None,:]
T = spf.fft(thetasum, axis = 1)
# Collect real and imaginary coefficients.
real, imag = T[:,1].real, T[:,1].imag
rows = thetasum.shape[0]
cols = thetasum.shape[1]
# In a sinogram the feature may be more positive or less positive than the background (i.e. fluorescence vs
# absorption contrast). This can mess with the T_phase value so we multiply by the sign of the even function
# to account for this.
T_phase = np.arctan2(imag*np.sign(real),real*np.sign(real))
if ave_mode == 'Mean':
# Use the mean of the centers from each row as center shift.
# Good for objects filling the field of view (i.e. local/roi tomography)
return np.mean(T_phase)/(np.pi*2)*cols
elif ave_mode == 'Median':
# Use median value as center shift.
return np.median(T_phase)/(np.pi*2)*cols
elif ave_mode == 'Local':
# Use local mean from window about the median vlimitalue as center shift.
# Good for objects fully contained within the field of view.
# Default window is 2*rows//10
med = np.median(T_phase)
if limit == None:
return np.tmean(T_phase, limits = (med-10, med+10))/(np.pi*2)*cols
else:
return np.tmean(T_phase, limits = (med-limit, med+limit))/(np.pi*2)*cols
else:
# Use value from center row as center shift.
# Fastest option.
if return_all:
return T_phase/(np.pi*2)*cols
return T_phase[rows//2]/(np.pi*2)*cols
def hotspot2line(self, element, x_size, y_size, hs_group, posMat, data):
'''
aligns projections to a line based on hotspot information
Variables
-----------
element: int
element index
x_size: int
ROI pixel dimension in x
y_size: int
ROI pixel dimension in y
hs_group: int
hotspot group number
posMat: ndarray
position matrix.
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
#TODO: onsider having posMat as part of the history state and have it update one level up.
self.posMat = posMat
self.posMat[0] = posMat[0] + x_size//2
self.posMat[1] = posMat[1] + y_size//2
hs_x_pos, hs_y_pos, firstPosOfHotSpot, hotSpotX, hotSpotY, data = self.alignment_parameters(element, x_size, y_size, hs_group, posMat, data)
#****************
num_projections = data.shape[1]
y_shifts = np.zeros(num_projections)
x_shifts = np.zeros(num_projections)
for j in range(num_projections):
if hs_x_pos[j] != 0 and hs_y_pos[j] != 0:
yyshift = int(round(y_size//2 - hotSpotY[j] - hs_y_pos[j] + hs_y_pos[firstPosOfHotSpot]))
xxshift = int(round(x_size//2 - hotSpotX[j] - hs_x_pos[j] + hs_x_pos[firstPosOfHotSpot]))
# data[:, j, :, :] = np.roll(np.roll(data[:, j, :, :], xxshift, axis=2), yyshift, axis=1)
data = self.shiftProjection(data, xxshift,yyshift,j)
if hs_x_pos[j] == 0:
xxshift = 0
if hs_y_pos[j] == 0:
yyshift = 0
x_shifts[j] = xxshift
y_shifts[j] = yyshift
print("align done")
return data, x_shifts, y_shifts
def hotspot2sine(self, element, x_size, y_size, hs_group, posMat, data, thetas):
'''
aligns projections to a sine curve based on hotspot information
Variables
-----------
element: int
element index
x_size: int
ROI pixel dimension in x
y_size: int
ROI pixel dimension in y
hs_group: int
hotspot group number
posMat: ndarray
position matrix. 2
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
thetas: ndarray
sorted projection angle list
'''
self.posMat = posMat
self.posMat[0] = posMat[0] + x_size//2
self.posMat[1] = posMat[1] + y_size//2
hs_x_pos, hs_y_pos, firstPosOfHotSpot, hotSpotX, hotSpotY, data = self.alignment_parameters(element, x_size, y_size, hs_group, self.posMat, data)
#****************
num_projections = data.shape[1]
y_shifts = np.zeros(num_projections)
x_shifts = np.zeros(num_projections)
thetas = np.asarray(thetas)
for j in range(num_projections):
if hs_x_pos[j] != 0 and hs_y_pos[j] != 0:
xxshift = int(round(x_size//2 - hotSpotX[j]))
yyshift = int(round(y_size//2 - hotSpotY[j]))
if hs_x_pos[j] == 0:
xxshift = 0
if hs_y_pos[j] == 0:
yyshift = 0
x_shifts[j] = xxshift
y_shifts[j] = yyshift
hotspotXPos = np.zeros(num_projections, dtype=np.int)
hotspotYPos = np.zeros(num_projections, dtype=np.int)
for i in range(num_projections):
hotspotYPos[i] = int(round(hs_y_pos[i]))
hotspotXPos[i] = int(round(hs_x_pos[i]))
hotspotProj = np.where(hotspotXPos != 0)[0]
theta_tmp = thetas[hotspotProj]
com = hotspotXPos[hotspotProj]
if hs_group == 0:
self.fitCenterOfMass(com, x=theta_tmp)
else:
self.fitCenterOfMass2(com, self.centers, x=theta_tmp)
self.alignCenterOfMass2(hotspotProj, data)
## yfit
for i in hotspotProj:
y_shifts[i] = int(hotspotYPos[hotspotProj[0]]) - int(hotspotYPos[i])
# data[:, i] = np.roll(data[:, i], y_shifts[i], axis=1)
data = self.shiftProjection(data, 0,y_shifts[i],i)
#update reconstruction slider value
# self.recon.sld.setValue(self.centers[2])
print("align done")
self.centers = list(np.round(self.centers))
return data, x_shifts, y_shifts
def setY(self, element, x_size, y_size, hs_group, posMat, data):
'''
aligns projections vertically
Variables
-----------
element: int
element index
x_size: int
ROI pixel dimension in x
y_size: int
ROI pixel dimension in y
hs_group: int
hotspot group number
posMat: ndarray
position matrix. 2
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
self.posMat = posMat
self.posMat[0] = posMat[0] + x_size//2
self.posMat[1] = posMat[1] + y_size//2
hs_x_pos, hs_y_pos, firstPosOfHotSpot, hotSpotX, hotSpotY, data = self.alignment_parameters(element, x_size, y_size, hs_group, self.posMat, data)
num_projections = data.shape[1]
y_shifts = np.zeros(num_projections)
for j in range(num_projections):
if hs_x_pos[j] != 0 and hs_y_pos[j] != 0:
yyshift = int(round(y_size//2 - hotSpotY[j] - hs_y_pos[j] + hs_y_pos[firstPosOfHotSpot]))
# data[:, j] = np.roll(data[:, j], yyshift, axis=1)
data = self.shiftProjection(data,0, yyshift,j)
if hs_y_pos[j] == 0:
yyshift = 0
y_shifts[j] = -yyshift
print("align done")
return data, y_shifts
def alignment_parameters(self, element, x_size, y_size, hs_group, posMat, data):
'''
gathers parameters for alignment functions
Variables
-----------
element: int
element index
x_size: int
ROI pixel dimension in x
y_size: int
ROI pixel dimension in y
hs_group: int
hotspot group number
posMat: ndarray
position matrix. 2
data: ndarray
4D xrf dataset ndarray [elements, theta, y,x]
'''
self.posMat = posMat
num_projections = data.shape[1]
hs_x_pos = np.zeros(num_projections, dtype=np.int)
hs_y_pos = np.zeros(num_projections, dtype=np.int)
hs_array = np.zeros([num_projections, y_size//2*2, x_size//2*2], dtype=np.int)
for i in range(num_projections):
hs_x_pos[i] = int(round(self.posMat[hs_group, i, 0]))
hs_y_pos[i] = int(abs(round(self.posMat[hs_group, i, 1])))
if hs_x_pos[i] != 0 and hs_y_pos[i] != 0:
if hs_y_pos[i] > (data.shape[2] - y_size//2): # if ROI is past top edge of projection
hs_y_pos[i] = data.shape[2] - y_size//2
if hs_y_pos[i] < y_size//2: # if ROI is past bottom of projection
hs_y_pos[i] = y_size//2
if hs_x_pos[i] < x_size//2: # if ROI is past left edge of projection
hs_x_pos[i] = x_size//2
if hs_x_pos[i] > (data.shape[3] - x_size//2): # if ROI is past right edge of projection
hs_x_pos[i] = data.shape[3] - x_size//2
y0 = hs_y_pos[i] - y_size//2
y1 = hs_y_pos[i] + y_size//2
x0 = hs_x_pos[i] - x_size//2
x1 = hs_x_pos[i] + x_size//2
hs_array[i, :, :] = data[element, i, (data.shape[2] - y1):(data.shape[2] - y0), x0:x1]
hotSpotX = np.zeros(num_projections, dtype=np.int)
hotSpotY = np.zeros(num_projections, dtype=np.int)
new_hs_array = np.zeros(hs_array.shape, dtype=np.int)
new_hs_array[...] = hs_array[...]
firstPosOfHotSpot = 0
add = 1
for i in range(num_projections):
if hs_x_pos[i] == 0 and hs_y_pos[i] == 0:
firstPosOfHotSpot += add
if hs_x_pos[i] != 0 or hs_y_pos[i] != 0:
img = hs_array[i, :, :]
a, x, y, b, c = self.fitgaussian(img)
hotSpotY[i] = x
hotSpotX[i] = y
yshift_tmp = int(round(y_size - hotSpotY[i]))
xshift_tmp = int(round(x_size - hotSpotX[i]))
new_hs_array[i, :, :] = np.roll(new_hs_array[i, :, :], xshift_tmp, axis=1)
new_hs_array[i, :, :] = np.roll(new_hs_array[i, :, :], yshift_tmp, axis=0)
add = 0
return hs_x_pos, hs_y_pos, firstPosOfHotSpot, hotSpotX, hotSpotY, data
def fitCenterOfMass(self, com, x):
fitfunc = lambda p, x: p[0] * np.sin(2 * np.pi / 360 * (x - p[1])) + p[2]
errfunc = lambda p, x, y: fitfunc(p, x) - y
p0 = [100, 100, 100]
self.centers, success = optimize.leastsq(errfunc, np.asarray(p0), args=(x, com))
self.centerOfMassDiff = fitfunc(p0, x) - com
print(self.centerOfMassDiff)
def alignCenterOfMass2(self, hotspotProj, data):
j = 0
for i in hotspotProj:
self.x_shifts[i] += int(self.centerOfMassDiff[j])
# data[:, i] = np.roll(data[:, i], int(round(self.x_shifts[i])), axis=2)
data = self.shiftProjection(data, self.x_shifts[i],0,i)
j += 1
#set some label to be show that the alignment has completed. perhaps print this in a logbox
def fitCenterOfMass2(self, com, x):
fitfunc = lambda p, x: p[0] * np.sin(2 * np.pi / 360 * (x - p[1])) + self.centers[2]
errfunc = lambda p, x, y: fitfunc(p, x) - y
p0 = [100, 100]
p2, success = optimize.leastsq(errfunc, np.asarray(p0), args=(x, com))
self.centerOfMassDiff = fitfunc(p2, x) - com
print(self.centerOfMassDiff)
def fitgaussian(self, data):
"""
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit
"""
params = self.moments(data)
errorfunction = lambda p: np.ravel(self.gaussian(*p)(*np.indices(data.shape)) - data)
p, success = optimize.leastsq(errorfunction, params)
return p
def moments(self, data):
"""
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments
"""
total = data.sum()
if total == 0:
x = 0
y = 0
else:
X, Y = np.indices(data.shape)
x = (X * data).sum() / total
y = (Y * data).sum() / total
col = data[:, int(y)]
if col.sum() == 0:
width_x = 0
else:
width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
# TODO: rundime wasrning: invalid value encountered in double_scalars
row = data[int(x), :]
if row.sum() == 0:
width_y = 0
else:
width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
height = data.max()
return height, x, y, width_x, width_y
def gaussian(self, height, center_x, center_y, width_x, width_y):
"""
Returns a gaussian function with the given parameters
"""
width_x = float(width_x)
width_y = float(width_y)
if width_x == 0:
return lambda x, y: 0
if width_y == 0:
return lambda x, y: 0
# ss = lambda x, y: height * exp(-(((center_x - x) / width_x) ** 2 + ((center_y - y) / width_y) ** 2) / 2)
return lambda x, y: height * np.exp(-(((center_x - x) / width_x) ** 2 + ((center_y - y) / width_y) ** 2) / 2)
def clrHotspot(self, posMat):
    """Zero out the hotspot position matrix in place and return it."""
    posMat.fill(0)
    return posMat
|
from sense_hat import SenseHat
from time import sleep

# 8x8 LED pong: display handle, colours, ball state, and bat position.
sense = SenseHat()

white = (255,255,255)   # bat colour
red = (255,0,0)         # ball colour
purple = (255,0,255)    # corner marker pixel at (0, 0)

# Ball state: [x, y] position and [dx, dy] velocity applied each frame.
ball_position = [4,4]
ball_velocity = [1,1]
# Second ball starts adjacent to the first and travels the opposite way.
ball_velocity2 = [-1,-1]
ball_position2 = [3,3]

sense.set_pixel (0,0,purple)

# Vertical centre of the 3-pixel bat drawn in column 0.
bat_y = 4
def draw_bat():
    """Render the three-pixel bat in column 0, centred on bat_y."""
    for offset in (0, 1, -1):
        sense.set_pixel(0, bat_y + offset, white)
def draw_ball():
    """Draw ball 1, then advance it one step, bouncing off walls and the bat column."""
    sense.set_pixel(ball_position[0], ball_position[1], red)
    # Horizontal step with wall bounce.
    ball_position[0] += ball_velocity[0]
    if ball_position[0] in (0, 7):
        ball_velocity[0] = -ball_velocity[0]
    # Vertical step with wall bounce.
    ball_position[1] += ball_velocity[1]
    if ball_position[1] in (0, 7):
        ball_velocity[1] = -ball_velocity[1]
    # Rebound at the bat column (x == 1) regardless of bat position.
    if ball_position[0] == 1:
        ball_velocity[0] = -ball_velocity[0]
def draw_ball2():
    # Draw the second ball, then advance and bounce it (mirror of draw_ball).
    sense.set_pixel(ball_position2[0], ball_position2[1], red)
    ball_position2[0] += ball_velocity2[0]
    if ball_position2[0] == 7 or ball_position2[0] == 0:
        ball_velocity2[0] = -ball_velocity2[0]
    ball_position2[1] += ball_velocity2[1]
    if ball_position2[1] == 7 or ball_position2[1] == 0:
        ball_velocity2[1] = -ball_velocity2[1]
    # Rebound at the bat column, as for ball 1.
    if ball_position2[0] == 1:
        ball_velocity2[0] = -ball_velocity2[0]

# NOTE(review): original indentation was lost; this clear most plausibly ran
# once at top level before the main loop (which clears each frame itself) —
# confirm it is not meant to be inside draw_ball2.
sense.clear(0,0,0)
def move_up(event):
    """Joystick-up handler: move the bat up one pixel, keeping it on screen.

    The bat spans bat_y-1..bat_y+1, so bat_y never goes below 1.
    """
    global bat_y
    if event.action != 'pressed':
        return
    if bat_y > 1:
        bat_y -= 1
def move_down(event):
    """Joystick-down handler: move the bat down one pixel, keeping it on screen.

    Bug fix: the original reused move_up's guard (`bat_y > 1`) together with the
    obscure `bat_y -= -1`, so the bat could never move down from the top row and
    could run off the bottom of the 8x8 display. The bat spans bat_y-1..bat_y+1,
    so bat_y may rise to at most 6 (mirror of move_up's lower bound of 1).
    """
    global bat_y
    if event.action == 'pressed' and bat_y < 6:
        bat_y += 1
# Main loop: one frame every 0.25 s — draw bat, clear, advance both balls.
while True:
    draw_bat()
    # NOTE(review): the joystick handlers are re-assigned every frame;
    # assigning them once before the loop would be sufficient.
    sense.stick.direction_up = move_up
    sense.stick.direction_down = move_down
    sleep(0.25)
    sense.clear(0, 0, 0)
    draw_ball()
    # Re-draw the corner marker that the clear wiped out.
    sense.set_pixel (0,0,purple)
    draw_ball2()
|
"""Auxiliary utils for implementing pruning strategies
"""
from collections import OrderedDict, defaultdict
import torch
from torch import nn
from ..models.head import get_classifier_module
def hook_applyfn(hook, model, forward=False, backward=False):
    """Build an `nn.Module.apply`-compatible registrar for `hook`.

    Exactly one of `forward`/`backward` must be True. Container modules
    (Sequential / ModuleList / ModuleDict) and the root `model` itself are
    skipped, so the hook lands only on leaf submodules.

    Returns:
        (register_hook, hooks): pass `register_hook` to `model.apply(...)`;
        `hooks` accumulates the handles so they can be removed later.
    """
    assert forward ^ backward, \
        "Either forward or backward must be True"
    hooks = []
    _containers = (nn.Sequential, nn.ModuleList, nn.ModuleDict)

    def register_hook(module):
        # Instrument only leaf, non-root modules.
        if isinstance(module, _containers) or module is model:
            return
        if forward:
            hooks.append(module.register_forward_hook(hook))
        if backward:
            hooks.append(module.register_backward_hook(hook))

    return register_hook, hooks
# def get_modules(module, prefix=""):
# """Recursively find all submodules from torch modules,
# returning them in state_dict format
# """
# # TODO unnecesary given named_modules
# modules = {}
# for n, m in module.named_children():
# modules[prefix+n] = m
# modules.update(get_modules(m, prefix=prefix+n+'.'))
# return modules
def get_params(model, recurse=False):
    """Return a name -> numpy-array snapshot of the model's parameters.

    Values are detached, moved to CPU and copied, so mutating the result
    cannot affect the model.
    """
    return {name: tensor.detach().cpu().numpy().copy()
            for name, tensor in model.named_parameters(recurse=recurse)}
def get_activations(model, input):
    """Run one no-grad forward pass and capture (input, output) numpy copies
    for every leaf module, keyed by module in execution order."""
    activations = OrderedDict()

    def _record(module, inputs, output):
        if isinstance(module, nn.ReLU):
            # TODO Fix. ResNet18 implementation reuses a
            # single ReLU layer?
            return
        assert module not in activations, \
            f"{module} already in activations"
        # TODO remove [0], not all models have a single input
        activations[module] = (inputs[0].detach().cpu().numpy().copy(),
                               output.detach().cpu().numpy().copy(),)

    register, handles = hook_applyfn(_record, model, forward=True)
    model.apply(register)
    with torch.no_grad():
        model(input)
    for handle in handles:
        handle.remove()
    return activations
def get_gradients(model, inputs, outputs):
    """Placeholder for capturing per-activation gradients via backward hooks.

    Unimplemented (returns None); see the notes below for why a naive
    register_backward_hook approach is insufficient.
    """
    # TODO implement using model.register_backward_hook()
    # So it is harder than it seems, the grad_input contains also the gradients
    # with respect to the weights and so far order seems to be (bias, input, weight)
    # which is confusing
    # Moreover, a lot of the time the output activation we are looking for is the
    # one after the ReLU and F.ReLU (or any functional call) will not be called by
    # the forward or backward hook
    # Discussion here
    # https://discuss.pytorch.org/t/how-to-register-hook-function-for-functional-form/25775
    # Best way seems to be monkey patching F.ReLU & other functional ops
    # That'll also help figuring out how to compute a module graph
    pass
def get_param_gradients(model, inputs, outputs, loss_func=None, by_module=True):
    """Backprop one batch and return numpy copies of all parameter gradients.

    With by_module=True the result maps module -> {param name -> grad};
    otherwise it maps fully-qualified param name -> grad. The model's
    gradients are zeroed and its train/eval mode restored before returning.
    Default loss is CrossEntropyLoss.
    """
    if loss_func is None:
        loss_func = nn.CrossEntropyLoss()

    was_training = model.training
    model.train()
    loss = loss_func(model(inputs), outputs)
    loss.backward()

    if by_module:
        gradients = defaultdict(OrderedDict)
        for module in model.modules():
            assert module not in gradients
            for name, param in module.named_parameters(recurse=False):
                if param.requires_grad and param.grad is not None:
                    gradients[module][name] = param.grad.detach().cpu().numpy().copy()
    else:
        gradients = OrderedDict()
        for name, param in model.named_parameters():
            assert name not in gradients
            if param.requires_grad and param.grad is not None:
                gradients[name] = param.grad.detach().cpu().numpy().copy()

    # Leave the model clean for the caller.
    model.zero_grad()
    model.train(was_training)
    return gradients
def prunable_modules(model, masked_modules, prune_classifier=True):
    """Return the list of submodules eligible for pruning.

    Eligible means an instance of one of `masked_modules`; when
    prune_classifier is False the model's classifier head (as located by
    get_classifier_module) is excluded.
    """
    eligible = {name: module
                for name, module in model.named_modules()
                if isinstance(module, masked_modules)}
    if not prune_classifier:
        clf = get_classifier_module(model)
        if clf in eligible:
            del eligible[clf]
    return list(eligible.values())
def fraction_to_keep(compression, model, prunable_modules):
    """ Return fraction of params to keep to achieve compression ratio

    Compression = total / ( fraction * prunable + (total-prunable))
    # Using algrebra fraction is equal to
    # fraction = total/prunable * (1/compression - 1) + 1
    """
    from ..metrics import model_size
    # TODO fix for compounding
    total_size = model_size(model)[0]
    # [1] is so we use nonzeros, this is useful for compounding
    prunable_size = sum(model_size(m)[0] for m in prunable_modules)
    nonprunable_size = total_size - prunable_size
    fraction = 1 / prunable_size * (total_size/compression - nonprunable_size)
    assert 0 < fraction <= 1, \
        f"Cannot compress to {1/compression} model with {nonprunable_size/total_size}" + \
        "fraction of unprunable parameters"
    return fraction
# def fraction_to_keep(compression, model, prunable_modules):
# """ Return fraction of params to keep to achieve compression ratio
# total = prunable + nonprunable + zeros
# Compression = (prunable + nonprunable) / (fraction * prunable + nonprunable)
# # Using algrebra fraction is equal to
# # fraction = (prunable + nonprunable) / (prunable * compression) - (nonprunable / prunable)
# """
# total_size, nonzero_size = model_size(model)
# # [1] is so we use nonzeros, this is useful for compounding
# prunable_size = sum([model_size(m)[1] for m in prunable_modules.values()])
# nonprunable_size = nonzero_size - prunable_size
# fraction = (prunable_size + nonprunable_size) / (prunable_size * compression) - (nonprunable_size / prunable_size)
# assert 0 < fraction <= 1, \
# f"Cannot compress to {1/compression} model with {(total_size-prunable_size)/total_size}" +\
# "fraction of unprunable parameters"
# print(f"{fraction:.2f} <> {prunable_size/total_size:.2f} {nonprunable_size/total_size:.2f}")
# return fraction
|
import csv

# Bucket each grade's rating into a coarse category and append it to
# rating_cat.csv.
#
# Bug fixes vs. the original:
#  * ratings were compared as *strings* (lexicographic: "10" < "2"), now
#    compared numerically;
#  * the second bucket label was the typo '3.3.5', now '3-3.5';
#  * the output file was reopened and a fresh writer built for every input
#    row — both are now created once.
with open('grades.csv', newline='') as csvfile, \
        open('rating_cat.csv', mode='a', newline='') as rating_file:
    rating_writer = csv.writer(rating_file, delimiter=',', quotechar='"',
                               quoting=csv.QUOTE_MINIMAL)
    for line in csv.DictReader(csvfile):
        rating = float(line['rating'])
        if 1 < rating <= 2.5:
            rating_writer.writerow(['2-2.5'])
        elif 2.5 < rating <= 3.5:
            rating_writer.writerow(['3-3.5'])
        elif 3.5 < rating <= 4.5:
            rating_writer.writerow(['4-4.5'])
        else:
            # Out-of-range ratings are passed through verbatim.
            rating_writer.writerow([line['rating']])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Decks.user with a nullable one-to-one link to GameAccount."""

    dependencies = [
        ('jedisite', '0005_auto_20160107_2136'),
    ]

    operations = [
        # Dropping 'user' discards any existing association; the new
        # 'account' field starts as NULL for all rows.
        migrations.RemoveField(
            model_name='decks',
            name='user',
        ),
        migrations.AddField(
            model_name='decks',
            name='account',
            field=models.OneToOneField(null=True, to='jedisite.GameAccount'),
        ),
    ]
|
import Queue, copy
from itertools import tee, izip
from collections import defaultdict
# Pairing tool #
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2,s3), ... (Python 2: izip-based)."""
    first, second = tee(iterable)
    next(second, None)
    return izip(first, second)
# Cartesian X-Y Point #
class Point:
    """A 2-D Cartesian point with x/y attributes (both default 0)."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __str__(self):
        # "(x, y)" — same rendering as str.format on both coordinates.
        return "(%s, %s)" % (self.x, self.y)
# distance formula
def distance(a, b):
    """Euclidean distance between two point-like objects with .x/.y."""
    dx = a.x - b.x
    dy = a.y - b.y
    return (dx * dx + dy * dy) ** 0.5
# Search Algorithms #
class Search:
    """Path-finding (BFS / DFS / uniform-cost) over an adjacency-matrix graph
    with 2-D point locations.

    NOTE(review): the parsed graph (`adjList`) and coordinates (`points`) are
    stored in module-level globals, so constructing a second Search clobbers
    the first instance's data; the two input files are also never closed.
    """
    # Requires an adjacency matrix file and a file containing the locations of the points
    def __init__(self, ptFile, mtxFile, start, end):
        global adjList # store the matrix in an adjacency list
        global points # store the point locations
        self.start = start
        self.end = end
        self.adjMtxFile = open(mtxFile,'r')
        self.pointFile = open(ptFile,'r')
        adjList = defaultdict(list)
        points = []
        fromA = 0
        # Read in the adjacency matrix file, store in a dictionary of lists -> adjList[parent] = [child1, child2, ....]
        self.adjMtxFile.readline()
        for line in self.adjMtxFile:
            line = line[1:]
            toB = 0
            for i in line.split():
                if i.isdigit():
                    if i == '1':
                        adjList[fromA].append(toB)
                    toB += 1
            fromA+=1
        # Read in the point file, store in a list of points
        for line in self.pointFile:
            line = line[1:]
            tmp=[float(i) for i in line.split(",")]
            points.append(Point(tmp[0],tmp[1]))

    # Breadth-First Search #
    def breadth(self):
        """Return a start->end path expanding paths in FIFO order, or {} if none exists."""
        # Data structure setup
        frontier = Queue.Queue() # paths with the last node being an unvisited node that can be visited from the visited nodes
        explored = set() # set of nodes visited -> "explored"
        curr = [self.start] # current path
        frontier.put(curr)
        while not frontier.empty(): # if the frontier empty their is no path to the goal node
            curr = frontier.get() # get the top path from the frontier
            ''' # DEBUG 1
            print "PATH", curr
            # DEBUG 1 '''
            explored.add(curr[-1]) # add the last point in current path to explored
            if curr[-1] == self.end: # if the last point in the path is the goal, the algorithm should return the path
                return curr
            ''' # DEBUG 2
            print "ADJ", adjList[curr[-1]]
            # DEBUG 2 '''
            # For each adjacency of the last node in the path, so long as the next node isn't visited:
            for i in adjList[curr[-1]]:
                if i not in explored:
                    curr.append( i ) # add the node to the path
                    frontier.put( copy.deepcopy(curr) ) # add a deep copy of the path to the frontier
                    explored.add( i ) # add the node to explored
                    curr.remove( i ) # remove the node from the path, and check the remaining adjacencies
            ''' # DEBUG 3
            print "FRONTIER ", list(frontier.queue)
            print "EXPLORED ", explored
            # DEBUG 3 '''
        # If frontier is empty, their is no path from the self.start to the goal, return an empty path
        curr = {}
        return curr

    # Depth-First Search #
    def depth(self):
        """Return a start->end path expanding paths in LIFO (stack) order, or {} if none exists."""
        # Reference BFS for line by line comments
        # Data structure setup
        frontier = [] # paths are now stored in a list which will act as a stack
        explored = set()
        curr = [self.start]
        frontier.append(curr)
        while frontier:
            curr = frontier.pop() # use the frontier as a stack
            ''' # DEBUG 1
            print "PATH", curr
            # DEBUG 1 '''
            explored.add(curr[-1])
            if curr[-1] == self.end:
                return curr
            ''' # DEBUG 2
            print "ADJ", adjList[curr[-1]]
            # DEBUG 2 '''
            for i in reversed(adjList[curr[-1]]):
                if i not in explored:
                    curr.append( i )
                    frontier.append( copy.deepcopy(curr) )
                    explored.add( i )
                    curr.remove( i )
            ''' # DEBUG 3
            print "FRONTIER ", list(frontier.queue)
            print "EXPLORED ", explored
            # DEBUG 3 '''
        curr = {}
        return curr

    # Uniform Search #
    def uniform(self):
        """Return a start->end path always expanding the frontier path of minimum
        Euclidean length, or {} if none exists."""
        # Reference BFS for line by line comments
        # Data structure setup
        frontier = [] # paths will be stored in a list (not acting like a stack as DFS)
        explored = set()
        curr = [self.start]
        frontier.append(curr)
        while frontier:
            # Calculate the minimum distance path
            minDist = []
            for front in frontier: # iterate through each possible path, storing the distance from self.start to last node in the path
                d = 0
                if len(front) > 1:
                    for v, w in pairwise(front):
                        d += distance(points[v],points[w])
                minDist.append( copy.deepcopy(d) )
            curr = frontier.pop(minDist.index(min(minDist))) # current path is the path with the minimum distance
            ''' # DEBUG 1
            print "PATH", curr
            # DEBUG 1 '''
            explored.add(curr[-1])
            if curr[-1] == self.end:
                return curr
            ''' # DEBUG 2
            print "ADJ", adjList[curr[-1]]
            # DEBUG 2 '''
            for i in adjList[curr[-1]]:
                if i not in explored:
                    curr.append( i )
                    frontier.append( copy.deepcopy(curr) )
                    explored.add( i )
                    curr.remove( i )
            ''' # DEBUG 3
            print "FRONTIER ", list(frontier.queue)
            print "EXPLORED ", explored
            # DEBUG 3 '''
        curr = {}
        return curr;
|
import biorbd
from time import time
from biorbd_optim import (
OptimalControlProgram,
ProblemType,
Bounds,
QAndQDotBounds,
InitialConditions,
ShowResult,
)
def prepare_ocp(biorbd_model_path, final_time, number_shooting_points, nb_threads):
    """Build the pendulum optimal control program.

    All states are pinned to 0 at the first and last shooting nodes, except
    state index 1 at the final node which is forced to 3.14 (presumably the
    pendulum angle at the inverted position — confirm against the .bioMod).
    The last torque channel is disabled (bounds 0).
    """
    # --- Options --- #
    biorbd_model = biorbd.Model(biorbd_model_path)
    torque_min, torque_max, torque_init = -100, 100, 0
    n_q = biorbd_model.nbQ()
    n_qdot = biorbd_model.nbQdot()
    n_tau = biorbd_model.nbGeneralizedTorque()

    # Add objective functions
    objective_functions = ()

    # Dynamics
    problem_type = ProblemType.torque_driven

    # Constraints
    constraints = ()

    # Path constraint
    X_bounds = QAndQDotBounds(biorbd_model)
    # Pin every state (q and qdot) at the first and last shooting nodes.
    X_bounds.min[:, [0, -1]] = 0
    X_bounds.max[:, [0, -1]] = 0
    # Final value of state index 1 forced to 3.14.
    X_bounds.min[1, -1] = 3.14
    X_bounds.max[1, -1] = 3.14

    # Initial guess
    X_init = InitialConditions([0] * (n_q + n_qdot))

    # Define control path constraint
    U_bounds = Bounds(min_bound=[torque_min] * n_tau, max_bound=[torque_max] * n_tau)
    # The last generalized-torque channel is not actuated (bounds pinned to 0).
    U_bounds.min[n_tau - 1, :] = 0
    U_bounds.max[n_tau - 1, :] = 0
    U_init = InitialConditions([torque_init] * n_tau)

    # ------------- #
    return OptimalControlProgram(
        biorbd_model,
        problem_type,
        number_shooting_points,
        final_time,
        X_init,
        U_init,
        X_bounds,
        U_bounds,
        objective_functions,
        constraints,
        nb_threads=nb_threads,
    )
if __name__ == "__main__":
    # Build, solve, save, then round-trip the program through save/load to
    # check serialization before displaying results.
    ocp = prepare_ocp(biorbd_model_path="pendulum.bioMod", final_time=3, number_shooting_points=100, nb_threads=4)

    # --- Solve the program --- #
    tic = time()
    sol = ocp.solve(show_online_optim=False)
    toc = time() - tic
    print(f"Time to solve : {toc}sec")

    # --- Save the optimal control program and the solution --- #
    ocp.save(sol, "pendulum")

    # --- Load the optimal control program and the solution --- #
    ocp_load, sol_load = OptimalControlProgram.load("pendulum.bo")

    # --- Show results --- #
    result = ShowResult(ocp_load, sol_load)
    result.graphs()
    result.animate()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Functions to compute the entropic complexity of a given objective
function.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input, int,
map, next, oct, open, pow, range, round, str,
super, zip)
import numpy as np
from pytopocomplexity.search import random_hill_climbing
from pytopocomplexity.util import normalize, sample_box
def compute_entropy(objective_function, bounds):
    """Exactly compute the entropy of an objective function (Definition 1 in the
    paper).

    We will do this using a Monte Carlo method. First we uniformly sample the
    model space. Then we run the models downhill and determine in which basin of
    attraction each one lies. This lets us compute the volume of the basin of
    attraction. Then we can directly compute the entropy.

    Parameters
    ----------
    objective_function : function
        Objective function whose entropy will be computed
    bounds : list
        A list of n 2-tuples corresponding to the lower and upper bounds of each
        of the n coordinate axes.

    Returns
    -------
    entropy : float
        The computed topographical entropy of the objective function.
    """
    num_samples = 100
    points = sample_box(bounds, num_samples)
    minima, frequencies = random_hill_climbing(objective_function, points)
    num_minima = len(minima)

    # The probability of converging to a given basin of attraction
    probabilities = np.asarray([f/num_samples for f in frequencies])
    probabilities = normalize(probabilities)

    # BUG fix: the reference value must be the smallest *objective value*.
    # The original took min() over `minima` (the minimizing models) and then
    # subtracted it from the function values, mixing model coordinates with
    # objective values. This now mirrors estimate_entropy().
    function_values = np.asarray([objective_function(m) for m in minima])
    global_minimum = function_values.min()
    normed_function_values = np.abs(function_values - global_minimum)
    sigma = 1/num_minima*np.sum(normed_function_values)
    if not np.isclose([sigma], [0]).all():
        # Down-weight basins whose minima lie far above the global minimum.
        probabilities = probabilities*np.exp(-normed_function_values/sigma)
        probabilities = normalize(probabilities)
    print(np.sum(probabilities))
    entropy = -np.sum(probabilities*np.log(probabilities))
    return entropy
def estimate_entropy(objective_function, initial_models, tolerance=None,
                     max_iterations=None):
    """Estimate the entropy of an objective function (Definition 2 in the
    paper).

    Parameters
    ----------
    objective_function : function
        Objective function whose entropy will be estimated
    initial_models : array-like
        Array containing initial models. These should be chosen uniformly at
        random from the model space
    tolerance : float
        Tolerance corresponding to the stopping criteron for RHC
    max_iterations: int
        Maximum number of iterations for RHC search

    Returns
    -------
    entropy : float
        A statistical estimate of the topological entropy of the objective
        function
    """
    converged_models, frequencies = random_hill_climbing(objective_function,
                                                         initial_models,
                                                         tolerance,
                                                         max_iterations)
    num_minima = len(converged_models)
    function_values = np.array([objective_function(m) for m in
                                converged_models])
    # Distance of each minimum's value above the best one found.
    min_function_values = np.full([num_minima], function_values.min())
    normed_function_values = np.abs(function_values - min_function_values)
    sigma = 1/num_minima*np.sum(normed_function_values)
    # Degenerate case: all minima share the same value -> uniform weights.
    if np.isclose([sigma], [0]).all():
        weights = np.ones([num_minima])
    else:
        weights = np.exp(-np.abs(function_values - min_function_values)/sigma)
    occupancy = frequencies/len(initial_models)
    q_unnormed = occupancy*weights
    q_normed = q_unnormed/np.sum(q_unnormed)
    return -np.sum(q_normed*np.log(q_normed))
if __name__ == '__main__':
    # Example: compute the entropy of sin(x) on [0, pi] and print it.
    def f(x):
        return np.sin(x)

    bounds = [(0, np.pi)]
    entropy = compute_entropy(f, bounds)
    print(entropy)
|
import sys

# Split <fname> into sequential chunk files <outprefix>0, <outprefix>1, ...:
# a new chunk is started each time <pattern> occurs in a line.
#
# NOTE(review): original indentation was lost; this reconstruction assumes
# every line is written to the current chunk (standard splitter behaviour),
# with the matching line opening the new chunk — confirm against intent.
# Improvement over the original: handles are closed via with/finally even if
# an I/O error interrupts the copy.
fname = sys.argv[1]
pattern = sys.argv[2]
outprefix = sys.argv[3]

n = 0
with open(fname) as f:
    outf = open(outprefix + str(n), 'w')
    try:
        for l in f:
            if pattern in l:
                n += 1
                outf.close()
                outf = open(outprefix + str(n), 'w')
            outf.write(l)
    finally:
        outf.close()
|
import requests
from basic import Basic
class Menu(object):
    """Thin client for the WeChat custom-menu creation API."""

    def __init__(self):
        pass

    def create(self, postData, accessToken):
        """POST the menu-definition JSON to menu/create and print the reply.

        postData is a JSON string; it is sent UTF-8 encoded so non-ASCII
        labels survive the request body.
        """
        url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s" % accessToken
        # if isinstance(postData,unicode):
        response = requests.post(url, data=postData.encode('utf8'))
        print(response.text)
if __name__ == '__main__':
    myMenu = Menu()
    # Menu definition sent verbatim to the WeChat API: one click button, one
    # view-button submenu, and one media button (labels are Chinese UI strings).
    postJson = """
    {
        "button":
        [
            {
                "type":"click",
                "name":"开发指引",
                "key":"mpGuide"
            },
            {
                "name":"公众平台",
                "sub_button":
                [
                    {
                        "type":"view",
                        "name":"更新公告",
                        "url":"http://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1418702138&token=&lang=zh_CN"
                    },
                    {
                        "type":"view",
                        "name":"接口权限说明",
                        "url":"http://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1418702138&token=&lang=zh_CN"
                    },
                    {
                        "type":"view",
                        "name":"返回码说明",
                        "url":"http://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1433747234&token=&lang=zh_CN"
                    }
                ]
            },
            {
                "type":"media_id",
                "name":"旅行",
                "media_id":"oV-d-Gt0UUGRkmYOoqzwdbxh3AO2JjiVLA3iMdqdW2c"
            }
        ]
    }
    """
    print(type(postJson))
    # accessToken = Basic().get_access_token()
    # NOTE(review): hard-coded access token checked into source — it expires and
    # is a credential leak; fetch it via Basic().get_access_token() instead.
    accessToken = '19_3NuYtsVv322Zrsg0pa7ZwGmR9sBmkHAPT73KBe4stEUoDzk5HO2Y5IzjfPpOQusmFsADljC7WFUYYeFH2okiOOdXFeCF173Ckj4cQZyHl44694Yaa42zYVry8jYEBDdBb26AHSfYrME9dUEPNERgAJAULY'
    # myMenu.create(postJson,accessToken)
|
# -*- encoding: utf-8 -*-
'''
@File : 661-图片平滑器.py
@Time : 2021/08/13 22:42:36
@Author : TYUT ltf
@Version : v1.0
@Contact : 18235121656@163.com
@License : (C)Copyright 2020-2030, GNU General Public License
'''
# here put the import lib
from typing import List
'''
包含整数的二维矩阵 M 表示一个图片的灰度。你需要设计一个平滑器来让每一个单元的灰度成为平均灰度 (向下舍入) ,平均灰度的计算是周围的8个单元和它本身的值求平均,如果周围的单元格不足八个,则尽可能多的利用它们。
示例 1:
输入:
[[1,1,1],
[1,0,1],
[1,1,1]]
输出:
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
解释:
对于点 (0,0), (0,2), (2,0), (2,2): 平均(3/4) = 平均(0.75) = 0
对于点 (0,1), (1,0), (1,2), (2,1): 平均(5/6) = 平均(0.83333333) = 0
对于点 (1,1): 平均(8/9) = 平均(0.88888889) = 0
注意:
给定矩阵中的整数范围为 [0, 255]。
矩阵的长和宽的范围均为 [1, 150]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/image-smoother
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution:
    def imageSmoother(self, M: List[List[int]]) -> List[List[int]]:
        """Return the image where every cell is the floor of the average of
        itself and its up-to-8 in-bounds neighbours (LeetCode 661).

        Improvements over the original: replaces the 0.5-sentinel padding
        trick with a direct bounds-checked neighbourhood sum, and builds a new
        result instead of mutating the input matrix in place.
        """
        rows, cols = len(M), len(M[0])
        ans = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                total = 0
                count = 0
                # 3x3 window clipped to the image boundary.
                for i in range(r - 1, r + 2):
                    for j in range(c - 1, c + 2):
                        if 0 <= i < rows and 0 <= j < cols:
                            total += M[i][j]
                            count += 1
                # Values are non-negative, so // matches "round down".
                ans[r][c] = total // count
        return ans
|
"""
This module contains the functions necessary to extract a thumbprint for a given image. When run in main,
plots the thumbprint generated by sys.argv[1]
"""
import skimage.filters as skfill
import skimage.morphology as skmorph
import skimage.measure as skmeasure
import skimage.exposure as ske
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import heapq
import sys
def sin2d(u, v, ii, jj, N, M):
    """
    Generates a sine wave structuring element to be used in convolving.

    Params: u: int. factor of the vertical period of waves (higher value, more waves)
            v: int. factor of the horizontal period of waves (higher value, more waves)
            ii: NxM 2D numpy array. Horizontal gradient where col0 = 0 and coln = coln-1 + 1
            jj: NxM 2D numpy array. Vertical gradient where col0 = 0 and coln = coln-1 + 1
            N: int. # rows
            M: int. # cols
    Output: 2D numpy array. Our sine wave structure element, scaled by 255/(N*M)
    """
    wave = np.sin(u * ii / (2 * np.pi) + v * jj / (2 * np.pi))
    return wave * 255 / (N * M)
def high_freq_detection(imgray):
    """
    Given an image, finds the high spatial frequency areas of the image by convolving high
    frequency sine waves with the image in question. High frequency areas are thus brighter
    and low frequency areas are darker.

    Params: imgray: 2D numpy array. B&W image in question
    Output: 2D numpy array with high spatial frequency regions outlined
    """
    N = 200
    M = 200
    i = np.linspace(0, N - 1, N)
    j = np.linspace(0, M - 1, M)
    jj, ii = np.meshgrid(j, i)

    # Looked at gabor filter but doesn't work because of the gray region outside of the peak sign region.
    # Here we use sine waves of varying frequencies and directions to capture as much of the thumb print
    # as possible. The accumulated responses highlight high-frequency regions.
    new_img = np.zeros(imgray.shape)
    for u, v in ((2, 2), (1, 2), (2, 1), (-2, 1), (-2, 2), (-2, 3)):
        kernel = sin2d(u, v, ii, jj, N, M)
        new_img += cv.filter2D(imgray, -1, kernel)
    return new_img
def thresh_series_mask(imgray):
    """
    Generates a mask to be used on original image to crop out the thumb region.

    Params: imgray: 2D numpy array. B&W image in question
    Output: 2D numpy array. Binary mask for B&W image in question
    """
    # first equilize the histogram to emphasize the area around the thumb
    imgray_eq = ske.equalize_hist(imgray)
    # median filter to perserve the edges while removing spatial frequency
    median_img = skfill.median(imgray_eq, selem=skmorph.disk(21))

    # PERF fix: the high-frequency map depends only on imgray, so compute it
    # once instead of once per block size inside the loop (it was the most
    # expensive step of each iteration); also fixes the 'hight' typo.
    high_freq_img = high_freq_detection(imgray)

    # some blocks sizes work better than others for different images,
    # we try a wide range of them to get the best qualities of each
    block_sizes = [21, 41, 51, 61, 81]
    masks = []
    for block_size in block_sizes:
        # some larger some smaller threshold blocks
        local_thresh = skfill.threshold_local(median_img, block_size)
        binary_local = median_img > local_thresh
        # detect regions
        labels = skmeasure.label(binary_local)
        # take 15 largest area regions detected in our thumb
        label_props = skmeasure.regionprops(labels)
        areas = [label_props[i]["area"] for i in range(len(label_props))]
        max_areas = heapq.nlargest(15, areas)
        max_indicies = [areas.index(max_areas[i]) + 1 for i in range(len(max_areas))]
        # find the largest/highest frequency region
        max_i = 0
        max_count = 0
        for i in range(len(max_indicies)):
            label_vals = labels == max_indicies[i]
            # finding our region using our high_freq_detection() output
            current_count = (label_vals.astype(int) * high_freq_img).sum()
            if current_count > max_count:
                max_count = current_count
                max_i = max_indicies[i]
        mask = labels == max_i
        masks.append(mask)

    mask = np.zeros(masks[0].shape).astype(bool)
    # combine all masks from each of our thresholds with bitwise or
    for m in masks:
        mask = mask | m
    # clean up the mask
    mask = skmorph.binary_closing(mask, selem=skmorph.disk(30))
    return skmorph.binary_erosion(mask, selem=skmorph.disk(10))
def extract_print(mask, imgray):
    """
    Gets our thumbprint, given a mask generated by thresh_series_mask(imgray) and
    imgray, our image in question.

    Params: imgray: 2D numpy array. B&W image in question
            mask: 2D numpy array. Binary image mask
    Output: 2D numpy array. Our final thumbprint

    Note: imgray is modified in place — pixels outside the mask are zeroed.
    """
    imgray[mask != 1] = 0
    # on the cropped image, run localized threshold to extract the print
    block_size = 41
    local_thresh = skfill.threshold_local(imgray, block_size)
    binary_local = imgray > local_thresh
    # make background white
    binary_local[mask == 0] = 1
    # denoise
    f_print = skfill.median(binary_local, selem=skmorph.disk(3))
    return f_print
if __name__ == "__main__":
    # CLI: python <script> <image-path> — load the image as grayscale and
    # extract the print.
    # NOTE(review): the module docstring says the thumbprint is plotted, but
    # nothing here displays or saves f_print — confirm intended behaviour.
    imgray = cv.imread(sys.argv[1], cv.IMREAD_GRAYSCALE)
    f_print = extract_print(thresh_series_mask(imgray), imgray)
|
#encoding=utf8
# Default macro table: "$1".."$9" expand to dingbat circled digits; the named
# entries expand to markdown/reST snippets used in notebook cells.
default_macros = {
    "1":"\u2776",
    "2":"\u2777",
    "3":"\u2778",
    "4":"\u2779",
    "5":"\u277a",
    "6":"\u277b",
    "7":"\u277c",
    "8":"\u277d",
    "9":"\u277e",
    "fig":'',
    "next":'`ref:fig-next`',
    "prev":'`ref:fig-prev`',
    "tip":'> **TIP**\n\n> ',
    "source":'> **SOURCE**\n\n> ',
    "warning":'> **WARNING**\n\n> ',
    "question":'> **QUESTION**\n\n> ',
    "link":'> **LINK**\n\n> \n\n> ',
}
def macros_to_text(macros):
    # Pretty-print the macro dict as JSON for the dialog textarea.
    # NOTE(review): `JSON` is the browser global — this module appears to be
    # Python-syntax source transpiled to JavaScript, not CPython code.
    return JSON.stringify(macros, None, 2)
def text_to_macros(text):
    # Parse the dialog textarea JSON back into a macro dict (browser JSON global).
    return JSON.parse(text)
def load(Jupyter, dialog, configmod, utils):
    """Notebook-extension entry point, wired up by `define(imports, load)` below.

    NOTE(review): this file is Python-syntax source that runs in the browser
    after transpilation — `jQuery`, `JSON` and the promise-style `.then()` are
    JavaScript globals, not Python names.
    """
    from .utils import register_actions

    base_url = utils.get_body_data("baseUrl")
    config = configmod.ConfigSection('scpy3_macros', {'base_url': base_url})
    config.load()
    macros_config = configmod.ConfigWithDefaults(config, {'macros':default_macros})

    def on_ok(text):
        # Persist the macros edited in the dialog textarea.
        macros = text_to_macros(text)
        macros_config.set('macros', macros)

    def show_macro_box(macros):
        # Modal dialog containing a JSON textarea for editing the macro table.
        title = 'Define Macros'
        info = 'Define Macros'
        el_dialog = jQuery('<div/>')
        el_info = jQuery('<p/>').text(info)
        el_br = jQuery('<br/>')
        css_textarea = {
            'font-size': '12px',
            'width': '90%',
            'font-family': 'monospace'
        }
        el_textarea = jQuery('<textarea/>').css(css_textarea).attr('rows', '10').val(macros_to_text(macros))
        el_dialog.append(el_info)
        el_dialog.append(el_br)
        el_dialog.append(el_textarea)

        def on_click():
            text = el_textarea.val()
            on_ok(text)

        def on_open():
            el_textarea.focus()

        settings = {
            'keyboard_manager': Jupyter.notebook.keyboard_manager,
            'title': title,
            'body': el_dialog,
            'buttons':{
                'ok': {
                    'class': 'btn-primary',
                    'click': on_click
                }
            },
            'open': on_open
        }
        dialog.modal(settings)

    def key_handler(event):
        # Expand the macro ending at the cursor in the selected cell's editor.
        cm = Jupyter.notebook.get_selected_cell().code_mirror
        cursor = cm.getCursor()
        line = cm.getLine(cursor.line)
        index = cursor.ch - 1

        def process(macros):
            # First pass: scan back for "$cmd"; a lone "$" opens the editor dialog.
            index = cursor.ch - 1
            while index >= 0:
                if line[index] == "$":
                    cmd = line[index+1:cursor.ch]
                    info = {"line":cursor.line, "ch":index}
                    if cmd == "":
                        show_macro_box(macros)
                    elif cmd in macros:
                        cm.replaceRange(macros[cmd], info, cursor)
                    return
                index -= 1
            # Second pass: shortest suffix before the cursor that is a macro key.
            index = cursor.ch - 1
            while index >= 0:
                cmd = line[index:cursor.ch]
                if cmd in macros:
                    info = {"line":cursor.line, "ch":index}
                    cm.replaceRange(macros[cmd], info, cursor)
                    return
                index -= 1

        macros_config.get("macros").then(process)
        return True

    def main():
        actions = dict(
            expand_macro = {
                "help": 'expand macro',
                "key": 'Alt-m',
                # NOTE(review): key_handler accepts a single argument; the
                # extra Jupyter here is tolerated only because JavaScript
                # ignores surplus arguments — confirm and simplify to
                # `"handler": key_handler`.
                "handler": lambda event: key_handler(Jupyter, event)
            }
        )
        register_actions(actions, "edit")

    return {"load_ipython_extension": main}
# RequireJS module dependencies, resolved by Jupyter's front-end loader and
# passed positionally to `load` above (`define` is the RequireJS global).
imports = ['base/js/namespace',
           'base/js/dialog',
           'services/config',
           'base/js/utils']

define(imports, load)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 15:14:31 2018
@author: kazeem
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__))) |
import sys
from pathlib import Path
import time
import cv2
"""
Usage: python webcam.py <name of person> <number of photos>
Example usage: python webcam.py claudia 10
"""
def take_photos(person, num):
    """ Takes <num> number of photos and
        saves them to the images/<person> directory

    Args:
        person (str): Person's name
        num (int): Number of photos to take

    Bug fix: the capture device is now always released (and windows
    destroyed) on exit via finally — the original only released it on
    KeyboardInterrupt, leaking the camera after a normal run.
    """
    webcam = cv2.VideoCapture(0)
    try:
        for i in range(num):
            try:
                check, frame = webcam.read()
                # Take photo
                if check:
                    img_name = "images/%s/%s_%d.jpg" % (person, person, i)
                    cv2.imwrite(filename=img_name, img=frame)
                    print("Image saved to %s!" % img_name)
            except KeyboardInterrupt:
                print("Turning off camera.")
                print("Program ended.")
                break
            time.sleep(0.5)
    finally:
        webcam.release()
        print("Camera off.")
        cv2.destroyAllWindows()
def main():
    """CLI entry: `webcam.py <person> <num>` — create the output directory,
    then capture the requested number of photos."""
    person = sys.argv[1]
    num = int(sys.argv[2])
    # Ensure images/<person> exists before the first imwrite.
    Path("images/" + person).mkdir(parents=True, exist_ok=True)
    take_photos(person, num)
if __name__ == "__main__":
main()
|
import csv
import sys

# Print the first column (id) of every row in test.csv.
#
# Bug fix: csv.reader already yields each row as a list of fields; the
# original called row.split(",") on that list, raising AttributeError on the
# very first row. The try/finally close is replaced with a with-statement,
# and `id` no longer shadows the builtin.
with open('test.csv', 'rt', newline='') as f:
    reader = csv.reader(f)
    for row in reader:
        id_, timestamp, year, day, holidays, local_event, item_id, price = row
        print(id_)
import os
import dbus
import common
import util
from service import Characteristic, Descriptor
from settings import Settings
class TimeUpdateCharacteristic(Characteristic):
    """GATT characteristic exposing the device clock.

    Reads return the current "HH:MM" time; writes of "HH:MM" set the system
    clock, disabling NTP auto-update the first time a manual write arrives.
    """

    def __init__(self, service, settings: Settings):
        self.notifying = False
        self.settings = settings
        Characteristic.__init__(
            self, common.TIME_UPDATE_CHARACTERISTIC_UUID,
            ["read","write"], service)
        self.add_descriptor(TimeUpdateDescriptor(self))

    def WriteValue(self, value, options):
        """Set the system time from a written "HH:MM" byte sequence."""
        val = ''.join([str(v) for v in value])
        print("received time update : "+val)
        if self.settings.isAutoUpdate != "F":
            print("turning auto time update off")
            os.system("sudo timedatectl set-ntp 0")
            # BUG fix: the flag lives on settings — the original assigned
            # self.isAutoUpdate, so the guard above never saw the change and
            # NTP was re-disabled on every single write.
            self.settings.isAutoUpdate = 'F'
        # NOTE(review): val is interpolated into a shell command without
        # validation; consider checking it matches HH:MM before calling date.
        cmd = "sudo date +%T -s "+val+":00"
        os.system(cmd)
        print("time changed")

    def get_time(self):
        """Return the current "HH:MM" time as a list of dbus bytes."""
        value = []
        cmd = "date '+%H:%M'"
        now = util.execOne(cmd)
        strtemp = str(now.strip())
        for c in strtemp:
            value.append(dbus.Byte(c.encode()))
        return value

    def ReadValue(self, options):
        """GATT read handler: current time as dbus bytes."""
        value = self.get_time()
        return value
class TimeUpdateDescriptor(Descriptor):
    """Human-readable descriptor for the time-update characteristic."""

    # BUG fix: ReadValue referenced self.TIME_UPDATE_DESCRIPTOR_VALUE, but the
    # constant was declared as TIME_DESCRIPTOR_VALUE, so every descriptor read
    # raised AttributeError. The declaration now matches the read site.
    TIME_UPDATE_DESCRIPTOR_VALUE = "Time Update HH:MM"

    def __init__(self, characteristic):
        Descriptor.__init__(
            self, common.TIME_UPDATE_DESCRIPTOR_UUID,
            ["read"],
            characteristic)

    def ReadValue(self, options):
        """Return the descriptor text as a list of dbus bytes."""
        value = []
        desc = self.TIME_UPDATE_DESCRIPTOR_VALUE
        for c in desc:
            value.append(dbus.Byte(c.encode()))
        return value
"""
Application: SumZero
Author: Roland Zhou
This is the top level script to run the Flask app so that all imports
are global from the top `sum_zero` module. If the app is run from
within the sum_zero/__init__.py, Python will not detect sum_zero as
a package.
The `config.py` module must also be in the root directory since the
app is imported to the root level.
"""
# `app` is imported so the application package initializes at the root level
# (see the module docstring); `manager` provides the CLI entry point.
from sum_zero import app, manager

if __name__ == "__main__":
    print("Running sum_zero app...")
    manager.run()
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
from avro import io
from avro import datafile
from gen_interop_data import DATUM
class TestDataFileInterop(unittest.TestCase):
    """Check that interop Avro container files round-trip the expected datum."""

    def testInterop(self):
        for f in ['py.avro']:
            logging.debug('Reading %s', f)
            # read data in binary from file
            # Resource fix: the original never closed the file or the
            # DataFileReader; context managers close both even if an
            # assertion fails mid-iteration.
            with open(f, 'rb') as reader:
                datum_reader = io.DatumReader()
                with datafile.DataFileReader(reader, datum_reader) as dfr:
                    for datum in dfr:
                        self.assertIsNotNone(datum)
                        self.assertEqual(datum, DATUM)
if __name__ == '__main__':
    # Interop tests depend on container files produced by the build; run them
    # via the suite driver, not directly.
    raise Exception('Use run_tests.py')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.