file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.py | #!/share/apps/python-2.7.2/bin/python
import foldx, re, shutil, random, os, math, sys, glob, pyros
from Bio import *
import Bio.PDB as PDB
#runs with python main.py human random num_fixed selection both 10000000 .00983 10 0 0 0 kept_mutants.txt all_mutants_tried.txt
#yeast effective pop size, real temp.
def main():
if len( sys.argv ) != 14:
print '''
You don't have the right number of arguments.
Input line should resemble:
python calculate_distance.py prefix list/random num_tried/num_fixed selection/no_selection chain population-size beta num-mutations dGt1 dGt2 dGt3 fixed_mutation_file all_mutation_file
For example:
python main.py 2eke list/random num_tried/num_fixed selection/no_selection chain 1000 10 10 -23.0 -5.0 -9.7 kept_mutants.txt all_mutants_tried.txt
'''
else:
args = sys.argv
prefix = args[1]
available_mutations = args[2]
tried_or_fixed = args[3]
selection = args[4]
which_chain = args[5]
population_size = float(args[6])
beta = float(args[7])
num_mutations = int(args[8])
dGt1 = float(args[9])
dGt2 = float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
|
score_ob.cleanUp(['*energies*'])
def write_line(out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
At this point, the function cannot be used if binding on both chains is
not desired.'''
mutant = [stab1[1], stab2[1], binding[1]]
origin = [stab1[0], stab2[0], binding[0]]
xi = calc_x(origin, beta, thresholds)
xj = calc_x(mutant, beta, thresholds)
print("mut:",mutant)
print("orgin:",origin)
print("xi:", xi)
print("xj:",xj)
if xj > xi:
return((1.0))
else:
#Need to make sure you check numbers that are too big for the math library
exponent = -2 * float(N) * (xi - xj)
return(safe_calc(exponent))
def calc_x(data, beta, thresholds):
total = 0
print("comparing:", len(data))
for i in range(0, len(data)):
print("data is:", data[i])
#Need to make sure you check numbers that are too big for the math library
exponent = float(beta) * (float(data[i]) - float(thresholds[i]))
print("beta is:", float(beta))
print("threshold is:", float(thresholds[i]))
print(exponent)
print("log exp,",-math.log(safe_calc(exponent) + 1))
total += -math.log(safe_calc(exponent) + 1)
print("score:",total)
return(total)
def safe_calc(exponent):
if exponent > 700:
print("system maxed")
return(sys.float_info.max)
else:
return(math.exp(exponent))
def recode_mutant_pdb(mutation_code, site, prefix):
recoded_mutant = mutation_code[0] + site + mutation_code[-1]
print("what its looking for", recoded_mutant)
new_test = recoded_mutant + '.pdb'
old_test = recoded_mutant + '.wt.pdb'
existing = glob.glob(recoded_mutant)
if len(existing)/2 > 0:
shutil.move(new_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.pdb')
shutil.move(old_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.wt.pdb')
print(prefix)
print("moving to new_test", str(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb'))
shutil.copy(prefix + '.pdb', recoded_mutant + '.wt.pdb')
print(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb')
shutil.move(foldx.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb', new_test)
#Remove the unused file that is output from position scan
old_files = glob.glob('*_' + prefix + '.pdb')
for a_file in old_files:
os.remove(a_file)
return(new_test, old_test)
def capture_mutant_pdb(out_name, mutant, chain_letter):
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', mutant)
writer = PDB.PDBIO()
writer.set_structure(structure)
writer.save(out_name, select=SelectChains(chain_letter))
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
return (chain.get_id() in self.chain_letters)
#Run main program
if __name__ == '__main__':
main()
| prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores()
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code)
continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1 | conditional_block |
main.py | #!/share/apps/python-2.7.2/bin/python
import foldx, re, shutil, random, os, math, sys, glob, pyros
from Bio import *
import Bio.PDB as PDB
#runs with python main.py human random num_fixed selection both 10000000 .00983 10 0 0 0 kept_mutants.txt all_mutants_tried.txt
#yeast effective pop size, real temp.
def main():
if len( sys.argv ) != 14:
print '''
You don't have the right number of arguments.
Input line should resemble:
python calculate_distance.py prefix list/random num_tried/num_fixed selection/no_selection chain population-size beta num-mutations dGt1 dGt2 dGt3 fixed_mutation_file all_mutation_file
For example:
python main.py 2eke list/random num_tried/num_fixed selection/no_selection chain 1000 10 10 -23.0 -5.0 -9.7 kept_mutants.txt all_mutants_tried.txt
'''
else:
args = sys.argv
prefix = args[1]
available_mutations = args[2]
tried_or_fixed = args[3]
selection = args[4]
which_chain = args[5]
population_size = float(args[6])
beta = float(args[7])
num_mutations = int(args[8])
dGt1 = float(args[9])
dGt2 = float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores()
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code)
continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def | (out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
At this point, the function cannot be used if binding on both chains is
not desired.'''
mutant = [stab1[1], stab2[1], binding[1]]
origin = [stab1[0], stab2[0], binding[0]]
xi = calc_x(origin, beta, thresholds)
xj = calc_x(mutant, beta, thresholds)
print("mut:",mutant)
print("orgin:",origin)
print("xi:", xi)
print("xj:",xj)
if xj > xi:
return((1.0))
else:
#Need to make sure you check numbers that are too big for the math library
exponent = -2 * float(N) * (xi - xj)
return(safe_calc(exponent))
def calc_x(data, beta, thresholds):
total = 0
print("comparing:", len(data))
for i in range(0, len(data)):
print("data is:", data[i])
#Need to make sure you check numbers that are too big for the math library
exponent = float(beta) * (float(data[i]) - float(thresholds[i]))
print("beta is:", float(beta))
print("threshold is:", float(thresholds[i]))
print(exponent)
print("log exp,",-math.log(safe_calc(exponent) + 1))
total += -math.log(safe_calc(exponent) + 1)
print("score:",total)
return(total)
def safe_calc(exponent):
if exponent > 700:
print("system maxed")
return(sys.float_info.max)
else:
return(math.exp(exponent))
def recode_mutant_pdb(mutation_code, site, prefix):
recoded_mutant = mutation_code[0] + site + mutation_code[-1]
print("what its looking for", recoded_mutant)
new_test = recoded_mutant + '.pdb'
old_test = recoded_mutant + '.wt.pdb'
existing = glob.glob(recoded_mutant)
if len(existing)/2 > 0:
shutil.move(new_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.pdb')
shutil.move(old_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.wt.pdb')
print(prefix)
print("moving to new_test", str(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb'))
shutil.copy(prefix + '.pdb', recoded_mutant + '.wt.pdb')
print(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb')
shutil.move(foldx.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb', new_test)
#Remove the unused file that is output from position scan
old_files = glob.glob('*_' + prefix + '.pdb')
for a_file in old_files:
os.remove(a_file)
return(new_test, old_test)
def capture_mutant_pdb(out_name, mutant, chain_letter):
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', mutant)
writer = PDB.PDBIO()
writer.set_structure(structure)
writer.save(out_name, select=SelectChains(chain_letter))
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
return (chain.get_id() in self.chain_letters)
#Run main program
if __name__ == '__main__':
main()
| write_line | identifier_name |
main.py | #!/share/apps/python-2.7.2/bin/python
import foldx, re, shutil, random, os, math, sys, glob, pyros
from Bio import *
import Bio.PDB as PDB
#runs with python main.py human random num_fixed selection both 10000000 .00983 10 0 0 0 kept_mutants.txt all_mutants_tried.txt
#yeast effective pop size, real temp.
def main():
if len( sys.argv ) != 14:
print '''
You don't have the right number of arguments.
Input line should resemble:
python calculate_distance.py prefix list/random num_tried/num_fixed selection/no_selection chain population-size beta num-mutations dGt1 dGt2 dGt3 fixed_mutation_file all_mutation_file
For example:
python main.py 2eke list/random num_tried/num_fixed selection/no_selection chain 1000 10 10 -23.0 -5.0 -9.7 kept_mutants.txt all_mutants_tried.txt
'''
else:
args = sys.argv
prefix = args[1]
available_mutations = args[2]
tried_or_fixed = args[3]
selection = args[4]
which_chain = args[5]
population_size = float(args[6])
beta = float(args[7])
num_mutations = int(args[8])
dGt1 = float(args[9])
dGt2 = float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores() | continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def write_line(out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
At this point, the function cannot be used if binding on both chains is
not desired.'''
mutant = [stab1[1], stab2[1], binding[1]]
origin = [stab1[0], stab2[0], binding[0]]
xi = calc_x(origin, beta, thresholds)
xj = calc_x(mutant, beta, thresholds)
print("mut:",mutant)
print("orgin:",origin)
print("xi:", xi)
print("xj:",xj)
if xj > xi:
return((1.0))
else:
#Need to make sure you check numbers that are too big for the math library
exponent = -2 * float(N) * (xi - xj)
return(safe_calc(exponent))
def calc_x(data, beta, thresholds):
total = 0
print("comparing:", len(data))
for i in range(0, len(data)):
print("data is:", data[i])
#Need to make sure you check numbers that are too big for the math library
exponent = float(beta) * (float(data[i]) - float(thresholds[i]))
print("beta is:", float(beta))
print("threshold is:", float(thresholds[i]))
print(exponent)
print("log exp,",-math.log(safe_calc(exponent) + 1))
total += -math.log(safe_calc(exponent) + 1)
print("score:",total)
return(total)
def safe_calc(exponent):
if exponent > 700:
print("system maxed")
return(sys.float_info.max)
else:
return(math.exp(exponent))
def recode_mutant_pdb(mutation_code, site, prefix):
recoded_mutant = mutation_code[0] + site + mutation_code[-1]
print("what its looking for", recoded_mutant)
new_test = recoded_mutant + '.pdb'
old_test = recoded_mutant + '.wt.pdb'
existing = glob.glob(recoded_mutant)
if len(existing)/2 > 0:
shutil.move(new_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.pdb')
shutil.move(old_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.wt.pdb')
print(prefix)
print("moving to new_test", str(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb'))
shutil.copy(prefix + '.pdb', recoded_mutant + '.wt.pdb')
print(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb')
shutil.move(foldx.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb', new_test)
#Remove the unused file that is output from position scan
old_files = glob.glob('*_' + prefix + '.pdb')
for a_file in old_files:
os.remove(a_file)
return(new_test, old_test)
def capture_mutant_pdb(out_name, mutant, chain_letter):
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', mutant)
writer = PDB.PDBIO()
writer.set_structure(structure)
writer.save(out_name, select=SelectChains(chain_letter))
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
return (chain.get_id() in self.chain_letters)
#Run main program
if __name__ == '__main__':
main() | score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code) | random_line_split |
main.py | #!/share/apps/python-2.7.2/bin/python
import foldx, re, shutil, random, os, math, sys, glob, pyros
from Bio import *
import Bio.PDB as PDB
#runs with python main.py human random num_fixed selection both 10000000 .00983 10 0 0 0 kept_mutants.txt all_mutants_tried.txt
#yeast effective pop size, real temp.
def main():
if len( sys.argv ) != 14:
print '''
You don't have the right number of arguments.
Input line should resemble:
python calculate_distance.py prefix list/random num_tried/num_fixed selection/no_selection chain population-size beta num-mutations dGt1 dGt2 dGt3 fixed_mutation_file all_mutation_file
For example:
python main.py 2eke list/random num_tried/num_fixed selection/no_selection chain 1000 10 10 -23.0 -5.0 -9.7 kept_mutants.txt all_mutants_tried.txt
'''
else:
args = sys.argv
prefix = args[1]
available_mutations = args[2]
tried_or_fixed = args[3]
selection = args[4]
which_chain = args[5]
population_size = float(args[6])
beta = float(args[7])
num_mutations = int(args[8])
dGt1 = float(args[9])
dGt2 = float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores()
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code)
continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def write_line(out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
At this point, the function cannot be used if binding on both chains is
not desired.'''
mutant = [stab1[1], stab2[1], binding[1]]
origin = [stab1[0], stab2[0], binding[0]]
xi = calc_x(origin, beta, thresholds)
xj = calc_x(mutant, beta, thresholds)
print("mut:",mutant)
print("orgin:",origin)
print("xi:", xi)
print("xj:",xj)
if xj > xi:
return((1.0))
else:
#Need to make sure you check numbers that are too big for the math library
exponent = -2 * float(N) * (xi - xj)
return(safe_calc(exponent))
def calc_x(data, beta, thresholds):
total = 0
print("comparing:", len(data))
for i in range(0, len(data)):
print("data is:", data[i])
#Need to make sure you check numbers that are too big for the math library
exponent = float(beta) * (float(data[i]) - float(thresholds[i]))
print("beta is:", float(beta))
print("threshold is:", float(thresholds[i]))
print(exponent)
print("log exp,",-math.log(safe_calc(exponent) + 1))
total += -math.log(safe_calc(exponent) + 1)
print("score:",total)
return(total)
def safe_calc(exponent):
if exponent > 700:
print("system maxed")
return(sys.float_info.max)
else:
return(math.exp(exponent))
def recode_mutant_pdb(mutation_code, site, prefix):
recoded_mutant = mutation_code[0] + site + mutation_code[-1]
print("what its looking for", recoded_mutant)
new_test = recoded_mutant + '.pdb'
old_test = recoded_mutant + '.wt.pdb'
existing = glob.glob(recoded_mutant)
if len(existing)/2 > 0:
shutil.move(new_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.pdb')
shutil.move(old_test, new_mutant_name[0:-4] + '_' + str(len(existing)/2) + '.wt.pdb')
print(prefix)
print("moving to new_test", str(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb'))
shutil.copy(prefix + '.pdb', recoded_mutant + '.wt.pdb')
print(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb')
shutil.move(foldx.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb', new_test)
#Remove the unused file that is output from position scan
old_files = glob.glob('*_' + prefix + '.pdb')
for a_file in old_files:
os.remove(a_file)
return(new_test, old_test)
def capture_mutant_pdb(out_name, mutant, chain_letter):
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', mutant)
writer = PDB.PDBIO()
writer.set_structure(structure)
writer.save(out_name, select=SelectChains(chain_letter))
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
|
#Run main program
if __name__ == '__main__':
main()
| return (chain.get_id() in self.chain_letters) | identifier_body |
Tetris.go | package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"time"
)
var (
colSize int = 10
rowSize int = 20
//
shapeO = [][]int{
{1, 1},
{1, 1}}
shapeL = [][]int{
{0, 0, 1},
{1, 1, 1},
{0, 0, 0}}
shapeJ = [][]int{
{1, 0, 0},
{1, 1, 1},
{0, 0, 0}}
shapeS = [][]int{
{0, 1, 1},
{1, 1, 0},
{0, 0, 0}}
shapeZ = [][]int{
{1, 1, 0},
{0, 1, 1},
{0, 0, 0}}
shapeI = [][]int{
{0, 0, 0, 0},
{1, 1, 1, 1},
{0, 0, 0, 0},
{0, 0, 0, 0}}
shapeT = [][]int{
{0, 1, 0},
{1, 1, 1},
{0, 0, 0}}
//
blockShape = [][][]int{shapeO, shapeL, shapeJ, shapeS, shapeZ, shapeI, shapeT}
//
colorCode = []string{
"\033[0m \033[m", //blank
"\033[0;31;41m[]\033[m", //Text: Red, Background: Red
"\033[0;32;42m[]\033[m", //Text: Green, Background: Green
"\033[0;33;43m[]\033[m", //Text: Yellow, Background: Yellow
"\033[0;34;44m[]\033[m", //Text: Blue, Background: Blue
"\033[0;35;45m[]\033[m", //Text: Purple, Background: Purple
"\033[0;36;46m[]\033[m", //Text: Cyan, Background: Cyan
"\033[0;37;47m[]\033[m"} //Text: White, Background: White
//SRS rotation
//Wall Kick case
//apply for J,L,Z,S,T Tetromino's case following clockwise direction
//test1 will be all {0,0} so we will skip test1
case1 = [][]int{{-1, 0}, {-1, 1}, {0, -2}, {-1, -2}} //0->R
case2 = [][]int{{1, 0}, {1, -1}, {0, 2}, {1, 2}} //R->2
case3 = [][]int{{1, 0}, {1, 1}, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0]
block.y += case4[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
}
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func | (board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape[i][j]
}
}
}
}
return board
}
//
func checkReachTop(land [][]int) bool {
for i := range land[0] {
if land[0][i] != 0 {
return true
}
}
return false
}
//
func handleFullLine(land [][]int, score int) ([][]int, int) {
var line []int
var flag bool
for i := range land {
flag = true
for j := range land[0] {
if land[i][j] == 0 {
flag = false
break
}
}
if flag == true {
line = append(line, i)
}
}
//index is line (row), j is column
for _, index := range line {
for j := range land[index] {
land[index][j] = 0
}
for j := index; j > 0; j-- {
//swap
land[j-1], land[j] = land[j], land[j-1]
}
score += 100
}
return land, score
}
//
func ClearScreen() {
cmd := exec.Command("clear")
cmd.Stdout = os.Stdout
cmd.Run()
}
//
func Display(arr [][]int, nextBlock Block, score int, Color []string) {
ClearScreen()
tempIndexRow := 0
tempIndexCol := 0
for i := range arr {
tempIndexCol = 0
if i == 0 {
fmt.Print("\r ")
for k := 0; k < len(arr[0]); k++ {
fmt.Print("\033[1m_ ")
}
fmt.Println("\033[m")
}
fmt.Print("\033[1m||\033[m")
for j := range arr[0] {
// fmt.Print(arr[i][j]," ") //for print out 0, 1 (real board)
if arr[i][j] == 0 && i == len(arr)-1 {
fmt.Print("\033[1m_ \033[m")
} else {
fmt.Print(Color[arr[i][j]])
}
}
if i == 1 {
fmt.Println("\033[1m|| Your score: ", score, "\033[m")
} else if i == 3 {
fmt.Println("\033[1m|| Next\033[m")
} else if i > 4 && tempIndexRow < len(nextBlock.shape) {
fmt.Print("\033[1m|| \033[m")
for tempIndexCol < len(nextBlock.shape[0]) {
fmt.Print(Color[nextBlock.shape[tempIndexRow][tempIndexCol]])
tempIndexCol++
}
fmt.Println("")
tempIndexRow++
} else {
fmt.Println("\033[1m||\033[m")
}
}
fmt.Println("\rPress: W - Rotate, S - Fall down faster, A - Move left, D - Move Right")
fmt.Println("\rPress: Spacebar - Hard Drop")
fmt.Println("\rPress: X to exit the program ")
}
//
func readInputKey() string {
// disable input buffering
exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run()
// do not display entered characters on the screen
exec.Command("stty", "-F", "/dev/tty", "-echo").Run()
var b []byte = make([]byte, 1)
os.Stdin.Read(b)
return string(b)
}
//
func threadGetKey(inputQueue chan string) {
for {
key := readInputKey()
if key == "x" || key == "X" {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
os.Exit(0)
}
inputQueue <- key
}
}
//
func clearChan(inputQueue chan string) chan string {
for len(inputQueue) != 0 {
<-inputQueue
}
return inputQueue
}
//
func doMovement(board [][]int, land [][]int, block Block, nextBlock Block, Color []string, inputQueue chan string, score int, levelUp int) ([][]int, [][]int, Block, chan string, string) {
var key string
var preSec int = int(time.Now().UnixNano() / 1000000)
for len(inputQueue) != 0 {
key = <-inputQueue
if key == "w" || key == "W" {
block.rotate(land)
} else if key == "a" || key == "A" {
block.moveLeft(land)
} else if key == "d" || key == "D" {
block.moveRight(land)
} else if key == "s" || key == "S" {
block.moveDown(land)
} else if key == " " {
block.hardDrop(land)
} else {
continue
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
time.Sleep(time.Microsecond)
//if time pass (1 sec or less depended on level_up var)
if (int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp) || key == " " {
break
}
}
return board, land, block, inputQueue, key
}
//Tetris's main game functin
func Tetris(Color []string, Shape [][][]int) {
//initializing
land := initBoard()
board := initBoard()
score := 0
//score and time drop
var levelUp int = int(160 * int(score/500))
var preSec int = 0
//
var key string
inputQueue := make(chan string, 100)
//begin get key input thread
go threadGetKey(inputQueue)
//begin the game
block := randomBlock(board, Shape)
nextBlock := randomBlock(board, Shape)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
//
for {
levelUp = int(160 * int(score/500))
if score >= 3000 {
levelUp = 900
}
//
board, land, block, inputQueue, key = doMovement(board, land, block, nextBlock, Color, inputQueue, score, levelUp)
if int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp || key == " " {
block.y++
if checkCollision(land, block) {
block.y--
inputQueue = clearChan(inputQueue)
land = makeCopy(board)
land, score = handleFullLine(land, score)
block = nextBlock
nextBlock = randomBlock(board, Shape)
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
preSec = int(time.Now().UnixNano() / 1000000)
}
if checkReachTop(land) {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
fmt.Println("\r\033[1;31mGAME OVER\033[m")
os.Exit(0)
} else {
time.Sleep(time.Millisecond)
}
}
}
//Main
func main() {
rand.Seed(time.Now().UnixNano())
Tetris(colorCode, blockShape)
}
| randomBlock | identifier_name |
Tetris.go | package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"time"
)
var (
colSize int = 10
rowSize int = 20
//
shapeO = [][]int{
{1, 1},
{1, 1}}
shapeL = [][]int{
{0, 0, 1},
{1, 1, 1},
{0, 0, 0}}
shapeJ = [][]int{
{1, 0, 0},
{1, 1, 1},
{0, 0, 0}}
shapeS = [][]int{
{0, 1, 1},
{1, 1, 0},
{0, 0, 0}}
shapeZ = [][]int{
{1, 1, 0},
{0, 1, 1},
{0, 0, 0}}
shapeI = [][]int{
{0, 0, 0, 0},
{1, 1, 1, 1},
{0, 0, 0, 0},
{0, 0, 0, 0}}
shapeT = [][]int{
{0, 1, 0},
{1, 1, 1},
{0, 0, 0}}
//
blockShape = [][][]int{shapeO, shapeL, shapeJ, shapeS, shapeZ, shapeI, shapeT}
//
colorCode = []string{
"\033[0m \033[m", //blank
"\033[0;31;41m[]\033[m", //Text: Red, Background: Red
"\033[0;32;42m[]\033[m", //Text: Green, Background: Green
"\033[0;33;43m[]\033[m", //Text: Yellow, Background: Yellow
"\033[0;34;44m[]\033[m", //Text: Blue, Background: Blue
"\033[0;35;45m[]\033[m", //Text: Purple, Background: Purple
"\033[0;36;46m[]\033[m", //Text: Cyan, Background: Cyan
"\033[0;37;47m[]\033[m"} //Text: White, Background: White
//SRS rotation
//Wall Kick case
//apply for J,L,Z,S,T Tetromino's case following clockwise direction
//test1 will be all {0,0} so we will skip test1
case1 = [][]int{{-1, 0}, {-1, 1}, {0, -2}, {-1, -2}} //0->R
case2 = [][]int{{1, 0}, {1, -1}, {0, 2}, {1, 2}} //R->2
case3 = [][]int{{1, 0}, {1, 1}, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0] | if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
}
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func randomBlock(board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape[i][j]
}
}
}
}
return board
}
//
func checkReachTop(land [][]int) bool {
for i := range land[0] {
if land[0][i] != 0 {
return true
}
}
return false
}
//
func handleFullLine(land [][]int, score int) ([][]int, int) {
var line []int
var flag bool
for i := range land {
flag = true
for j := range land[0] {
if land[i][j] == 0 {
flag = false
break
}
}
if flag == true {
line = append(line, i)
}
}
//index is line (row), j is column
for _, index := range line {
for j := range land[index] {
land[index][j] = 0
}
for j := index; j > 0; j-- {
//swap
land[j-1], land[j] = land[j], land[j-1]
}
score += 100
}
return land, score
}
//
func ClearScreen() {
cmd := exec.Command("clear")
cmd.Stdout = os.Stdout
cmd.Run()
}
//
func Display(arr [][]int, nextBlock Block, score int, Color []string) {
ClearScreen()
tempIndexRow := 0
tempIndexCol := 0
for i := range arr {
tempIndexCol = 0
if i == 0 {
fmt.Print("\r ")
for k := 0; k < len(arr[0]); k++ {
fmt.Print("\033[1m_ ")
}
fmt.Println("\033[m")
}
fmt.Print("\033[1m||\033[m")
for j := range arr[0] {
// fmt.Print(arr[i][j]," ") //for print out 0, 1 (real board)
if arr[i][j] == 0 && i == len(arr)-1 {
fmt.Print("\033[1m_ \033[m")
} else {
fmt.Print(Color[arr[i][j]])
}
}
if i == 1 {
fmt.Println("\033[1m|| Your score: ", score, "\033[m")
} else if i == 3 {
fmt.Println("\033[1m|| Next\033[m")
} else if i > 4 && tempIndexRow < len(nextBlock.shape) {
fmt.Print("\033[1m|| \033[m")
for tempIndexCol < len(nextBlock.shape[0]) {
fmt.Print(Color[nextBlock.shape[tempIndexRow][tempIndexCol]])
tempIndexCol++
}
fmt.Println("")
tempIndexRow++
} else {
fmt.Println("\033[1m||\033[m")
}
}
fmt.Println("\rPress: W - Rotate, S - Fall down faster, A - Move left, D - Move Right")
fmt.Println("\rPress: Spacebar - Hard Drop")
fmt.Println("\rPress: X to exit the program ")
}
//
func readInputKey() string {
// disable input buffering
exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run()
// do not display entered characters on the screen
exec.Command("stty", "-F", "/dev/tty", "-echo").Run()
var b []byte = make([]byte, 1)
os.Stdin.Read(b)
return string(b)
}
//
func threadGetKey(inputQueue chan string) {
for {
key := readInputKey()
if key == "x" || key == "X" {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
os.Exit(0)
}
inputQueue <- key
}
}
//
func clearChan(inputQueue chan string) chan string {
for len(inputQueue) != 0 {
<-inputQueue
}
return inputQueue
}
//
func doMovement(board [][]int, land [][]int, block Block, nextBlock Block, Color []string, inputQueue chan string, score int, levelUp int) ([][]int, [][]int, Block, chan string, string) {
var key string
var preSec int = int(time.Now().UnixNano() / 1000000)
for len(inputQueue) != 0 {
key = <-inputQueue
if key == "w" || key == "W" {
block.rotate(land)
} else if key == "a" || key == "A" {
block.moveLeft(land)
} else if key == "d" || key == "D" {
block.moveRight(land)
} else if key == "s" || key == "S" {
block.moveDown(land)
} else if key == " " {
block.hardDrop(land)
} else {
continue
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
time.Sleep(time.Microsecond)
//if time pass (1 sec or less depended on level_up var)
if (int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp) || key == " " {
break
}
}
return board, land, block, inputQueue, key
}
//Tetris's main game functin
func Tetris(Color []string, Shape [][][]int) {
//initializing
land := initBoard()
board := initBoard()
score := 0
//score and time drop
var levelUp int = int(160 * int(score/500))
var preSec int = 0
//
var key string
inputQueue := make(chan string, 100)
//begin get key input thread
go threadGetKey(inputQueue)
//begin the game
block := randomBlock(board, Shape)
nextBlock := randomBlock(board, Shape)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
//
for {
levelUp = int(160 * int(score/500))
if score >= 3000 {
levelUp = 900
}
//
board, land, block, inputQueue, key = doMovement(board, land, block, nextBlock, Color, inputQueue, score, levelUp)
if int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp || key == " " {
block.y++
if checkCollision(land, block) {
block.y--
inputQueue = clearChan(inputQueue)
land = makeCopy(board)
land, score = handleFullLine(land, score)
block = nextBlock
nextBlock = randomBlock(board, Shape)
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
preSec = int(time.Now().UnixNano() / 1000000)
}
if checkReachTop(land) {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
fmt.Println("\r\033[1;31mGAME OVER\033[m")
os.Exit(0)
} else {
time.Sleep(time.Millisecond)
}
}
}
//Main
func main() {
rand.Seed(time.Now().UnixNano())
Tetris(colorCode, blockShape)
} | block.y += case4[i][1] | random_line_split |
Tetris.go | package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"time"
)
var (
colSize int = 10
rowSize int = 20
//
shapeO = [][]int{
{1, 1},
{1, 1}}
shapeL = [][]int{
{0, 0, 1},
{1, 1, 1},
{0, 0, 0}}
shapeJ = [][]int{
{1, 0, 0},
{1, 1, 1},
{0, 0, 0}}
shapeS = [][]int{
{0, 1, 1},
{1, 1, 0},
{0, 0, 0}}
shapeZ = [][]int{
{1, 1, 0},
{0, 1, 1},
{0, 0, 0}}
shapeI = [][]int{
{0, 0, 0, 0},
{1, 1, 1, 1},
{0, 0, 0, 0},
{0, 0, 0, 0}}
shapeT = [][]int{
{0, 1, 0},
{1, 1, 1},
{0, 0, 0}}
//
blockShape = [][][]int{shapeO, shapeL, shapeJ, shapeS, shapeZ, shapeI, shapeT}
//
colorCode = []string{
"\033[0m \033[m", //blank
"\033[0;31;41m[]\033[m", //Text: Red, Background: Red
"\033[0;32;42m[]\033[m", //Text: Green, Background: Green
"\033[0;33;43m[]\033[m", //Text: Yellow, Background: Yellow
"\033[0;34;44m[]\033[m", //Text: Blue, Background: Blue
"\033[0;35;45m[]\033[m", //Text: Purple, Background: Purple
"\033[0;36;46m[]\033[m", //Text: Cyan, Background: Cyan
"\033[0;37;47m[]\033[m"} //Text: White, Background: White
//SRS rotation
//Wall Kick case
//apply for J,L,Z,S,T Tetromino's case following clockwise direction
//test1 will be all {0,0} so we will skip test1
case1 = [][]int{{-1, 0}, {-1, 1}, {0, -2}, {-1, -2}} //0->R
case2 = [][]int{{1, 0}, {1, -1}, {0, 2}, {1, 2}} //R->2
case3 = [][]int{{1, 0}, {1, 1}, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0]
block.y += case4[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int |
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func randomBlock(board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape[i][j]
}
}
}
}
return board
}
//
func checkReachTop(land [][]int) bool {
for i := range land[0] {
if land[0][i] != 0 {
return true
}
}
return false
}
//
func handleFullLine(land [][]int, score int) ([][]int, int) {
var line []int
var flag bool
for i := range land {
flag = true
for j := range land[0] {
if land[i][j] == 0 {
flag = false
break
}
}
if flag == true {
line = append(line, i)
}
}
//index is line (row), j is column
for _, index := range line {
for j := range land[index] {
land[index][j] = 0
}
for j := index; j > 0; j-- {
//swap
land[j-1], land[j] = land[j], land[j-1]
}
score += 100
}
return land, score
}
//
func ClearScreen() {
cmd := exec.Command("clear")
cmd.Stdout = os.Stdout
cmd.Run()
}
//
func Display(arr [][]int, nextBlock Block, score int, Color []string) {
ClearScreen()
tempIndexRow := 0
tempIndexCol := 0
for i := range arr {
tempIndexCol = 0
if i == 0 {
fmt.Print("\r ")
for k := 0; k < len(arr[0]); k++ {
fmt.Print("\033[1m_ ")
}
fmt.Println("\033[m")
}
fmt.Print("\033[1m||\033[m")
for j := range arr[0] {
// fmt.Print(arr[i][j]," ") //for print out 0, 1 (real board)
if arr[i][j] == 0 && i == len(arr)-1 {
fmt.Print("\033[1m_ \033[m")
} else {
fmt.Print(Color[arr[i][j]])
}
}
if i == 1 {
fmt.Println("\033[1m|| Your score: ", score, "\033[m")
} else if i == 3 {
fmt.Println("\033[1m|| Next\033[m")
} else if i > 4 && tempIndexRow < len(nextBlock.shape) {
fmt.Print("\033[1m|| \033[m")
for tempIndexCol < len(nextBlock.shape[0]) {
fmt.Print(Color[nextBlock.shape[tempIndexRow][tempIndexCol]])
tempIndexCol++
}
fmt.Println("")
tempIndexRow++
} else {
fmt.Println("\033[1m||\033[m")
}
}
fmt.Println("\rPress: W - Rotate, S - Fall down faster, A - Move left, D - Move Right")
fmt.Println("\rPress: Spacebar - Hard Drop")
fmt.Println("\rPress: X to exit the program ")
}
//
func readInputKey() string {
// disable input buffering
exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run()
// do not display entered characters on the screen
exec.Command("stty", "-F", "/dev/tty", "-echo").Run()
var b []byte = make([]byte, 1)
os.Stdin.Read(b)
return string(b)
}
//
func threadGetKey(inputQueue chan string) {
for {
key := readInputKey()
if key == "x" || key == "X" {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
os.Exit(0)
}
inputQueue <- key
}
}
//
func clearChan(inputQueue chan string) chan string {
for len(inputQueue) != 0 {
<-inputQueue
}
return inputQueue
}
//
func doMovement(board [][]int, land [][]int, block Block, nextBlock Block, Color []string, inputQueue chan string, score int, levelUp int) ([][]int, [][]int, Block, chan string, string) {
var key string
var preSec int = int(time.Now().UnixNano() / 1000000)
for len(inputQueue) != 0 {
key = <-inputQueue
if key == "w" || key == "W" {
block.rotate(land)
} else if key == "a" || key == "A" {
block.moveLeft(land)
} else if key == "d" || key == "D" {
block.moveRight(land)
} else if key == "s" || key == "S" {
block.moveDown(land)
} else if key == " " {
block.hardDrop(land)
} else {
continue
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
time.Sleep(time.Microsecond)
//if time pass (1 sec or less depended on level_up var)
if (int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp) || key == " " {
break
}
}
return board, land, block, inputQueue, key
}
//Tetris's main game functin
func Tetris(Color []string, Shape [][][]int) {
//initializing
land := initBoard()
board := initBoard()
score := 0
//score and time drop
var levelUp int = int(160 * int(score/500))
var preSec int = 0
//
var key string
inputQueue := make(chan string, 100)
//begin get key input thread
go threadGetKey(inputQueue)
//begin the game
block := randomBlock(board, Shape)
nextBlock := randomBlock(board, Shape)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
//
for {
levelUp = int(160 * int(score/500))
if score >= 3000 {
levelUp = 900
}
//
board, land, block, inputQueue, key = doMovement(board, land, block, nextBlock, Color, inputQueue, score, levelUp)
if int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp || key == " " {
block.y++
if checkCollision(land, block) {
block.y--
inputQueue = clearChan(inputQueue)
land = makeCopy(board)
land, score = handleFullLine(land, score)
block = nextBlock
nextBlock = randomBlock(board, Shape)
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
preSec = int(time.Now().UnixNano() / 1000000)
}
if checkReachTop(land) {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
fmt.Println("\r\033[1;31mGAME OVER\033[m")
os.Exit(0)
} else {
time.Sleep(time.Millisecond)
}
}
}
//Main
func main() {
rand.Seed(time.Now().UnixNano())
Tetris(colorCode, blockShape)
}
| {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
} | identifier_body |
Tetris.go | package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"time"
)
var (
colSize int = 10
rowSize int = 20
//
shapeO = [][]int{
{1, 1},
{1, 1}}
shapeL = [][]int{
{0, 0, 1},
{1, 1, 1},
{0, 0, 0}}
shapeJ = [][]int{
{1, 0, 0},
{1, 1, 1},
{0, 0, 0}}
shapeS = [][]int{
{0, 1, 1},
{1, 1, 0},
{0, 0, 0}}
shapeZ = [][]int{
{1, 1, 0},
{0, 1, 1},
{0, 0, 0}}
shapeI = [][]int{
{0, 0, 0, 0},
{1, 1, 1, 1},
{0, 0, 0, 0},
{0, 0, 0, 0}}
shapeT = [][]int{
{0, 1, 0},
{1, 1, 1},
{0, 0, 0}}
//
blockShape = [][][]int{shapeO, shapeL, shapeJ, shapeS, shapeZ, shapeI, shapeT}
//
colorCode = []string{
"\033[0m \033[m", //blank
"\033[0;31;41m[]\033[m", //Text: Red, Background: Red
"\033[0;32;42m[]\033[m", //Text: Green, Background: Green
"\033[0;33;43m[]\033[m", //Text: Yellow, Background: Yellow
"\033[0;34;44m[]\033[m", //Text: Blue, Background: Blue
"\033[0;35;45m[]\033[m", //Text: Purple, Background: Purple
"\033[0;36;46m[]\033[m", //Text: Cyan, Background: Cyan
"\033[0;37;47m[]\033[m"} //Text: White, Background: White
//SRS rotation
//Wall Kick case
//apply for J,L,Z,S,T Tetromino's case following clockwise direction
//test1 will be all {0,0} so we will skip test1
case1 = [][]int{{-1, 0}, {-1, 1}, {0, -2}, {-1, -2}} //0->R
case2 = [][]int{{1, 0}, {1, -1}, {0, 2}, {1, 2}} //R->2
case3 = [][]int{{1, 0}, {1, 1}, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0]
block.y += case4[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
}
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func randomBlock(board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape[i][j]
}
}
}
}
return board
}
//
func checkReachTop(land [][]int) bool {
for i := range land[0] {
if land[0][i] != 0 {
return true
}
}
return false
}
//
func handleFullLine(land [][]int, score int) ([][]int, int) {
var line []int
var flag bool
for i := range land {
flag = true
for j := range land[0] {
if land[i][j] == 0 {
flag = false
break
}
}
if flag == true {
line = append(line, i)
}
}
//index is line (row), j is column
for _, index := range line {
for j := range land[index] {
land[index][j] = 0
}
for j := index; j > 0; j-- {
//swap
land[j-1], land[j] = land[j], land[j-1]
}
score += 100
}
return land, score
}
//
func ClearScreen() {
cmd := exec.Command("clear")
cmd.Stdout = os.Stdout
cmd.Run()
}
//
func Display(arr [][]int, nextBlock Block, score int, Color []string) {
ClearScreen()
tempIndexRow := 0
tempIndexCol := 0
for i := range arr {
tempIndexCol = 0
if i == 0 {
fmt.Print("\r ")
for k := 0; k < len(arr[0]); k++ |
fmt.Println("\033[m")
}
fmt.Print("\033[1m||\033[m")
for j := range arr[0] {
// fmt.Print(arr[i][j]," ") //for print out 0, 1 (real board)
if arr[i][j] == 0 && i == len(arr)-1 {
fmt.Print("\033[1m_ \033[m")
} else {
fmt.Print(Color[arr[i][j]])
}
}
if i == 1 {
fmt.Println("\033[1m|| Your score: ", score, "\033[m")
} else if i == 3 {
fmt.Println("\033[1m|| Next\033[m")
} else if i > 4 && tempIndexRow < len(nextBlock.shape) {
fmt.Print("\033[1m|| \033[m")
for tempIndexCol < len(nextBlock.shape[0]) {
fmt.Print(Color[nextBlock.shape[tempIndexRow][tempIndexCol]])
tempIndexCol++
}
fmt.Println("")
tempIndexRow++
} else {
fmt.Println("\033[1m||\033[m")
}
}
fmt.Println("\rPress: W - Rotate, S - Fall down faster, A - Move left, D - Move Right")
fmt.Println("\rPress: Spacebar - Hard Drop")
fmt.Println("\rPress: X to exit the program ")
}
//
func readInputKey() string {
// disable input buffering
exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run()
// do not display entered characters on the screen
exec.Command("stty", "-F", "/dev/tty", "-echo").Run()
var b []byte = make([]byte, 1)
os.Stdin.Read(b)
return string(b)
}
//
func threadGetKey(inputQueue chan string) {
for {
key := readInputKey()
if key == "x" || key == "X" {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
os.Exit(0)
}
inputQueue <- key
}
}
//
func clearChan(inputQueue chan string) chan string {
for len(inputQueue) != 0 {
<-inputQueue
}
return inputQueue
}
//
func doMovement(board [][]int, land [][]int, block Block, nextBlock Block, Color []string, inputQueue chan string, score int, levelUp int) ([][]int, [][]int, Block, chan string, string) {
var key string
var preSec int = int(time.Now().UnixNano() / 1000000)
for len(inputQueue) != 0 {
key = <-inputQueue
if key == "w" || key == "W" {
block.rotate(land)
} else if key == "a" || key == "A" {
block.moveLeft(land)
} else if key == "d" || key == "D" {
block.moveRight(land)
} else if key == "s" || key == "S" {
block.moveDown(land)
} else if key == " " {
block.hardDrop(land)
} else {
continue
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
time.Sleep(time.Microsecond)
//if time pass (1 sec or less depended on level_up var)
if (int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp) || key == " " {
break
}
}
return board, land, block, inputQueue, key
}
//Tetris's main game functin
func Tetris(Color []string, Shape [][][]int) {
//initializing
land := initBoard()
board := initBoard()
score := 0
//score and time drop
var levelUp int = int(160 * int(score/500))
var preSec int = 0
//
var key string
inputQueue := make(chan string, 100)
//begin get key input thread
go threadGetKey(inputQueue)
//begin the game
block := randomBlock(board, Shape)
nextBlock := randomBlock(board, Shape)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
//
for {
levelUp = int(160 * int(score/500))
if score >= 3000 {
levelUp = 900
}
//
board, land, block, inputQueue, key = doMovement(board, land, block, nextBlock, Color, inputQueue, score, levelUp)
if int(time.Now().UnixNano()/1000000)-preSec >= 1000-levelUp || key == " " {
block.y++
if checkCollision(land, block) {
block.y--
inputQueue = clearChan(inputQueue)
land = makeCopy(board)
land, score = handleFullLine(land, score)
block = nextBlock
nextBlock = randomBlock(board, Shape)
}
board = makeCopy(land)
board = mergeBlock(board, block)
Display(board, nextBlock, score, Color)
preSec = int(time.Now().UnixNano() / 1000000)
}
if checkReachTop(land) {
exec.Command("stty", "-F", "/dev/tty", "echo").Run()
fmt.Println("\r\033[1;31mGAME OVER\033[m")
os.Exit(0)
} else {
time.Sleep(time.Millisecond)
}
}
}
//Main
func main() {
rand.Seed(time.Now().UnixNano())
Tetris(colorCode, blockShape)
}
| {
fmt.Print("\033[1m_ ")
} | conditional_block |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn | (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
}
| main_loop | identifier_name |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0, | }
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
} | display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
} | random_line_split |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast |
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
}
| {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}} | conditional_block |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() | {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
} | identifier_body | |
cached.go | // Copyright 2015 - 2016 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
} | item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.TagSets, item.fetchError
}
defer item.Unlock()
// We're going to execute this fetch now
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Expired")()
tagsets, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
if err != nil {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Errored")()
return nil, err
}
return tagsets, nil
}
defer context.Profiler.Record("CachedMetricMetadataAPI_Hit")()
defer item.Unlock()
// Otherwise, we could be stale
if item.Stale.Before(c.clock.Now()) {
// Enqueue a background request
c.addBackgroundGetAllTagsRequest(item, metricKey)
}
// but return the cached result immediately.
return item.TagSets, nil
}
// CurrentLiveRequests returns the number of requests currently in the queue
func (c *metricMetadataAPI) CurrentLiveRequests() int {
return len(c.backgroundQueue)
}
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
func (c *metricMetadataAPI) MaximumLiveRequests() int {
return cap(c.backgroundQueue)
} |
c.getAllTagsCacheMutex.Unlock()
}
| random_line_split |
cached.go | // Copyright 2015 - 2016 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) | else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.TagSets, item.fetchError
}
defer item.Unlock()
// We're going to execute this fetch now
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Expired")()
tagsets, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
if err != nil {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Errored")()
return nil, err
}
return tagsets, nil
}
defer context.Profiler.Record("CachedMetricMetadataAPI_Hit")()
defer item.Unlock()
// Otherwise, we could be stale
if item.Stale.Before(c.clock.Now()) {
// Enqueue a background request
c.addBackgroundGetAllTagsRequest(item, metricKey)
}
// but return the cached result immediately.
return item.TagSets, nil
}
// CurrentLiveRequests returns the number of requests currently in the queue
func (c *metricMetadataAPI) CurrentLiveRequests() int {
return len(c.backgroundQueue)
}
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
func (c *metricMetadataAPI) MaximumLiveRequests() int {
return cap(c.backgroundQueue)
}
| {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} | conditional_block |
cached.go | // Copyright 2015 - 2016 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) |
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.TagSets, item.fetchError
}
defer item.Unlock()
// We're going to execute this fetch now
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Expired")()
tagsets, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
if err != nil {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Errored")()
return nil, err
}
return tagsets, nil
}
defer context.Profiler.Record("CachedMetricMetadataAPI_Hit")()
defer item.Unlock()
// Otherwise, we could be stale
if item.Stale.Before(c.clock.Now()) {
// Enqueue a background request
c.addBackgroundGetAllTagsRequest(item, metricKey)
}
// but return the cached result immediately.
return item.TagSets, nil
}
// CurrentLiveRequests returns the number of requests currently in the queue
func (c *metricMetadataAPI) CurrentLiveRequests() int {
return len(c.backgroundQueue)
}
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
func (c *metricMetadataAPI) MaximumLiveRequests() int {
return cap(c.backgroundQueue)
}
| {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
} | identifier_body |
cached.go | // Copyright 2015 - 2016 Square Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) | (metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.TagSets, item.fetchError
}
defer item.Unlock()
// We're going to execute this fetch now
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Expired")()
tagsets, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
if err != nil {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags_Errored")()
return nil, err
}
return tagsets, nil
}
defer context.Profiler.Record("CachedMetricMetadataAPI_Hit")()
defer item.Unlock()
// Otherwise, we could be stale
if item.Stale.Before(c.clock.Now()) {
// Enqueue a background request
c.addBackgroundGetAllTagsRequest(item, metricKey)
}
// but return the cached result immediately.
return item.TagSets, nil
}
// CurrentLiveRequests returns the number of requests currently in the queue
func (c *metricMetadataAPI) CurrentLiveRequests() int {
return len(c.backgroundQueue)
}
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
func (c *metricMetadataAPI) MaximumLiveRequests() int {
return cap(c.backgroundQueue)
}
| AddMetrics | identifier_name |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None, | }
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if !k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable + 'static { }
impl<F: Clone + 'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait; | (Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir); | random_line_split |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> |
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if !k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable + 'static { }
impl<F: Clone + 'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait;
| {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
} | identifier_body |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn | <'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if !k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable + 'static { }
impl<F: Clone + 'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait;
| bool_from_str_or_int | identifier_name |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
}
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize .. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> |
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
} | identifier_body |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else |
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize .. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
} | conditional_block |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
}
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize .. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0); | }
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
} | l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len()); | random_line_split |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
}
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize .. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn | (&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| link | identifier_name |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n))); | }
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
} | }
fn increasing(&self) -> bool {
return false; | random_line_split |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals |
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
}
| {
return Rationals { ps : PrimeSeq::new() };
} | identifier_body |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn | (&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
}
| increasing | identifier_name |
rtorrent.py | import logging
import re
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode, urlsplit
from xml.parsers.expat import ExpatError
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
import pytz
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
|
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config_path.read_text()
except PermissionError:
raise FailedToExecuteException("Config file not accessible")
scgi_info = re.findall(
r"^\s*scgi_(port|local)\s*=\s*(.+)\s*$", str(config_data), re.MULTILINE
)
if not scgi_info:
raise FailedToExecuteException("No scgi info found in configuration file")
scgi_method, scgi_url = scgi_info[0]
if scgi_method == "port":
scgi_url = scgi_url.strip()
else:
scgi_url = Path(scgi_url.strip()).expanduser().resolve()
client = cls(f"scgi://{scgi_url}")
session_path = Path(client.proxy.session.path()).resolve()
if session_path.is_dir():
client.session_path = session_path
return client
| if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path)) | conditional_block |
rtorrent.py | import logging
import re
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode, urlsplit
from xml.parsers.expat import ExpatError
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
import pytz
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def | (self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config_path.read_text()
except PermissionError:
raise FailedToExecuteException("Config file not accessible")
scgi_info = re.findall(
r"^\s*scgi_(port|local)\s*=\s*(.+)\s*$", str(config_data), re.MULTILINE
)
if not scgi_info:
raise FailedToExecuteException("No scgi info found in configuration file")
scgi_method, scgi_url = scgi_info[0]
if scgi_method == "port":
scgi_url = scgi_url.strip()
else:
scgi_url = Path(scgi_url.strip()).expanduser().resolve()
client = cls(f"scgi://{scgi_url}")
session_path = Path(client.proxy.session.path()).resolve()
if session_path.is_dir():
client.session_path = session_path
return client
| serialize_configuration | identifier_name |
rtorrent.py | import logging
import re
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode, urlsplit
from xml.parsers.expat import ExpatError
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
import pytz
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi": | return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config_path.read_text()
except PermissionError:
raise FailedToExecuteException("Config file not accessible")
scgi_info = re.findall(
r"^\s*scgi_(port|local)\s*=\s*(.+)\s*$", str(config_data), re.MULTILINE
)
if not scgi_info:
raise FailedToExecuteException("No scgi info found in configuration file")
scgi_method, scgi_url = scgi_info[0]
if scgi_method == "port":
scgi_url = scgi_url.strip()
else:
scgi_url = Path(scgi_url.strip()).expanduser().resolve()
client = cls(f"scgi://{scgi_url}")
session_path = Path(client.proxy.session.path()).resolve()
if session_path.is_dir():
client.session_path = session_path
return client | if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}") | random_line_split |
rtorrent.py | import logging
import re
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode, urlsplit
from xml.parsers.expat import ExpatError
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
import pytz
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
|
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config_path.read_text()
except PermissionError:
raise FailedToExecuteException("Config file not accessible")
scgi_info = re.findall(
r"^\s*scgi_(port|local)\s*=\s*(.+)\s*$", str(config_data), re.MULTILINE
)
if not scgi_info:
raise FailedToExecuteException("No scgi info found in configuration file")
scgi_method, scgi_url = scgi_info[0]
if scgi_method == "port":
scgi_url = scgi_url.strip()
else:
scgi_url = Path(scgi_url.strip()).expanduser().resolve()
client = cls(f"scgi://{scgi_url}")
session_path = Path(client.proxy.session.path()).resolve()
if session_path.is_dir():
client.session_path = session_path
return client
| parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url) | identifier_body |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor, | global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
} | random_line_split | |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> | {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
} | identifier_body | |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE |
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
}
| {
return RWIobuf::new(capacity);
} | conditional_block |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn | (&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
}
| checked_tick | identifier_name |
main.go | package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/getsentry/sentry-go"
sentrygin "github.com/getsentry/sentry-go/gin"
"github.com/gin-gonic/gin"
redis "github.com/go-redis/redis/v7"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/russross/blackfriday"
)
const (
zsetKey = "blogtopn"
)
var (
filenameRegex = regexp.MustCompile(`(\d{4}_\d{2}_\d{2})-.+\..+`)
articles = LoadMDs("articles")
db *sqlx.DB
redisClient *redis.Client
categoryMap = map[string]string{
"golang": "Golang简明教程",
"python": "Python教程",
"data_structure": "数据结构在实际项目中的使用",
}
// ErrNotFound means article not found
ErrNotFound = errors.New("Article Not Found")
// ErrFailedToLoad failed to load article
ErrFailedToLoad = errors.New("Failed To Load Article")
// Prometheus
totalRequests = promauto.NewCounter(prometheus.CounterOpts{Name: "total_requests_total"})
)
// InitSentry 初始化sentry
func InitSentry() error {
return sentry.Init(sentry.ClientOptions{
Dsn: os.Getenv("SENTRY_DSN"),
// Specify a fixed sample rate:
TracesSampleRate: 1.0,
})
}
// InitializeDB 初始化数据库连接
func InitializeDB() {
var err error
db, err = sqlx.Connect("my | env("SQLX_URL"))
if err != nil {
log.Fatalf("failed to connect to the db: %s", err)
}
}
// InitializeRedis 初始化Redis
func InitializeRedis() {
opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
if err != nil {
log.Fatalf("failed to connect to redis db: %s", err)
}
// Create client as usually.
redisClient = redis.NewClient(opt)
}
// Article 就是文章
type Article struct {
Title string `json:"title"`
Date string `json:"date_str"`
Filename string `json:"file_name"`
DirName string `json:"dir_name"`
PubDate time.Time `json:"-"`
Description string `json:"description"`
}
// Articles 文章列表
type Articles []Article
func (a Articles) Len() int { return len(a) }
func (a Articles) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
"partly": true,
},
)
}
// NotesHandler 随想
func NotesHandler(c *gin.Context) {
notes := dao.GetAllNotes()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"notes": notes,
},
)
}
// RSSHandler RSS
func RSSHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// SharingRSSHandler RSS for sharing channel
func SharingRSSHandler(c *gin.Context) {
sharings := dao.GetAllSharing()
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sharing_rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"sharings": sharings,
},
)
}
// SiteMapHandler sitemap
func SiteMapHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sitemap.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// TutorialHandler 教程
func TutorialHandler(c *gin.Context) {
category := c.Param("category")
filename := c.Param("filename")
urlPath := c.Request.URL.Path
subTitle := categoryMap[category]
incrVisited(urlPath, subTitle)
renderArticle(c, http.StatusOK, fmt.Sprintf("tutorial/%s/%s", category, filename), subTitle, 15)
}
// SearchHandler 搜索
func SearchHandler(c *gin.Context) {
word := c.PostForm("search")
c.Redirect(
http.StatusFound,
"https://www.google.com/search?q=site:jiajunhuang.com "+word,
)
}
// RewardHandler 扫码赞赏
func RewardHandler(c *gin.Context) {
userAgent := c.Request.UserAgent()
if strings.Contains(userAgent, "MicroMessenger") {
c.Redirect(http.StatusFound, os.Getenv("WECHAT_PAY_URL"))
return
}
c.Redirect(http.StatusFound, os.Getenv("ALIPAY_URL"))
}
// ArticlesAPIHandler 首页文章API
func ArticlesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
perPage := 50
start := (queryObj.Page - 1) * perPage
if start < 0 {
start = 0
}
if start > len(articles) {
start = len(articles)
}
end := start + perPage
if end > len(articles) {
end = len(articles)
}
c.JSON(http.StatusOK, gin.H{"msg": "", "result": articles[start:end]})
}
// TopArticlesAPIHandler 热门文章API
func TopArticlesAPIHandler(c *gin.Context) {
topArticles := getTopVisited(20)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": topArticles})
}
// SharingAPIHandler 获取分享
func SharingAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
sharings := dao.GetSharing(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": sharings})
}
// NotesAPIHandler 获取随想
func NotesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
notes := dao.GetNotes(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": notes})
}
func main() {
if err := InitSentry(); err != nil {
log.Panicf("failed to init sentry: %s", err)
}
defer sentry.Flush(2 * time.Second)
// telegram bot
go startNoteBot()
go startSharingBot()
InitializeDB()
InitializeRedis()
r := gin.New()
r.Use(sentrygin.New(sentrygin.Options{}))
r.Use(gin.Logger())
r.Use(func(c *gin.Context) {
totalRequests.Inc()
})
r.LoadHTMLGlob("templates/*.html")
r.Static("/static", "./static")
//r.Static("/tutorial/:lang/img/", "./tutorial/:lang/img") # 然而不支持
//r.Static("/articles/img", "./articles/img") # 然而有冲突
r.StaticFile("/favicon.ico", "./static/favicon.ico")
r.StaticFile("/robots.txt", "./static/robots.txt")
r.StaticFile("/ads.txt", "./static/ads.txt")
r.GET("/", IndexHandler)
r.GET("/ping", PingPongHandler)
r.GET("/404", NotFoundHandler)
r.GET("/archive", ArchiveHandler)
r.GET("/articles/:filepath", ArticleHandler)
r.GET("/aboutme", AboutMeHandler)
r.GET("/tutorial", TutorialPageHandler)
r.GET("/friends", FriendsHandler)
r.GET("/app", AppHandler)
r.GET("/sharing", SharingHandler)
r.GET("/sharing/all", AllSharingHandler)
r.GET("/sharing/rss", SharingRSSHandler)
r.GET("/notes", NotesHandler)
r.GET("/api/v1/articles", ArticlesAPIHandler)
r.GET("/api/v1/topn", TopArticlesAPIHandler)
r.GET("/api/v1/sharing", SharingAPIHandler)
r.GET("/api/v1/notes", NotesAPIHandler)
r.GET("/rss", RSSHandler)
r.GET("/sitemap.xml", SiteMapHandler)
r.GET("/tutorial/:category/:filename", TutorialHandler)
r.GET("/reward", RewardHandler)
r.POST("/search", SearchHandler)
r.GET("/metrics", gin.WrapH(promhttp.Handler()))
r.NoRoute(func(c *gin.Context) { c.Redirect(http.StatusFound, "/404") })
r.Run("0.0.0.0:8080")
}
| sql", os.Get | identifier_name |
main.go | package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/getsentry/sentry-go"
sentrygin "github.com/getsentry/sentry-go/gin"
"github.com/gin-gonic/gin"
redis "github.com/go-redis/redis/v7"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/russross/blackfriday"
)
const (
zsetKey = "blogtopn"
)
var (
filenameRegex = regexp.MustCompile(`(\d{4}_\d{2}_\d{2})-.+\..+`)
articles = LoadMDs("articles")
db *sqlx.DB
redisClient *redis.Client
categoryMap = map[string]string{
"golang": "Golang简明教程",
"python": "Python教程",
"data_structure": "数据结构在实际项目中的使用",
}
// ErrNotFound means article not found
ErrNotFound = errors.New("Article Not Found")
// ErrFailedToLoad failed to load article
ErrFailedToLoad = errors.New("Failed To Load Article")
// Prometheus
totalRequests = promauto.NewCounter(prometheus.CounterOpts{Name: "total_requests_total"})
)
// InitSentry 初始化sentry
func InitSentry() error {
return sentry.Init(sentry.ClientOptions{
Dsn: os.Getenv("SENTRY_DSN"),
// Specify a fixed sample rate:
TracesSampleRate: 1.0,
})
}
// InitializeDB 初始化数据库连接
func InitializeDB() {
var err error
db, err = sqlx.Connect("mysql", os.Getenv("SQLX_URL"))
if err != nil {
log.Fatalf("failed to connect to the db: %s", err)
}
}
// InitializeRedis 初始化Redis
func InitializeRedis() {
opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
if err != nil {
log.Fatalf("failed to connect to redis db: %s", err)
}
// Create client as usually.
redisClient = redis.NewClient(opt)
}
// Article 就是文章
type Article struct {
Title string `json:"title"`
Date string `json:"date_str"`
Filename string `json:"file_name"`
DirName string `json:"dir_name"`
PubDate time.Time `json:"-"`
Description string `json:"description"`
}
// Articles 文章列表
type Articles []Article
func (a Articles) Len() int { return len(a) }
func (a Articles) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title, | DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
"partly": true,
},
)
}
// NotesHandler 随想
func NotesHandler(c *gin.Context) {
notes := dao.GetAllNotes()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"notes": notes,
},
)
}
// RSSHandler RSS
func RSSHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// SharingRSSHandler RSS for sharing channel
func SharingRSSHandler(c *gin.Context) {
sharings := dao.GetAllSharing()
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sharing_rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"sharings": sharings,
},
)
}
// SiteMapHandler sitemap
func SiteMapHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sitemap.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// TutorialHandler 教程
func TutorialHandler(c *gin.Context) {
category := c.Param("category")
filename := c.Param("filename")
urlPath := c.Request.URL.Path
subTitle := categoryMap[category]
incrVisited(urlPath, subTitle)
renderArticle(c, http.StatusOK, fmt.Sprintf("tutorial/%s/%s", category, filename), subTitle, 15)
}
// SearchHandler 搜索
func SearchHandler(c *gin.Context) {
word := c.PostForm("search")
c.Redirect(
http.StatusFound,
"https://www.google.com/search?q=site:jiajunhuang.com "+word,
)
}
// RewardHandler 扫码赞赏
func RewardHandler(c *gin.Context) {
userAgent := c.Request.UserAgent()
if strings.Contains(userAgent, "MicroMessenger") {
c.Redirect(http.StatusFound, os.Getenv("WECHAT_PAY_URL"))
return
}
c.Redirect(http.StatusFound, os.Getenv("ALIPAY_URL"))
}
// ArticlesAPIHandler 首页文章API
func ArticlesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
perPage := 50
start := (queryObj.Page - 1) * perPage
if start < 0 {
start = 0
}
if start > len(articles) {
start = len(articles)
}
end := start + perPage
if end > len(articles) {
end = len(articles)
}
c.JSON(http.StatusOK, gin.H{"msg": "", "result": articles[start:end]})
}
// TopArticlesAPIHandler 热门文章API
func TopArticlesAPIHandler(c *gin.Context) {
topArticles := getTopVisited(20)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": topArticles})
}
// SharingAPIHandler 获取分享
func SharingAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
sharings := dao.GetSharing(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": sharings})
}
// NotesAPIHandler 获取随想
func NotesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
notes := dao.GetNotes(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": notes})
}
func main() {
if err := InitSentry(); err != nil {
log.Panicf("failed to init sentry: %s", err)
}
defer sentry.Flush(2 * time.Second)
// telegram bot
go startNoteBot()
go startSharingBot()
InitializeDB()
InitializeRedis()
r := gin.New()
r.Use(sentrygin.New(sentrygin.Options{}))
r.Use(gin.Logger())
r.Use(func(c *gin.Context) {
totalRequests.Inc()
})
r.LoadHTMLGlob("templates/*.html")
r.Static("/static", "./static")
//r.Static("/tutorial/:lang/img/", "./tutorial/:lang/img") # 然而不支持
//r.Static("/articles/img", "./articles/img") # 然而有冲突
r.StaticFile("/favicon.ico", "./static/favicon.ico")
r.StaticFile("/robots.txt", "./static/robots.txt")
r.StaticFile("/ads.txt", "./static/ads.txt")
r.GET("/", IndexHandler)
r.GET("/ping", PingPongHandler)
r.GET("/404", NotFoundHandler)
r.GET("/archive", ArchiveHandler)
r.GET("/articles/:filepath", ArticleHandler)
r.GET("/aboutme", AboutMeHandler)
r.GET("/tutorial", TutorialPageHandler)
r.GET("/friends", FriendsHandler)
r.GET("/app", AppHandler)
r.GET("/sharing", SharingHandler)
r.GET("/sharing/all", AllSharingHandler)
r.GET("/sharing/rss", SharingRSSHandler)
r.GET("/notes", NotesHandler)
r.GET("/api/v1/articles", ArticlesAPIHandler)
r.GET("/api/v1/topn", TopArticlesAPIHandler)
r.GET("/api/v1/sharing", SharingAPIHandler)
r.GET("/api/v1/notes", NotesAPIHandler)
r.GET("/rss", RSSHandler)
r.GET("/sitemap.xml", SiteMapHandler)
r.GET("/tutorial/:category/:filename", TutorialHandler)
r.GET("/reward", RewardHandler)
r.POST("/search", SearchHandler)
r.GET("/metrics", gin.WrapH(promhttp.Handler()))
r.NoRoute(func(c *gin.Context) { c.Redirect(http.StatusFound, "/404") })
r.Run("0.0.0.0:8080")
} | Date: dateString,
Filename: filename, | random_line_split |
main.go | package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/getsentry/sentry-go"
sentrygin "github.com/getsentry/sentry-go/gin"
"github.com/gin-gonic/gin"
redis "github.com/go-redis/redis/v7"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/russross/blackfriday"
)
const (
zsetKey = "blogtopn"
)
var (
filenameRegex = regexp.MustCompile(`(\d{4}_\d{2}_\d{2})-.+\..+`)
articles = LoadMDs("articles")
db *sqlx.DB
redisClient *redis.Client
categoryMap = map[string]string{
"golang": "Golang简明教程",
"python": "Python教程",
"data_structure": "数据结构在实际项目中的使用",
}
// ErrNotFound means article not found
ErrNotFound = errors.New("Article Not Found")
// ErrFailedToLoad failed to load article
ErrFailedToLoad = errors.New("Failed To Load Article")
// Prometheus
totalRequests = promauto.NewCounter(prometheus.CounterOpts{Name: "total_requests_total"})
)
// InitSentry 初始化sentry
func InitSentry() error {
return sentry.Init(sentry.ClientOptions{
Dsn: os.Getenv("SENTRY_DSN"),
// Specify a fixed sample rate:
TracesSampleRate: 1.0,
})
}
// InitializeDB 初始化数据库连接
func InitializeDB() {
var err error
db, err = sqlx.Connect("mysql", os.Getenv("SQLX_URL"))
if err != nil {
log.Fatalf("failed to connect to the db: %s", err)
}
}
// InitializeRedis 初始化Redis
func InitializeRedis() {
opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
if err != nil {
log.Fatalf("failed to connect to redis db: %s", err)
}
// Create client as usually.
redisClient = redis.NewClient(opt)
}
// Article 就是文章
type Article struct {
Title string `json:"title"`
Date string `json:"date_str"`
Filename string `json:"file_name"`
DirName string `json:"dir_name"`
PubDate time.Time `json:"-"`
Description string `json:"description"`
}
// Articles 文章列表
type Articles []Article
func (a Articles) Len() int { return len(a) }
func (a Articles) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: titl | sited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
"partly": true,
},
)
}
// NotesHandler 随想
func NotesHandler(c *gin.Context) {
notes := dao.GetAllNotes()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"notes": notes,
},
)
}
// RSSHandler RSS
func RSSHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// SharingRSSHandler RSS for sharing channel
func SharingRSSHandler(c *gin.Context) {
sharings := dao.GetAllSharing()
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sharing_rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"sharings": sharings,
},
)
}
// SiteMapHandler sitemap
func SiteMapHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sitemap.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// TutorialHandler 教程
func TutorialHandler(c *gin.Context) {
category := c.Param("category")
filename := c.Param("filename")
urlPath := c.Request.URL.Path
subTitle := categoryMap[category]
incrVisited(urlPath, subTitle)
renderArticle(c, http.StatusOK, fmt.Sprintf("tutorial/%s/%s", category, filename), subTitle, 15)
}
// SearchHandler 搜索
func SearchHandler(c *gin.Context) {
word := c.PostForm("search")
c.Redirect(
http.StatusFound,
"https://www.google.com/search?q=site:jiajunhuang.com "+word,
)
}
// RewardHandler 扫码赞赏
func RewardHandler(c *gin.Context) {
userAgent := c.Request.UserAgent()
if strings.Contains(userAgent, "MicroMessenger") {
c.Redirect(http.StatusFound, os.Getenv("WECHAT_PAY_URL"))
return
}
c.Redirect(http.StatusFound, os.Getenv("ALIPAY_URL"))
}
// ArticlesAPIHandler 首页文章API
func ArticlesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
perPage := 50
start := (queryObj.Page - 1) * perPage
if start < 0 {
start = 0
}
if start > len(articles) {
start = len(articles)
}
end := start + perPage
if end > len(articles) {
end = len(articles)
}
c.JSON(http.StatusOK, gin.H{"msg": "", "result": articles[start:end]})
}
// TopArticlesAPIHandler 热门文章API
func TopArticlesAPIHandler(c *gin.Context) {
topArticles := getTopVisited(20)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": topArticles})
}
// SharingAPIHandler 获取分享
func SharingAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
sharings := dao.GetSharing(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": sharings})
}
// NotesAPIHandler 获取随想
func NotesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
notes := dao.GetNotes(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": notes})
}
func main() {
if err := InitSentry(); err != nil {
log.Panicf("failed to init sentry: %s", err)
}
defer sentry.Flush(2 * time.Second)
// telegram bot
go startNoteBot()
go startSharingBot()
InitializeDB()
InitializeRedis()
r := gin.New()
r.Use(sentrygin.New(sentrygin.Options{}))
r.Use(gin.Logger())
r.Use(func(c *gin.Context) {
totalRequests.Inc()
})
r.LoadHTMLGlob("templates/*.html")
r.Static("/static", "./static")
//r.Static("/tutorial/:lang/img/", "./tutorial/:lang/img") # 然而不支持
//r.Static("/articles/img", "./articles/img") # 然而有冲突
r.StaticFile("/favicon.ico", "./static/favicon.ico")
r.StaticFile("/robots.txt", "./static/robots.txt")
r.StaticFile("/ads.txt", "./static/ads.txt")
r.GET("/", IndexHandler)
r.GET("/ping", PingPongHandler)
r.GET("/404", NotFoundHandler)
r.GET("/archive", ArchiveHandler)
r.GET("/articles/:filepath", ArticleHandler)
r.GET("/aboutme", AboutMeHandler)
r.GET("/tutorial", TutorialPageHandler)
r.GET("/friends", FriendsHandler)
r.GET("/app", AppHandler)
r.GET("/sharing", SharingHandler)
r.GET("/sharing/all", AllSharingHandler)
r.GET("/sharing/rss", SharingRSSHandler)
r.GET("/notes", NotesHandler)
r.GET("/api/v1/articles", ArticlesAPIHandler)
r.GET("/api/v1/topn", TopArticlesAPIHandler)
r.GET("/api/v1/sharing", SharingAPIHandler)
r.GET("/api/v1/notes", NotesAPIHandler)
r.GET("/rss", RSSHandler)
r.GET("/sitemap.xml", SiteMapHandler)
r.GET("/tutorial/:category/:filename", TutorialHandler)
r.GET("/reward", RewardHandler)
r.POST("/search", SearchHandler)
r.GET("/metrics", gin.WrapH(promhttp.Handler()))
r.NoRoute(func(c *gin.Context) { c.Redirect(http.StatusFound, "/404") })
r.Run("0.0.0.0:8080")
}
| e}
b, err := json.Marshal(vi | conditional_block |
main.go | package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/getsentry/sentry-go"
sentrygin "github.com/getsentry/sentry-go/gin"
"github.com/gin-gonic/gin"
redis "github.com/go-redis/redis/v7"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/russross/blackfriday"
)
const (
zsetKey = "blogtopn"
)
var (
filenameRegex = regexp.MustCompile(`(\d{4}_\d{2}_\d{2})-.+\..+`)
articles = LoadMDs("articles")
db *sqlx.DB
redisClient *redis.Client
categoryMap = map[string]string{
"golang": "Golang简明教程",
"python": "Python教程",
"data_structure": "数据结构在实际项目中的使用",
}
// ErrNotFound means article not found
ErrNotFound = errors.New("Article Not Found")
// ErrFailedToLoad failed to load article
ErrFailedToLoad = errors.New("Failed To Load Article")
// Prometheus
totalRequests = promauto.NewCounter(prometheus.CounterOpts{Name: "total_requests_total"})
)
// InitSentry 初始化sentry
func InitSentry() error {
return sentry.Init(sentry.ClientOptions{
Dsn: os.Getenv("SENTRY_DSN"),
// Specify a fixed sample rate:
TracesSampleRate: 1.0,
})
}
// InitializeDB 初始化数据库连接
func InitializeDB() {
var err error
db, err = sqlx.Connect("mysql", os.Getenv("SQLX_URL"))
if err != nil {
log.Fatalf("failed to connect to the db: %s", err)
}
}
// InitializeRedis 初始化Redis
func InitializeRedis() {
opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
if err != nil {
log.Fatalf("failed to connect to redis db: %s", err)
}
// Create client as usually.
redisClient = redis.NewClient(opt)
}
// Article 就是文章
type Article struct {
Title string `json:"title"`
Date string `json:"date_str"`
Filename string `json:"file_name"`
DirName string `json:"dir_name"`
PubDate time.Time `json:"-"`
Description string `json:"description"`
}
// Articles 文章列表
type Articles []Article
func (a Articles) Len() int { return len(a) }
func (a Articles) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed | {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
"partly": true,
},
)
}
// NotesHandler 随想
func NotesHandler(c *gin.Context) {
notes := dao.GetAllNotes()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"notes": notes,
},
)
}
// RSSHandler RSS
func RSSHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// SharingRSSHandler RSS for sharing channel
func SharingRSSHandler(c *gin.Context) {
sharings := dao.GetAllSharing()
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sharing_rss.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"sharings": sharings,
},
)
}
// SiteMapHandler sitemap
func SiteMapHandler(c *gin.Context) {
c.Header("Content-Type", "application/xml")
c.HTML(
http.StatusOK, "sitemap.html", gin.H{
"isBlogApp": isBlogApp(c),
"rssHeader": template.HTML(`<?xml version="1.0" encoding="UTF-8"?>`),
"articles": articles,
},
)
}
// TutorialHandler 教程
func TutorialHandler(c *gin.Context) {
category := c.Param("category")
filename := c.Param("filename")
urlPath := c.Request.URL.Path
subTitle := categoryMap[category]
incrVisited(urlPath, subTitle)
renderArticle(c, http.StatusOK, fmt.Sprintf("tutorial/%s/%s", category, filename), subTitle, 15)
}
// SearchHandler 搜索
func SearchHandler(c *gin.Context) {
word := c.PostForm("search")
c.Redirect(
http.StatusFound,
"https://www.google.com/search?q=site:jiajunhuang.com "+word,
)
}
// RewardHandler 扫码赞赏
func RewardHandler(c *gin.Context) {
userAgent := c.Request.UserAgent()
if strings.Contains(userAgent, "MicroMessenger") {
c.Redirect(http.StatusFound, os.Getenv("WECHAT_PAY_URL"))
return
}
c.Redirect(http.StatusFound, os.Getenv("ALIPAY_URL"))
}
// ArticlesAPIHandler 首页文章API
func ArticlesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
perPage := 50
start := (queryObj.Page - 1) * perPage
if start < 0 {
start = 0
}
if start > len(articles) {
start = len(articles)
}
end := start + perPage
if end > len(articles) {
end = len(articles)
}
c.JSON(http.StatusOK, gin.H{"msg": "", "result": articles[start:end]})
}
// TopArticlesAPIHandler 热门文章API
func TopArticlesAPIHandler(c *gin.Context) {
topArticles := getTopVisited(20)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": topArticles})
}
// SharingAPIHandler 获取分享
func SharingAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
sharings := dao.GetSharing(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": sharings})
}
// NotesAPIHandler 获取随想
func NotesAPIHandler(c *gin.Context) {
queryObj := struct {
Page int `form:"page,default=1"`
}{}
if err := c.BindQuery(&queryObj); err != nil {
log.Printf("failed to bind page: %s", err)
}
limit := 50
offset := (queryObj.Page - 1) * limit
notes := dao.GetNotes(limit, offset)
c.JSON(http.StatusOK, gin.H{"msg": "", "result": notes})
}
func main() {
if err := InitSentry(); err != nil {
log.Panicf("failed to init sentry: %s", err)
}
defer sentry.Flush(2 * time.Second)
// telegram bot
go startNoteBot()
go startSharingBot()
InitializeDB()
InitializeRedis()
r := gin.New()
r.Use(sentrygin.New(sentrygin.Options{}))
r.Use(gin.Logger())
r.Use(func(c *gin.Context) {
totalRequests.Inc()
})
r.LoadHTMLGlob("templates/*.html")
r.Static("/static", "./static")
//r.Static("/tutorial/:lang/img/", "./tutorial/:lang/img") # 然而不支持
//r.Static("/articles/img", "./articles/img") # 然而有冲突
r.StaticFile("/favicon.ico", "./static/favicon.ico")
r.StaticFile("/robots.txt", "./static/robots.txt")
r.StaticFile("/ads.txt", "./static/ads.txt")
r.GET("/", IndexHandler)
r.GET("/ping", PingPongHandler)
r.GET("/404", NotFoundHandler)
r.GET("/archive", ArchiveHandler)
r.GET("/articles/:filepath", ArticleHandler)
r.GET("/aboutme", AboutMeHandler)
r.GET("/tutorial", TutorialPageHandler)
r.GET("/friends", FriendsHandler)
r.GET("/app", AppHandler)
r.GET("/sharing", SharingHandler)
r.GET("/sharing/all", AllSharingHandler)
r.GET("/sharing/rss", SharingRSSHandler)
r.GET("/notes", NotesHandler)
r.GET("/api/v1/articles", ArticlesAPIHandler)
r.GET("/api/v1/topn", TopArticlesAPIHandler)
r.GET("/api/v1/sharing", SharingAPIHandler)
r.GET("/api/v1/notes", NotesAPIHandler)
r.GET("/rss", RSSHandler)
r.GET("/sitemap.xml", SiteMapHandler)
r.GET("/tutorial/:category/:filename", TutorialHandler)
r.GET("/reward", RewardHandler)
r.POST("/search", SearchHandler)
r.GET("/metrics", gin.WrapH(promhttp.Handler()))
r.NoRoute(func(c *gin.Context) { c.Redirect(http.StatusFound, "/404") })
r.Run("0.0.0.0:8080")
}
| to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil | identifier_body |
sequences.py | import logging
from typing import List, Tuple, Union
import albumentations
import keras
import numpy as np
from bfgn.data_management.scalers import BaseGlobalScaler
_logger = logging.getLogger(__name__)
ADDITIONAL_TARGETS_KEY = "image_{}"
class BaseSequence(keras.utils.Sequence):
feature_scaler = None
response_scaler = None
custom_augmentations = None
def __init__(
self,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
custom_augmentations: albumentations.Compose = None,
nan_replacement_value: float = None,
) -> None:
self.feature_scaler = feature_scaler
self.response_scaler = response_scaler
self.batch_size = batch_size
self.custom_augmentations = custom_augmentations
self.nan_replacement_value = nan_replacement_value
def __len__(self) -> int:
raise NotImplementedError("Method is required for Keras functionality. Should return steps_per_epoch.")
def __getitem__(
self, index: int, return_raw_sample: bool = False
) -> Union[
Tuple[List[np.array], List[np.array]],
Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]],
]:
# Method is required for Keras functionality, reuse names to avoid creating many new, potentially large objects
_logger.debug("Get batch {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
|
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
"""
max_kernel = int(round(0.1 * window_radius))
max_hole_size = int(round(0.1 * window_radius))
additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]
return albumentations.Compose(
[
# The augmentations assume an image is RGB between 0 and 1
albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),
# These augmentations should be order independent, toss 'em up front
albumentations.Flip(p=0.5),
albumentations.Transpose(p=0.5),
albumentations.Rotate(limit=90, p=0.5),
# Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front
albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),
# Color modifications
albumentations.OneOf(
[
albumentations.RandomBrightnessContrast(
brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0
),
albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),
],
p=0.25,
),
# Distortions
albumentations.OneOf(
[
albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),
albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),
albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),
],
p=0.25,
),
albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),
# Noise
albumentations.OneOf(
[
albumentations.CoarseDropout(
max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0
),
albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),
],
p=0.25,
),
# Scaling, adding last so that other augmentations are applied at a consistent resolution
albumentations.RandomScale(scale_limit=0.05, p=0.25),
# Augmentations may not return images of the same size, images can be both smaller and larger than expected, so
# these two augmentations are added to keep things consistent
albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
# Return the data to its original scale
albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),
],
p=1.0,
additional_targets={target: "image" for target in additional_targets},
)
| current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights] | identifier_body |
sequences.py | import logging
from typing import List, Tuple, Union
import albumentations
import keras
import numpy as np
from bfgn.data_management.scalers import BaseGlobalScaler
_logger = logging.getLogger(__name__)
ADDITIONAL_TARGETS_KEY = "image_{}"
class BaseSequence(keras.utils.Sequence):
feature_scaler = None
response_scaler = None
custom_augmentations = None
def __init__(
self,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
custom_augmentations: albumentations.Compose = None,
nan_replacement_value: float = None,
) -> None:
self.feature_scaler = feature_scaler
self.response_scaler = response_scaler
self.batch_size = batch_size
self.custom_augmentations = custom_augmentations
self.nan_replacement_value = nan_replacement_value
def __len__(self) -> int:
raise NotImplementedError("Method is required for Keras functionality. Should return steps_per_epoch.")
def __getitem__(
self, index: int, return_raw_sample: bool = False
) -> Union[
Tuple[List[np.array], List[np.array]],
Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]],
]:
# Method is required for Keras functionality, reuse names to avoid creating many new, potentially large objects
_logger.debug("Get batch {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
|
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
    """
    This function returns a custom augmentations object for use with sequences via the load_sequences function in
    data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
    order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
    probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
    problem.
    Args:
        num_features: number of features used in the model
        window_radius: window_radius from the data configs (output tiles are 2 * window_radius on a side)
    Returns:
        custom augmentations function for use with sequences
    """
    # Kernel/hole sizes scale with the tile radius so augmentation strength tracks tile size
    max_kernel = int(round(0.1 * window_radius))
    max_hole_size = int(round(0.1 * window_radius))
    # Extra named targets so every feature image beyond the first receives identical transforms
    additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]
    return albumentations.Compose(
        [
            # The augmentations assume an image is RGB between 0 and 1
            albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),
            # These augmentations should be order independent, toss 'em up front
            albumentations.Flip(p=0.5),
            albumentations.Transpose(p=0.5),
            albumentations.Rotate(limit=90, p=0.5),
            # Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front
            albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),
            # Color modifications
            albumentations.OneOf(
                [
                    albumentations.RandomBrightnessContrast(
                        brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0
                    ),
                    albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),
                ],
                p=0.25,
            ),
            # Distortions
            albumentations.OneOf(
                [
                    albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),
                    albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),
                    albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),
                ],
                p=0.25,
            ),
            albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),
            # Noise; NaN fill marks dropped pixels so the sequence can zero their loss weights later
            albumentations.OneOf(
                [
                    albumentations.CoarseDropout(
                        max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0
                    ),
                    albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),
                ],
                p=0.25,
            ),
            # Scaling, adding last so that other augmentations are applied at a consistent resolution
            albumentations.RandomScale(scale_limit=0.05, p=0.25),
            # Augmentations may not return images of the same size, images can be both smaller and larger than expected, so
            # these two augmentations are added to keep things consistent
            albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
            albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
            # Return the data to its original scale
            albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),
        ],
        p=1.0,
        additional_targets={target: "image" for target in additional_targets},
    )
| return_value = ((raw_features, raw_responses), (trans_features, trans_responses)) | conditional_block |
sequences.py | import logging
from typing import List, Tuple, Union
import albumentations
import keras
import numpy as np
from bfgn.data_management.scalers import BaseGlobalScaler
_logger = logging.getLogger(__name__)
ADDITIONAL_TARGETS_KEY = "image_{}"
class BaseSequence(keras.utils.Sequence):
feature_scaler = None
response_scaler = None
custom_augmentations = None
def __init__(
self,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
custom_augmentations: albumentations.Compose = None,
nan_replacement_value: float = None,
) -> None:
self.feature_scaler = feature_scaler
self.response_scaler = response_scaler
self.batch_size = batch_size
self.custom_augmentations = custom_augmentations
self.nan_replacement_value = nan_replacement_value
def __len__(self) -> int:
raise NotImplementedError("Method is required for Keras functionality. Should return steps_per_epoch.")
def __getitem__(
self, index: int, return_raw_sample: bool = False
) -> Union[
Tuple[List[np.array], List[np.array]],
Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]],
]:
# Method is required for Keras functionality, reuse names to avoid creating many new, potentially large objects
_logger.debug("Get batch {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
    """Extract the batch at ``index``, rolling across memmapped arrays as needed.

    Restores the method identifier lost in this row's fill-in hole; the name is
    fixed by the abstract declaration on BaseSequence, which this overrides.

    Args:
        index: batch index in ``[0, len(self))``.

    Returns:
        Tuple of single-element lists ``([features], [responses], [weights])``;
        the final batch may be shorter than ``batch_size`` if data runs out.
    """
    # start by finding which array we're starting in, based on the input index, batch size,
    # and the number of samples per array
    current_array = 0
    while current_array < len(self.cum_samples_per_array) - 1:
        if (
            index * self.batch_size >= self.cum_samples_per_array[current_array]
            and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
        ):
            break
        current_array += 1
    # grab the appropriate number of samples from the current array
    sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
    # .copy() detaches the slice from the underlying memmap
    batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
    batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
    batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
    # if the current array didn't have enough samples in it, roll forward to the next one (and keep
    # doing so until we have enough samples)
    while batch_features.shape[0] < self.batch_size:
        sample_index = 0
        current_array += 1
        if current_array == len(self.features):
            # out of data entirely; return a short final batch
            break
        stop_ind = self.batch_size - batch_features.shape[0]
        batch_features = np.append(
            batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
        )
        batch_responses = np.append(
            batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
        )
        batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
    if self.feature_mean_centering is True:
        batch_features = self._mean_center(batch_features)
    return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
"""
max_kernel = int(round(0.1 * window_radius))
max_hole_size = int(round(0.1 * window_radius))
additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]
return albumentations.Compose(
[
# The augmentations assume an image is RGB between 0 and 1
albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),
# These augmentations should be order independent, toss 'em up front
albumentations.Flip(p=0.5),
albumentations.Transpose(p=0.5),
albumentations.Rotate(limit=90, p=0.5),
# Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front
albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),
# Color modifications
albumentations.OneOf(
[
albumentations.RandomBrightnessContrast(
brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0
),
albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),
],
p=0.25,
),
# Distortions
albumentations.OneOf(
[
albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),
albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),
albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),
],
p=0.25,
),
albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),
# Noise
albumentations.OneOf(
[
albumentations.CoarseDropout(
max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0
),
albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),
],
p=0.25,
),
# Scaling, adding last so that other augmentations are applied at a consistent resolution
albumentations.RandomScale(scale_limit=0.05, p=0.25),
# Augmentations may not return images of the same size, images can be both smaller and larger than expected, so
# these two augmentations are added to keep things consistent
albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
# Return the data to its original scale
albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),
],
p=1.0,
additional_targets={target: "image" for target in additional_targets},
)
| _get_features_responses_weights | identifier_name |
sequences.py | import logging
from typing import List, Tuple, Union
import albumentations
import keras
import numpy as np
from bfgn.data_management.scalers import BaseGlobalScaler
_logger = logging.getLogger(__name__)
ADDITIONAL_TARGETS_KEY = "image_{}"
class BaseSequence(keras.utils.Sequence):
feature_scaler = None
response_scaler = None
custom_augmentations = None
def __init__(
self,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
custom_augmentations: albumentations.Compose = None,
nan_replacement_value: float = None,
) -> None:
self.feature_scaler = feature_scaler
self.response_scaler = response_scaler
self.batch_size = batch_size
self.custom_augmentations = custom_augmentations
self.nan_replacement_value = nan_replacement_value
def __len__(self) -> int:
raise NotImplementedError("Method is required for Keras functionality. Should return steps_per_epoch.")
def __getitem__(
self, index: int, return_raw_sample: bool = False
) -> Union[
Tuple[List[np.array], List[np.array]],
Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]],
]:
# Method is required for Keras functionality, reuse names to avoid creating many new, potentially large objects
_logger.debug("Get batch {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)] | return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
"""
max_kernel = int(round(0.1 * window_radius))
max_hole_size = int(round(0.1 * window_radius))
additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]
return albumentations.Compose(
[
# The augmentations assume an image is RGB between 0 and 1
albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),
# These augmentations should be order independent, toss 'em up front
albumentations.Flip(p=0.5),
albumentations.Transpose(p=0.5),
albumentations.Rotate(limit=90, p=0.5),
# Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front
albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),
# Color modifications
albumentations.OneOf(
[
albumentations.RandomBrightnessContrast(
brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0
),
albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),
],
p=0.25,
),
# Distortions
albumentations.OneOf(
[
albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),
albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),
albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),
],
p=0.25,
),
albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),
# Noise
albumentations.OneOf(
[
albumentations.CoarseDropout(
max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0
),
albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),
],
p=0.25,
),
# Scaling, adding last so that other augmentations are applied at a consistent resolution
albumentations.RandomScale(scale_limit=0.05, p=0.25),
# Augmentations may not return images of the same size, images can be both smaller and larger than expected, so
# these two augmentations are added to keep things consistent
albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),
# Return the data to its original scale
albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),
],
p=1.0,
additional_targets={target: "image" for target in additional_targets},
) |
if return_raw_sample is True:
# This is for BGFN reporting and other functionality | random_line_split |
data.ts | /**
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Helpers for UI selections */
const GCR_PREFIX = 'gcr.io/deeplearning-platform-release';
/** Custom ScaleTier allows selection of AI Platform Machine type */
export const CUSTOM = 'CUSTOM';
/** Indicates a single Notebook run */
export const SINGLE = 'single';
/** Indicates a recurring scheduled Notebook run */
export const RECURRING = 'recurring';
/** Suffix to add to projectId for GCS bucket storing notebook sources. */
export const BUCKET_NAME_SUFFIX = '-scheduled-notebooks';
/** Region where Cloud Function will be deployed. */
export const CLOUD_FUNCTION_REGION = 'us-central1';
/** Location where the Cloud Function zip archive is stored */
export const CLOUD_FUNCTION_ARCHIVE =
'gs://deeplearning-platform-ui-public/gcp_scheduled_notebook_helper.zip';
/** Name of the Cloud Function that handles notebook scheduling */
export const CLOUD_FUNCTION_NAME = 'submitScheduledNotebook';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
  options: T[],
  value: string | number | undefined
): T | undefined {
  if (value === undefined) {
    return undefined;
  }
  for (const option of options) {
    if (option.value === value) {
      return option;
    }
  }
  return undefined;
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the first occurrence of the item from the list, if present. */
export function removeFromList<T>(list: T[], value: T) {
  const position = list.indexOf(value);
  if (position !== -1) {
    list.splice(position, 1);
  }
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
 * AI Platform Machine types.
 * https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
 */
export const MASTER_TYPES: Option[] = [
  { value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
  { value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
  { value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
  { value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
  { value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
  { value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
  // Fixed: n1-highmem-2 was mislabeled '4 CPUs, 26 GB RAM' (a duplicate of the
  // n1-highmem-4 row); the n1-highmem family provides 6.5 GB RAM per vCPU.
  { value: 'n1-highmem-2', text: '2 CPUs, 13 GB RAM' },
  { value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
  { value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
  { value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
  { value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
  { value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
  { value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
  { value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
  { value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
  { value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
  { value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function | (masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.locations/list
*/
export const REGIONS: Option[] = [
{
value: 'us-central1',
text: 'us-central1 (Iowa)',
},
{
value: 'us-east1',
text: 'us-east1 (South Carolina)',
},
{
value: 'us-east4',
text: 'us-east4 (Northern Virginia)',
},
{
value: 'us-west1',
text: 'us-west1 (Oregon)',
},
{
value: 'us-west2',
text: 'us-west2 (Los Angeles)',
},
{
value: 'us-west3',
text: 'us-west3 (Salt Lake City)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'europe-north1',
text: 'europe-north1 (Finland)',
},
{
value: 'europe-west1',
text: 'europe-west1 (Belgium)',
},
{
value: 'europe-west2',
text: 'europe-west2 (London)',
},
{
value: 'europe-west3',
text: 'europe-west3 (Frankfurt)',
},
{
value: 'europe-west4',
text: 'europe-west4 (Netherlands)',
},
{
value: 'europe-west6',
text: 'europe-west6 (Zurich)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'asia-east2',
text: 'asia-east2 (Hong Kong)',
},
{
value: 'asia-south1',
text: 'asia-south1 (Mumbai)',
},
{
value: 'asia-northeast1',
text: 'asia-northeast1 (Tokyo)',
},
{
value: 'asia-northeast2',
text: 'asia-northeast2 (Osaka)',
},
{
value: 'asia-northeast3',
text: 'asia-northeast3 (Seoul)',
},
{
value: 'asia-southeast1',
text: 'asia-southeast1 (Singapore)',
},
];
/** Single execution or recurring schedule */
export const SCHEDULE_TYPES: Option[] = [
{ value: SINGLE, text: 'Single run' },
{ value: RECURRING, text: 'Recurring run' },
];
export const FREQUENCY_TYPES: Option[] = [
{ value: HOUR, text: 'hour' },
{ value: DAY, text: 'day' },
{ value: WEEK, text: 'week' },
{ value: MONTH, text: 'month' },
];
/** Link to Cloud Console */
export const CLOUD_CONSOLE = 'https://console.cloud.google.com';
/** Link to AI Platform Jobs */
export const AI_PLATFORM_LINK = `${CLOUD_CONSOLE}/ai-platform/jobs`;
/** Link to GCS Storage Browser */
export const GCS_LINK = `${CLOUD_CONSOLE}/storage/browser`;
/** Link to Scheduled Runs page */
export const SCHEDULER_LINK = `${CLOUD_CONSOLE}/ai-platform/notebooks/list/scheduled-runs`;
/** Notebook jobs directory that notebooks will be imported to. */
export const IMPORT_DIRECTORY = 'imported_notebook_jobs/';
| getAcceleratorTypes | identifier_name |
data.ts | /**
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Helpers for UI selections */
const GCR_PREFIX = 'gcr.io/deeplearning-platform-release';
/** Custom ScaleTier allows selection of AI Platform Machine type */
export const CUSTOM = 'CUSTOM';
/** Indicates a single Notebook run */
export const SINGLE = 'single';
/** Indicates a recurring scheduled Notebook run */
export const RECURRING = 'recurring';
/** Suffix to add to projectId for GCS bucket storing notebook sources. */
export const BUCKET_NAME_SUFFIX = '-scheduled-notebooks';
/** Region where Cloud Function will be deployed. */
export const CLOUD_FUNCTION_REGION = 'us-central1';
/** Location where the Cloud Function zip archive is stored */
export const CLOUD_FUNCTION_ARCHIVE =
'gs://deeplearning-platform-ui-public/gcp_scheduled_notebook_helper.zip';
/** Name of the Cloud Function that handles notebook scheduling */
export const CLOUD_FUNCTION_NAME = 'submitScheduledNotebook';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined |
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.locations/list
*/
export const REGIONS: Option[] = [
{
value: 'us-central1',
text: 'us-central1 (Iowa)',
},
{
value: 'us-east1',
text: 'us-east1 (South Carolina)',
},
{
value: 'us-east4',
text: 'us-east4 (Northern Virginia)',
},
{
value: 'us-west1',
text: 'us-west1 (Oregon)',
},
{
value: 'us-west2',
text: 'us-west2 (Los Angeles)',
},
{
value: 'us-west3',
text: 'us-west3 (Salt Lake City)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'europe-north1',
text: 'europe-north1 (Finland)',
},
{
value: 'europe-west1',
text: 'europe-west1 (Belgium)',
},
{
value: 'europe-west2',
text: 'europe-west2 (London)',
},
{
value: 'europe-west3',
text: 'europe-west3 (Frankfurt)',
},
{
value: 'europe-west4',
text: 'europe-west4 (Netherlands)',
},
{
value: 'europe-west6',
text: 'europe-west6 (Zurich)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'asia-east2',
text: 'asia-east2 (Hong Kong)',
},
{
value: 'asia-south1',
text: 'asia-south1 (Mumbai)',
},
{
value: 'asia-northeast1',
text: 'asia-northeast1 (Tokyo)',
},
{
value: 'asia-northeast2',
text: 'asia-northeast2 (Osaka)',
},
{
value: 'asia-northeast3',
text: 'asia-northeast3 (Seoul)',
},
{
value: 'asia-southeast1',
text: 'asia-southeast1 (Singapore)',
},
];
/** Single execution or recurring schedule */
export const SCHEDULE_TYPES: Option[] = [
{ value: SINGLE, text: 'Single run' },
{ value: RECURRING, text: 'Recurring run' },
];
export const FREQUENCY_TYPES: Option[] = [
{ value: HOUR, text: 'hour' },
{ value: DAY, text: 'day' },
{ value: WEEK, text: 'week' },
{ value: MONTH, text: 'month' },
];
/** Link to Cloud Console */
export const CLOUD_CONSOLE = 'https://console.cloud.google.com';
/** Link to AI Platform Jobs */
export const AI_PLATFORM_LINK = `${CLOUD_CONSOLE}/ai-platform/jobs`;
/** Link to GCS Storage Browser */
export const GCS_LINK = `${CLOUD_CONSOLE}/storage/browser`;
/** Link to Scheduled Runs page */
export const SCHEDULER_LINK = `${CLOUD_CONSOLE}/ai-platform/notebooks/list/scheduled-runs`;
/** Notebook jobs directory that notebooks will be imported to. */
export const IMPORT_DIRECTORY = 'imported_notebook_jobs/';
| {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
} | identifier_body |
data.ts | /**
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Helpers for UI selections */
const GCR_PREFIX = 'gcr.io/deeplearning-platform-release';
/** Custom ScaleTier allows selection of AI Platform Machine type */
export const CUSTOM = 'CUSTOM';
/** Indicates a single Notebook run */
export const SINGLE = 'single';
/** Indicates a recurring scheduled Notebook run */
export const RECURRING = 'recurring';
/** Suffix to add to projectId for GCS bucket storing notebook sources. */
export const BUCKET_NAME_SUFFIX = '-scheduled-notebooks';
/** Region where Cloud Function will be deployed. */
export const CLOUD_FUNCTION_REGION = 'us-central1';
/** Location where the Cloud Function zip archive is stored */
export const CLOUD_FUNCTION_ARCHIVE =
'gs://deeplearning-platform-ui-public/gcp_scheduled_notebook_helper.zip';
/** Name of the Cloud Function that handles notebook scheduling */
export const CLOUD_FUNCTION_NAME = 'submitScheduledNotebook';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) |
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.locations/list
*/
export const REGIONS: Option[] = [
{
value: 'us-central1',
text: 'us-central1 (Iowa)',
},
{
value: 'us-east1',
text: 'us-east1 (South Carolina)',
},
{
value: 'us-east4',
text: 'us-east4 (Northern Virginia)',
},
{
value: 'us-west1',
text: 'us-west1 (Oregon)',
},
{
value: 'us-west2',
text: 'us-west2 (Los Angeles)',
},
{
value: 'us-west3',
text: 'us-west3 (Salt Lake City)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'europe-north1',
text: 'europe-north1 (Finland)',
},
{
value: 'europe-west1',
text: 'europe-west1 (Belgium)',
},
{
value: 'europe-west2',
text: 'europe-west2 (London)',
},
{
value: 'europe-west3',
text: 'europe-west3 (Frankfurt)',
},
{
value: 'europe-west4',
text: 'europe-west4 (Netherlands)',
},
{
value: 'europe-west6',
text: 'europe-west6 (Zurich)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'asia-east2',
text: 'asia-east2 (Hong Kong)',
},
{
value: 'asia-south1',
text: 'asia-south1 (Mumbai)',
},
{
value: 'asia-northeast1',
text: 'asia-northeast1 (Tokyo)',
},
{
value: 'asia-northeast2',
text: 'asia-northeast2 (Osaka)',
},
{
value: 'asia-northeast3',
text: 'asia-northeast3 (Seoul)',
},
{
value: 'asia-southeast1',
text: 'asia-southeast1 (Singapore)',
},
];
/** Single execution or recurring schedule */
export const SCHEDULE_TYPES: Option[] = [
{ value: SINGLE, text: 'Single run' },
{ value: RECURRING, text: 'Recurring run' },
];
export const FREQUENCY_TYPES: Option[] = [
{ value: HOUR, text: 'hour' },
{ value: DAY, text: 'day' },
{ value: WEEK, text: 'week' },
{ value: MONTH, text: 'month' },
];
/** Link to Cloud Console */
export const CLOUD_CONSOLE = 'https://console.cloud.google.com';
/** Link to AI Platform Jobs */
export const AI_PLATFORM_LINK = `${CLOUD_CONSOLE}/ai-platform/jobs`;
/** Link to GCS Storage Browser */
export const GCS_LINK = `${CLOUD_CONSOLE}/storage/browser`;
/** Link to Scheduled Runs page */
export const SCHEDULER_LINK = `${CLOUD_CONSOLE}/ai-platform/notebooks/list/scheduled-runs`;
/** Notebook jobs directory that notebooks will be imported to. */
export const IMPORT_DIRECTORY = 'imported_notebook_jobs/';
| {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
} | conditional_block |
data.ts | /**
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Helpers for UI selections */
const GCR_PREFIX = 'gcr.io/deeplearning-platform-release';
/** Custom ScaleTier allows selection of AI Platform Machine type */
export const CUSTOM = 'CUSTOM';
/** Indicates a single Notebook run */
export const SINGLE = 'single';
/** Indicates a recurring scheduled Notebook run */
export const RECURRING = 'recurring';
/** Suffix to add to projectId for GCS bucket storing notebook sources. */
export const BUCKET_NAME_SUFFIX = '-scheduled-notebooks';
/** Region where Cloud Function will be deployed. */
export const CLOUD_FUNCTION_REGION = 'us-central1';
/** Location where the Cloud Function zip archive is stored */
export const CLOUD_FUNCTION_ARCHIVE =
'gs://deeplearning-platform-ui-public/gcp_scheduled_notebook_helper.zip';
/** Name of the Cloud Function that handles notebook scheduling */
export const CLOUD_FUNCTION_NAME = 'submitScheduledNotebook';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96', | * if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.locations/list
*/
export const REGIONS: Option[] = [
{
value: 'us-central1',
text: 'us-central1 (Iowa)',
},
{
value: 'us-east1',
text: 'us-east1 (South Carolina)',
},
{
value: 'us-east4',
text: 'us-east4 (Northern Virginia)',
},
{
value: 'us-west1',
text: 'us-west1 (Oregon)',
},
{
value: 'us-west2',
text: 'us-west2 (Los Angeles)',
},
{
value: 'us-west3',
text: 'us-west3 (Salt Lake City)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'europe-north1',
text: 'europe-north1 (Finland)',
},
{
value: 'europe-west1',
text: 'europe-west1 (Belgium)',
},
{
value: 'europe-west2',
text: 'europe-west2 (London)',
},
{
value: 'europe-west3',
text: 'europe-west3 (Frankfurt)',
},
{
value: 'europe-west4',
text: 'europe-west4 (Netherlands)',
},
{
value: 'europe-west6',
text: 'europe-west6 (Zurich)',
},
{
value: 'asia-east1',
text: 'asia-east1 (Taiwan)',
},
{
value: 'asia-east2',
text: 'asia-east2 (Hong Kong)',
},
{
value: 'asia-south1',
text: 'asia-south1 (Mumbai)',
},
{
value: 'asia-northeast1',
text: 'asia-northeast1 (Tokyo)',
},
{
value: 'asia-northeast2',
text: 'asia-northeast2 (Osaka)',
},
{
value: 'asia-northeast3',
text: 'asia-northeast3 (Seoul)',
},
{
value: 'asia-southeast1',
text: 'asia-southeast1 (Singapore)',
},
];
/** Single execution or recurring schedule */
export const SCHEDULE_TYPES: Option[] = [
{ value: SINGLE, text: 'Single run' },
{ value: RECURRING, text: 'Recurring run' },
];
export const FREQUENCY_TYPES: Option[] = [
{ value: HOUR, text: 'hour' },
{ value: DAY, text: 'day' },
{ value: WEEK, text: 'week' },
{ value: MONTH, text: 'month' },
];
/** Link to Cloud Console */
export const CLOUD_CONSOLE = 'https://console.cloud.google.com';
/** Link to AI Platform Jobs */
export const AI_PLATFORM_LINK = `${CLOUD_CONSOLE}/ai-platform/jobs`;
/** Link to GCS Storage Browser */
export const GCS_LINK = `${CLOUD_CONSOLE}/storage/browser`;
/** Link to Scheduled Runs page */
export const SCHEDULER_LINK = `${CLOUD_CONSOLE}/ai-platform/notebooks/list/scheduled-runs`;
/** Notebook jobs directory that notebooks will be imported to. */
export const IMPORT_DIRECTORY = 'imported_notebook_jobs/'; | ]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array | random_line_split |
classic.py | # -*- coding: utf-8 -*-
"""
Copyright 2015 Brookhaven Science Assoc.
as operator of Brookhaven National Lab.
"""
# supported RPC call version
PVER=0
import re
import logging
from functools import reduce
_log = logging.getLogger("carchive.classic")
import time, math
try:
from xmlrpc.client import Fault
except ImportError:
from xmlrpclib import Fault
from fnmatch import fnmatch
from collections import defaultdict
import numpy as np
from ..date import makeTime, timeTuple, total_seconds
from datetime import datetime
from twisted.internet import defer
from twisted.internet.defer import FirstError
# Use EOL hack
from ..rpcmunge import NiceProxy as Proxy
from ..dtype import dbr_time
from ..util import HandledError
from twisted.internet.error import ConnectionRefusedError
def _optime(R, S):
E = time.time()
_log.info("Query complete in %f sec", E-S)
return R
def _connerror(F):
if F.check(FirstError):
F = F.value.subFailure
if F.check(ConnectionRefusedError):
_log.fatal("Data server connection refused. Server not reachable?")
elif F.check(Fault):
E = F.value
if E.faultCode==-600:
_log.fatal("PV syntax error: %s",E.faultString)
raise HandledError()
else:
_log.fatal("RPC error: %s",E)
elif F.check(HandledError):
pass
else:
_log.fatal("Remote request failed! %s",F)
return F
_dtypes = {
0: np.dtype('a40'),
1: np.dtype('a26'),
2: np.int32,
3: np.float64
}
@defer.inlineCallbacks
def getArchive(conf):
"""getArchive(conf=...)
Fetch an Archive instance. If conf is passed
will be used instead of the default configuration.
Returns a Deferred which will fire with the Archive
instance.
"""
url = conf.get('url')
maxreq = conf.getint('maxrequests', 10)
maxq = conf.getint('maxquery')
proxy=Proxy(url, limit=maxreq, qlimit=maxq)
proxy.connectTimeout=3.0
info = proxy.callRemote('archiver.info').addErrback(_connerror)
archs= proxy.callRemote('archiver.archives').addErrback(_connerror)
X = yield defer.DeferredList([info, archs], fireOnOneErrback=True).addErrback(_connerror)
info, archs = X[0][1], X[1][1]
defer.returnValue(Archive(proxy, conf, info, archs))
class Archive(object):
"""
"""
def __init__(self, proxy, conf, info, archs):
self._proxy = proxy
self.conf = conf
if PVER < info['ver']:
_log.warn('Archive server protocol version %d is newer then ours (%d).\n'+
'Attempting to proceed.', info['ver'], PVER)
self.description = info['desc']
self.statusInfo = dict(enumerate(info['stat']))
self.severityInfo = {}
for S in info['sevr']:
self.severityInfo[int(S['num'])] = S
self.hows = enumerate(info['how'])
self.hows = dict([(a,b) for b,a in self.hows])
# map from name to key
self.__archs = dict([(x['name'],x['key']) for x in archs])
# map from key to name
self.__rarchs = dict([(x['key'],x['name']) for x in archs])
def | (self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.")
elif len(plan)==0:
# requested range is earlier than first recorded sample.
_log.warn("Query plan empty. No data in or before request time range.")
defer.returnValue(0)
_log.debug("Using plan of %d queries %s", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))
N = yield self._nextraw(0, pv=pv, plan=plan,
Ctot=0, Climit=count,
callback=callback, cbArgs=cbArgs,
cbKWs=cbKWs, chunkSize=chunkSize,
enumAsInt=enumAsInt, displayMeta=displayMeta)
defer.returnValue(N)
def _nextraw(self, partcount, pv, plan, Ctot, Climit,
callback, cbArgs, cbKWs, chunkSize,
enumAsInt, displayMeta=False):
sofar = partcount + Ctot
if len(plan)==0:
_log.debug("Plan complete: %s", pv)
return sofar # done
elif Climit and sofar>=Climit:
_log.debug("Plan point limit reached: %s", pv)
return sofar # done
count = Climit - sofar if Climit else None
T0, Tend, arch = plan.pop(0)
_log.debug("Query %d of %s %s -> %s for %s", len(plan), self.__rarchs[arch], T0, Tend, pv)
D = self._fetchdata(arch, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=T0, Tend=Tend,
count=count,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
displayMeta=displayMeta)
D.addCallback(self._nextraw, pv, plan, sofar, Climit,
callback, cbArgs, cbKWs, chunkSize, enumAsInt, displayMeta)
return D
@defer.inlineCallbacks
def fetchplot(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
delta = total_seconds(Tend-T0)
if delta<=0.0 or count<=0:
raise ValueError("invalid time range or sample count")
rate = count/delta # average rate in samples per second
if rate>=1.0:
_log.info("Time range too short for plot bin, switching to raw")
D = self.fetchraw(pv, callback, cbArgs, cbKWs, T0, Tend,
None, count, archs, breakDown,
enumAsInt)
defer.returnValue(D)
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
N = 0
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
Rcount = int(math.ceil((Rend[0]-Rstart[0])*rate))
_log.debug("Query %s %s -> %s for %s (%d)", self.__rarchs[K], Rstart, Rend, pv, Rcount)
D = self._fetchdata(K, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=Rstart, Tend=Rend,
count=Rcount,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
how=3)
Nc = yield D
N += Nc
defer.returnValue(N)
@defer.inlineCallbacks
def fetchsnap(self, pvs, T=None,
archs=None, chunkSize=100,
enumAsInt=False):
"""Fetch the value of all requested PVs at the given time
"""
pvs = list(pvs)
archs = self._archname2key(archs)
# values() request time range is inclusive, so Tcur==Tlast is a no-op
sec,ns = Tcur = timeTuple(makeTime(T))
ns+=1000
if ns>1000000000:
ns-=1000000000
sec+=1
Tlast = sec, ns
del sec, ns
Npvs = len(pvs)
NGroups = 1+(Npvs/chunkSize)
assert NGroups>0
values, metas = np.zeros(Npvs, dtype=np.object), np.zeros(Npvs, dtype=dbr_time)
_log.debug('fetchsnap at %s %s pvs in %s groups from %s archs',
Tcur, Npvs, NGroups, len(archs))
for igrp in range(NGroups):
Gpvs = pvs[igrp::NGroups]
if len(Gpvs)==0:
continue
Rval = values[igrp::NGroups]
Rmeta= metas[igrp::NGroups]
for arch in archs:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],Gpvs,Tcur,Tlast,2,0)
D = self._proxy.callRemote('archiver.values',
arch, Gpvs,
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
2, 0).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
results = yield D
except:
_log.fatal('Query fails')
raise
assert len(results)==len(Gpvs)
for idx, data in enumerate(results):
assert data['name']==Gpvs[idx], 'Results arrived out of order'
if len(data['values'])==0:
continue # no data for this one...
E = data['values'][-1]
if Rval[idx] is not None and Rmeta[idx]['sec']>E['secs']:
continue # too old
Rval[idx] = E['value']
Rmeta[idx] = (E['sevr'], E['stat'], E['secs'], E['nano'])
defer.returnValue((values, metas))
| severity | identifier_name |
classic.py | # -*- coding: utf-8 -*-
"""
Copyright 2015 Brookhaven Science Assoc.
as operator of Brookhaven National Lab.
"""
# supported RPC call version
PVER=0
import re
import logging
from functools import reduce
_log = logging.getLogger("carchive.classic")
import time, math
try:
from xmlrpc.client import Fault
except ImportError:
from xmlrpclib import Fault
from fnmatch import fnmatch
from collections import defaultdict
import numpy as np
from ..date import makeTime, timeTuple, total_seconds
from datetime import datetime
from twisted.internet import defer
from twisted.internet.defer import FirstError
# Use EOL hack
from ..rpcmunge import NiceProxy as Proxy
from ..dtype import dbr_time
from ..util import HandledError
from twisted.internet.error import ConnectionRefusedError
def _optime(R, S):
E = time.time()
_log.info("Query complete in %f sec", E-S)
return R
def _connerror(F):
if F.check(FirstError):
F = F.value.subFailure
if F.check(ConnectionRefusedError):
_log.fatal("Data server connection refused. Server not reachable?")
elif F.check(Fault):
E = F.value
if E.faultCode==-600:
_log.fatal("PV syntax error: %s",E.faultString)
raise HandledError()
else:
_log.fatal("RPC error: %s",E)
elif F.check(HandledError):
pass
else:
_log.fatal("Remote request failed! %s",F)
return F
_dtypes = {
0: np.dtype('a40'),
1: np.dtype('a26'),
2: np.int32,
3: np.float64
}
@defer.inlineCallbacks
def getArchive(conf):
"""getArchive(conf=...)
Fetch an Archive instance. If conf is passed
will be used instead of the default configuration.
Returns a Deferred which will fire with the Archive
instance.
"""
url = conf.get('url')
maxreq = conf.getint('maxrequests', 10)
maxq = conf.getint('maxquery')
proxy=Proxy(url, limit=maxreq, qlimit=maxq)
proxy.connectTimeout=3.0
info = proxy.callRemote('archiver.info').addErrback(_connerror)
archs= proxy.callRemote('archiver.archives').addErrback(_connerror)
X = yield defer.DeferredList([info, archs], fireOnOneErrback=True).addErrback(_connerror)
info, archs = X[0][1], X[1][1]
defer.returnValue(Archive(proxy, conf, info, archs))
class Archive(object):
"""
"""
def __init__(self, proxy, conf, info, archs):
self._proxy = proxy
self.conf = conf
if PVER < info['ver']:
_log.warn('Archive server protocol version %d is newer then ours (%d).\n'+
'Attempting to proceed.', info['ver'], PVER)
self.description = info['desc']
self.statusInfo = dict(enumerate(info['stat']))
self.severityInfo = {}
for S in info['sevr']:
self.severityInfo[int(S['num'])] = S
self.hows = enumerate(info['how'])
self.hows = dict([(a,b) for b,a in self.hows])
# map from name to key
self.__archs = dict([(x['name'],x['key']) for x in archs])
# map from key to name
self.__rarchs = dict([(x['key'],x['name']) for x in archs])
def severity(self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
|
elif len(plan)==0:
# requested range is earlier than first recorded sample.
_log.warn("Query plan empty. No data in or before request time range.")
defer.returnValue(0)
_log.debug("Using plan of %d queries %s", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))
N = yield self._nextraw(0, pv=pv, plan=plan,
Ctot=0, Climit=count,
callback=callback, cbArgs=cbArgs,
cbKWs=cbKWs, chunkSize=chunkSize,
enumAsInt=enumAsInt, displayMeta=displayMeta)
defer.returnValue(N)
def _nextraw(self, partcount, pv, plan, Ctot, Climit,
callback, cbArgs, cbKWs, chunkSize,
enumAsInt, displayMeta=False):
sofar = partcount + Ctot
if len(plan)==0:
_log.debug("Plan complete: %s", pv)
return sofar # done
elif Climit and sofar>=Climit:
_log.debug("Plan point limit reached: %s", pv)
return sofar # done
count = Climit - sofar if Climit else None
T0, Tend, arch = plan.pop(0)
_log.debug("Query %d of %s %s -> %s for %s", len(plan), self.__rarchs[arch], T0, Tend, pv)
D = self._fetchdata(arch, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=T0, Tend=Tend,
count=count,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
displayMeta=displayMeta)
D.addCallback(self._nextraw, pv, plan, sofar, Climit,
callback, cbArgs, cbKWs, chunkSize, enumAsInt, displayMeta)
return D
@defer.inlineCallbacks
def fetchplot(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
delta = total_seconds(Tend-T0)
if delta<=0.0 or count<=0:
raise ValueError("invalid time range or sample count")
rate = count/delta # average rate in samples per second
if rate>=1.0:
_log.info("Time range too short for plot bin, switching to raw")
D = self.fetchraw(pv, callback, cbArgs, cbKWs, T0, Tend,
None, count, archs, breakDown,
enumAsInt)
defer.returnValue(D)
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
N = 0
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
Rcount = int(math.ceil((Rend[0]-Rstart[0])*rate))
_log.debug("Query %s %s -> %s for %s (%d)", self.__rarchs[K], Rstart, Rend, pv, Rcount)
D = self._fetchdata(K, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=Rstart, Tend=Rend,
count=Rcount,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
how=3)
Nc = yield D
N += Nc
defer.returnValue(N)
@defer.inlineCallbacks
def fetchsnap(self, pvs, T=None,
archs=None, chunkSize=100,
enumAsInt=False):
"""Fetch the value of all requested PVs at the given time
"""
pvs = list(pvs)
archs = self._archname2key(archs)
# values() request time range is inclusive, so Tcur==Tlast is a no-op
sec,ns = Tcur = timeTuple(makeTime(T))
ns+=1000
if ns>1000000000:
ns-=1000000000
sec+=1
Tlast = sec, ns
del sec, ns
Npvs = len(pvs)
NGroups = 1+(Npvs/chunkSize)
assert NGroups>0
values, metas = np.zeros(Npvs, dtype=np.object), np.zeros(Npvs, dtype=dbr_time)
_log.debug('fetchsnap at %s %s pvs in %s groups from %s archs',
Tcur, Npvs, NGroups, len(archs))
for igrp in range(NGroups):
Gpvs = pvs[igrp::NGroups]
if len(Gpvs)==0:
continue
Rval = values[igrp::NGroups]
Rmeta= metas[igrp::NGroups]
for arch in archs:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],Gpvs,Tcur,Tlast,2,0)
D = self._proxy.callRemote('archiver.values',
arch, Gpvs,
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
2, 0).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
results = yield D
except:
_log.fatal('Query fails')
raise
assert len(results)==len(Gpvs)
for idx, data in enumerate(results):
assert data['name']==Gpvs[idx], 'Results arrived out of order'
if len(data['values'])==0:
continue # no data for this one...
E = data['values'][-1]
if Rval[idx] is not None and Rmeta[idx]['sec']>E['secs']:
continue # too old
Rval[idx] = E['value']
Rmeta[idx] = (E['sevr'], E['stat'], E['secs'], E['nano'])
defer.returnValue((values, metas))
| F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.") | conditional_block |
classic.py | # -*- coding: utf-8 -*-
"""
Copyright 2015 Brookhaven Science Assoc.
as operator of Brookhaven National Lab.
"""
# supported RPC call version
PVER=0
import re
import logging
from functools import reduce
_log = logging.getLogger("carchive.classic")
import time, math
try:
from xmlrpc.client import Fault
except ImportError:
from xmlrpclib import Fault
from fnmatch import fnmatch
from collections import defaultdict
import numpy as np
from ..date import makeTime, timeTuple, total_seconds
from datetime import datetime
from twisted.internet import defer
from twisted.internet.defer import FirstError
# Use EOL hack
from ..rpcmunge import NiceProxy as Proxy
from ..dtype import dbr_time
from ..util import HandledError
from twisted.internet.error import ConnectionRefusedError
def _optime(R, S):
E = time.time()
_log.info("Query complete in %f sec", E-S)
return R
def _connerror(F):
if F.check(FirstError):
F = F.value.subFailure
if F.check(ConnectionRefusedError):
_log.fatal("Data server connection refused. Server not reachable?")
elif F.check(Fault):
E = F.value
if E.faultCode==-600:
_log.fatal("PV syntax error: %s",E.faultString)
raise HandledError()
else:
_log.fatal("RPC error: %s",E)
elif F.check(HandledError):
pass
else:
_log.fatal("Remote request failed! %s",F)
return F
_dtypes = {
0: np.dtype('a40'),
1: np.dtype('a26'),
2: np.int32,
3: np.float64
}
@defer.inlineCallbacks
def getArchive(conf):
"""getArchive(conf=...)
Fetch an Archive instance. If conf is passed
will be used instead of the default configuration.
Returns a Deferred which will fire with the Archive
instance.
"""
url = conf.get('url')
maxreq = conf.getint('maxrequests', 10)
maxq = conf.getint('maxquery')
proxy=Proxy(url, limit=maxreq, qlimit=maxq)
proxy.connectTimeout=3.0
info = proxy.callRemote('archiver.info').addErrback(_connerror)
archs= proxy.callRemote('archiver.archives').addErrback(_connerror)
X = yield defer.DeferredList([info, archs], fireOnOneErrback=True).addErrback(_connerror)
info, archs = X[0][1], X[1][1]
defer.returnValue(Archive(proxy, conf, info, archs))
class Archive(object):
| """
"""
def __init__(self, proxy, conf, info, archs):
self._proxy = proxy
self.conf = conf
if PVER < info['ver']:
_log.warn('Archive server protocol version %d is newer then ours (%d).\n'+
'Attempting to proceed.', info['ver'], PVER)
self.description = info['desc']
self.statusInfo = dict(enumerate(info['stat']))
self.severityInfo = {}
for S in info['sevr']:
self.severityInfo[int(S['num'])] = S
self.hows = enumerate(info['how'])
self.hows = dict([(a,b) for b,a in self.hows])
# map from name to key
self.__archs = dict([(x['name'],x['key']) for x in archs])
# map from key to name
self.__rarchs = dict([(x['key'],x['name']) for x in archs])
def severity(self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.")
elif len(plan)==0:
# requested range is earlier than first recorded sample.
_log.warn("Query plan empty. No data in or before request time range.")
defer.returnValue(0)
_log.debug("Using plan of %d queries %s", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))
N = yield self._nextraw(0, pv=pv, plan=plan,
Ctot=0, Climit=count,
callback=callback, cbArgs=cbArgs,
cbKWs=cbKWs, chunkSize=chunkSize,
enumAsInt=enumAsInt, displayMeta=displayMeta)
defer.returnValue(N)
def _nextraw(self, partcount, pv, plan, Ctot, Climit,
callback, cbArgs, cbKWs, chunkSize,
enumAsInt, displayMeta=False):
sofar = partcount + Ctot
if len(plan)==0:
_log.debug("Plan complete: %s", pv)
return sofar # done
elif Climit and sofar>=Climit:
_log.debug("Plan point limit reached: %s", pv)
return sofar # done
count = Climit - sofar if Climit else None
T0, Tend, arch = plan.pop(0)
_log.debug("Query %d of %s %s -> %s for %s", len(plan), self.__rarchs[arch], T0, Tend, pv)
D = self._fetchdata(arch, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=T0, Tend=Tend,
count=count,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
displayMeta=displayMeta)
D.addCallback(self._nextraw, pv, plan, sofar, Climit,
callback, cbArgs, cbKWs, chunkSize, enumAsInt, displayMeta)
return D
@defer.inlineCallbacks
def fetchplot(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
delta = total_seconds(Tend-T0)
if delta<=0.0 or count<=0:
raise ValueError("invalid time range or sample count")
rate = count/delta # average rate in samples per second
if rate>=1.0:
_log.info("Time range too short for plot bin, switching to raw")
D = self.fetchraw(pv, callback, cbArgs, cbKWs, T0, Tend,
None, count, archs, breakDown,
enumAsInt)
defer.returnValue(D)
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
N = 0
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
Rcount = int(math.ceil((Rend[0]-Rstart[0])*rate))
_log.debug("Query %s %s -> %s for %s (%d)", self.__rarchs[K], Rstart, Rend, pv, Rcount)
D = self._fetchdata(K, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=Rstart, Tend=Rend,
count=Rcount,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
how=3)
Nc = yield D
N += Nc
defer.returnValue(N)
@defer.inlineCallbacks
def fetchsnap(self, pvs, T=None,
archs=None, chunkSize=100,
enumAsInt=False):
"""Fetch the value of all requested PVs at the given time
"""
pvs = list(pvs)
archs = self._archname2key(archs)
# values() request time range is inclusive, so Tcur==Tlast is a no-op
sec,ns = Tcur = timeTuple(makeTime(T))
ns+=1000
if ns>1000000000:
ns-=1000000000
sec+=1
Tlast = sec, ns
del sec, ns
Npvs = len(pvs)
NGroups = 1+(Npvs/chunkSize)
assert NGroups>0
values, metas = np.zeros(Npvs, dtype=np.object), np.zeros(Npvs, dtype=dbr_time)
_log.debug('fetchsnap at %s %s pvs in %s groups from %s archs',
Tcur, Npvs, NGroups, len(archs))
for igrp in range(NGroups):
Gpvs = pvs[igrp::NGroups]
if len(Gpvs)==0:
continue
Rval = values[igrp::NGroups]
Rmeta= metas[igrp::NGroups]
for arch in archs:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],Gpvs,Tcur,Tlast,2,0)
D = self._proxy.callRemote('archiver.values',
arch, Gpvs,
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
2, 0).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
results = yield D
except:
_log.fatal('Query fails')
raise
assert len(results)==len(Gpvs)
for idx, data in enumerate(results):
assert data['name']==Gpvs[idx], 'Results arrived out of order'
if len(data['values'])==0:
continue # no data for this one...
E = data['values'][-1]
if Rval[idx] is not None and Rmeta[idx]['sec']>E['secs']:
continue # too old
Rval[idx] = E['value']
Rmeta[idx] = (E['sevr'], E['stat'], E['secs'], E['nano'])
defer.returnValue((values, metas)) | identifier_body | |
classic.py | # -*- coding: utf-8 -*-
"""
Copyright 2015 Brookhaven Science Assoc.
as operator of Brookhaven National Lab.
"""
# supported RPC call version
PVER=0
import re
import logging
from functools import reduce
_log = logging.getLogger("carchive.classic")
import time, math
try:
from xmlrpc.client import Fault
except ImportError:
from xmlrpclib import Fault
from fnmatch import fnmatch
from collections import defaultdict
import numpy as np
from ..date import makeTime, timeTuple, total_seconds
from datetime import datetime
from twisted.internet import defer
from twisted.internet.defer import FirstError
# Use EOL hack
from ..rpcmunge import NiceProxy as Proxy
from ..dtype import dbr_time
from ..util import HandledError
from twisted.internet.error import ConnectionRefusedError
def _optime(R, S):
E = time.time()
_log.info("Query complete in %f sec", E-S)
return R
def _connerror(F):
if F.check(FirstError):
F = F.value.subFailure
if F.check(ConnectionRefusedError):
_log.fatal("Data server connection refused. Server not reachable?")
elif F.check(Fault):
E = F.value
if E.faultCode==-600:
_log.fatal("PV syntax error: %s",E.faultString)
raise HandledError()
else:
_log.fatal("RPC error: %s",E)
elif F.check(HandledError):
pass
else:
_log.fatal("Remote request failed! %s",F)
return F
_dtypes = {
0: np.dtype('a40'),
1: np.dtype('a26'),
2: np.int32,
3: np.float64
}
@defer.inlineCallbacks
def getArchive(conf):
"""getArchive(conf=...)
Fetch an Archive instance. If conf is passed
will be used instead of the default configuration.
Returns a Deferred which will fire with the Archive
instance.
"""
url = conf.get('url')
maxreq = conf.getint('maxrequests', 10)
maxq = conf.getint('maxquery')
proxy=Proxy(url, limit=maxreq, qlimit=maxq)
proxy.connectTimeout=3.0
info = proxy.callRemote('archiver.info').addErrback(_connerror)
archs= proxy.callRemote('archiver.archives').addErrback(_connerror)
X = yield defer.DeferredList([info, archs], fireOnOneErrback=True).addErrback(_connerror)
info, archs = X[0][1], X[1][1]
defer.returnValue(Archive(proxy, conf, info, archs))
class Archive(object):
"""
"""
def __init__(self, proxy, conf, info, archs):
self._proxy = proxy
self.conf = conf
if PVER < info['ver']:
_log.warn('Archive server protocol version %d is newer then ours (%d).\n'+
'Attempting to proceed.', info['ver'], PVER)
self.description = info['desc']
self.statusInfo = dict(enumerate(info['stat']))
self.severityInfo = {}
for S in info['sevr']:
self.severityInfo[int(S['num'])] = S
self.hows = enumerate(info['how'])
self.hows = dict([(a,b) for b,a in self.hows])
# map from name to key
self.__archs = dict([(x['name'],x['key']) for x in archs])
# map from key to name
self.__rarchs = dict([(x['key'],x['name']) for x in archs])
def severity(self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.")
elif len(plan)==0:
# requested range is earlier than first recorded sample.
_log.warn("Query plan empty. No data in or before request time range.")
defer.returnValue(0)
_log.debug("Using plan of %d queries %s", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))
N = yield self._nextraw(0, pv=pv, plan=plan,
Ctot=0, Climit=count, | enumAsInt=enumAsInt, displayMeta=displayMeta)
defer.returnValue(N)
def _nextraw(self, partcount, pv, plan, Ctot, Climit,
callback, cbArgs, cbKWs, chunkSize,
enumAsInt, displayMeta=False):
sofar = partcount + Ctot
if len(plan)==0:
_log.debug("Plan complete: %s", pv)
return sofar # done
elif Climit and sofar>=Climit:
_log.debug("Plan point limit reached: %s", pv)
return sofar # done
count = Climit - sofar if Climit else None
T0, Tend, arch = plan.pop(0)
_log.debug("Query %d of %s %s -> %s for %s", len(plan), self.__rarchs[arch], T0, Tend, pv)
D = self._fetchdata(arch, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=T0, Tend=Tend,
count=count,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
displayMeta=displayMeta)
D.addCallback(self._nextraw, pv, plan, sofar, Climit,
callback, cbArgs, cbKWs, chunkSize, enumAsInt, displayMeta)
return D
@defer.inlineCallbacks
def fetchplot(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
delta = total_seconds(Tend-T0)
if delta<=0.0 or count<=0:
raise ValueError("invalid time range or sample count")
rate = count/delta # average rate in samples per second
if rate>=1.0:
_log.info("Time range too short for plot bin, switching to raw")
D = self.fetchraw(pv, callback, cbArgs, cbKWs, T0, Tend,
None, count, archs, breakDown,
enumAsInt)
defer.returnValue(D)
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
N = 0
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
Rcount = int(math.ceil((Rend[0]-Rstart[0])*rate))
_log.debug("Query %s %s -> %s for %s (%d)", self.__rarchs[K], Rstart, Rend, pv, Rcount)
D = self._fetchdata(K, pv, callback,
cbArgs=cbArgs, cbKWs=cbKWs,
T0=Rstart, Tend=Rend,
count=Rcount,
chunkSize=chunkSize,
enumAsInt=enumAsInt,
how=3)
Nc = yield D
N += Nc
defer.returnValue(N)
@defer.inlineCallbacks
def fetchsnap(self, pvs, T=None,
archs=None, chunkSize=100,
enumAsInt=False):
"""Fetch the value of all requested PVs at the given time
"""
pvs = list(pvs)
archs = self._archname2key(archs)
# values() request time range is inclusive, so Tcur==Tlast is a no-op
sec,ns = Tcur = timeTuple(makeTime(T))
ns+=1000
if ns>1000000000:
ns-=1000000000
sec+=1
Tlast = sec, ns
del sec, ns
Npvs = len(pvs)
NGroups = 1+(Npvs/chunkSize)
assert NGroups>0
values, metas = np.zeros(Npvs, dtype=np.object), np.zeros(Npvs, dtype=dbr_time)
_log.debug('fetchsnap at %s %s pvs in %s groups from %s archs',
Tcur, Npvs, NGroups, len(archs))
for igrp in range(NGroups):
Gpvs = pvs[igrp::NGroups]
if len(Gpvs)==0:
continue
Rval = values[igrp::NGroups]
Rmeta= metas[igrp::NGroups]
for arch in archs:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],Gpvs,Tcur,Tlast,2,0)
D = self._proxy.callRemote('archiver.values',
arch, Gpvs,
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
2, 0).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
results = yield D
except:
_log.fatal('Query fails')
raise
assert len(results)==len(Gpvs)
for idx, data in enumerate(results):
assert data['name']==Gpvs[idx], 'Results arrived out of order'
if len(data['values'])==0:
continue # no data for this one...
E = data['values'][-1]
if Rval[idx] is not None and Rmeta[idx]['sec']>E['secs']:
continue # too old
Rval[idx] = E['value']
Rmeta[idx] = (E['sevr'], E['stat'], E['secs'], E['nano'])
defer.returnValue((values, metas)) | callback=callback, cbArgs=cbArgs,
cbKWs=cbKWs, chunkSize=chunkSize, | random_line_split |
app.js | //WorldStats
//My first real project
//Started 4-11-15
/** Packages to install with NPM
bcrypt -- for password encryption (but require in user.js file)
body-parser -- for response handling
ejs -- for view files
express
express-session
method-override -- to enable PUT/PATCH and DELETE verbs
pg
pg-hstore
request
sequelize -- for database interaction
sequelize-cli
session
**/
//REQUIRE the needed libraries
var bodyParser = require('body-parser');
var express = require('express');
var methodOverride = require('method-override');
var pg = require('pg');
var request = require('request'); //needed for HTTP API access
var session = require('express-session');
var sql = require('./models'); //include the PostgreSQL database ***
var app = express(); //begin express **
//This is secondary JS file with most of the game logic
var gameStuff = require('./game');
//APP.SET to set main settings
app.set("view engine","ejs");
//APP.USE for middleware elements
app.use(bodyParser.urlencoded({extended: true})); //***
app.use(methodOverride("_method")); //***
app.use(express.static('public')); //the 'static' directory holds CSS files, images, etc.
app.use(session({ //***
secret: 'only for Worldstats',
resave: false,
saveUninitialized: true
}));
app.use('/', function(req,res,next){
req.login = function(user){
req.session.userId = user.id;
req.session.screen_name = user.screen_name;
};
req.currentUser = function() { ///***** WHY DOES THIS HAVE TWO RETURN STATEMENTS
return sql.Player.find({
where: { id: req.session.userId }
}).then(function(user) {
req.user = user;
return user;
})
};
req.logout = function() {
req.session.userId = null;
req.user = null;
};
next(); //move on to next middleware
},
gameStuff); //can only have one '/' level app to use, so put in gameStuff here
var loggedIn = function(req,res,next) {
if (req.session.userId) {
next();
} else {
res.redirect('/');
}
};
//For every page that should be limited when a Player is logged in, apply loggedIn middleware below:
app.use('/profile',loggedIn);
//app.use('/pregame',loggedIn); //FOR SOME REASON THIS MAY BE CAUSING CONFLICT WITH pregame's own authorization
app.use('/question', loggedIn);
app.use('/answer',loggedIn);
app.use('/nextquestion',loggedIn);
//Define the various ROUTES
//Public routes available without login are: 1) welcome page, 2) high scores page, 3) signup page
//ROOT route for welcome page
app.get('/', function(req,res){
console.log("Hello world!");
res.render('index.ejs');
});
//High scores page
app.get('/highscores', function(req,res){
console.log("Hello from highscores route")
sql.Score.findAll({limit: 10, order: '"game_score" DESC', include:[{model:sql.Player}]}).then(function(scoreData){
//console.log(scoreData);
var highScoreArray = [];
for (var i = 0; i < scoreData.length; i++) {
var highScore = scoreData[i].dataValues.game_score;
var highDate = scoreData[i].dataValues.date_played;
var highPlayer = scoreData[i].dataValues.Player.dataValues.screen_name;
highScoreArray.push([highScore,highDate,highPlayer]);
};
console.log("This is highScoreArray")
console.log(highScoreArray);
res.render('highscores.ejs',{ejsScoreArray:highScoreArray});
})
});
//Create new player page
app.get('/players/new', function(req,res){
res.render('signup.ejs');
// res.send("Be a player!");
});
//Once user submits form on create new player page, process info with route below:
app.post('/players', function(req,res){
var newScreenName = req.body.signupScreenName;
var newEmail = req.body.signupEmail;
var newPassword = req.body.signupPassword;
console.log(newScreenName,newEmail,newPassword);
console.log(typeof sql.Player.createSecure);
sql.Player.createSecure(newScreenName,newEmail,newPassword).then(
function(newUser){
res.redirect('/login');
});
});
//Login page
app.get('/login', function(req,res){
res.render('login.ejs');
});
app.post('/login', function(req,res){
var uScreenName = req.body.loginScreenName;
var uPassword = req.body.loginPassword;
console.log(uScreenName,uPassword);
sql.Player.authenticate(uScreenName,uPassword).then(
function(user){
if (user) {
//this means a user was returned by authenticate function, so password valid
req.login(user);
res.redirect('/pregame');
} else {
//this means no user wa`s returned (false was returned), so login credentials invalid
res.render('login.ejs'); //*** Add "login failed" error message to user
}
})
});
//Logout page
app.get('/logout', function(req,res){
req.logout();
res.redirect('/');
});
//Startgame with different levels
app.get('/startgame/:level', function(req, res){
console.log("Hello from startgame");
var level = req.params.level;
console.log("Level");
console.log(level);
req.setupGame(level);
res.redirect('/question');
});
//Private routes that are only available to players after log-in
//Authorization is handled via LoggedIn middleware function
//Profile page -- for edit
// app.get('/players/:id', function(req,res){ //*** STILL TO CONFIRM THIS WORKS AS URL PARAM
// console.log("Hello from profile page")
// console.log(req.currentUser);
// res.send(currentUser);
// })
//Profile page
app.get('/profile', function(req,res){
console.log("Hello from profile route");
var scoreObject = {};
var profileObject = {};
req.currentUser().then(function(foundPlayer){
//console.log("This is found player",foundPlayer);
if (foundPlayer) {
sql.Score.findAll({where: {PlayerId:req.session.userId}, limit: 3, order: '"game_score" DESC'}).then(function(myScores){
scoreObject = myScores;
profileObject = {"score":scoreObject,"player":foundPlayer};
res.render('profile',{ejsProfile:profileObject});
}) //end of myScores function
} else {
res.redirect('/login');
}
})
});
/** Testing the shift of game code to separate file
// and shift of game-consistent variables to req.session.varName
app.get('/teststeven',function(req,res){
req.setupGame();
//console.log()
res.redirect('/isworking');
});
app.get('/isworking', function(req,res){
console.log(req.session.gameScore);
});
**/
//Pregame page
app.get('/pregame', function(req,res){
req.currentUser().then(function(foundPlayer){
if (foundPlayer) { //This if is redundant -- but will leave it to have Player name on pregame page
//req.setupGame(); //initialize what is needed for the game MOVE TO startgame
res.render('pregame',{ejsFoundPlayer:foundPlayer});
} else {
res.redirect('/login');
}
})
});
//Question page
app.get('/question', function(req,res){
var renderIt = function(data){
res.render('question.ejs',{ejsQuestionData:data});
};
req.playBall(renderIt);
});
//Answer page
app.get('/answer', function(req,res){
console.log("Hello from answer page");
var playerAnswer = [];
var correctAnswer = [];
var answerCountryandValue = req.session.countryAndValueData;
for (var id in req.query) {
playerAnswer.push(req.query[id]);
// console.log("This is the answer value",id, req.query[id]);
};
for (var i = 0; i < answerCountryandValue.length; i++) {
correctAnswer.push(answerCountryandValue[i][0]);
};
console.log("playerAnswer",playerAnswer);
console.log("correctAnswer",correctAnswer);
//Send player answer and real answer to get scored
playerResults = compareAnswers(playerAnswer,answerCountryandValue);
//Use the score results to update score
req.session.gameScore += playerResults.numCorrect;
//Add info to gameSummary which is in req.session
req.session.gameSummary.push([req.session.currentRound,req.session.metricShortName,req.session.gameScore]);
console.log("this is gameSummary")
console.log(req.session.gameSummary);
console.log("playerResults")
console.log(playerResults);
res.render('answer.ejs',{ejsAnswer:playerResults});
});
var compareAnswers = function(playerAnswer,fullAnswer){
//this takes two arrays and compares how many items are the same and provides an object back with:
// {"numCorrect":#,
// "whichWrong":[index #s of wrong answer, wrong answer ];
console.log("Hello from compareAnswers");
var correctScore = 0;
var answerMatrix = [];
for (var i = 0; i < fullAnswer.length; i++) | ;
var scoreKey = {
"numCorrect":correctScore,
"answerKey":answerMatrix
};
return scoreKey;
};
app.get('/nextquestion', function(req,res){
if (req.session.nextRound >= req.session.maxRounds) {
//nextRound was already incremented in game.js -- so if nextRound is already beyond maxRounds, then game over!
console.log("Start gameover process");
console.log(req.session.gameSummary);
//Assemble object with data to present on gameover page
//FOR NOW JUST PASS FINAL SCORE AS MVP APPROACH ***
var gameFinalStats = {"finalScore":req.session.gameScore}; //NEED TO ADD ADDITIONAL DATA TO OBJECT
console.log("req.session.gameScore is",req.session.gameScore);
console.log("req.session.maxRounds is ",req.session.maxRounds);
console.log("req.session.countriesPerRound is ",req.session.countriesPerRound);
//Increment this player's CUMULATIVE score in Player table
sql.Player.find({where:{id:req.session.userId}}).then(function(whoPlayed){
console.log("Hello from cumulative update")
var lifetime = whoPlayed.cumulative_score;
console.log("Prior lifetime score ",lifetime);
lifetime += req.session.gameScore;
console.log("New lifetime score ",lifetime);
whoPlayed.cumulative_score = lifetime; whoPlayed.save();})
//Post scores to Score table in SQL and then render gameover page
//POSTING DIRECTLY, RATHER THAN CALLING AN INSTANCE METHOD
var now = new Date();
sql.Score.create({
game_score:req.session.gameScore,
rounds_played:req.session.maxRounds,
date_played:now,
PlayerId: req.session.userId,
countries_per_round: req.session.countriesPerRound
}).then(function(){
res.render('gameover.ejs',{ejsGameStats:gameFinalStats});
})
// console.log("About to call addNewScore")
// req.currentUser().then(function(who){
// if (who) {
// who.addNewScore(req.session.gameScore,req.session.maxRounds).then(function(){
// res.render('gameover.ejs',{ejsGameStats:gameFinalStats});
// })
// } else {
// //here if no current user found... should not happen if authorization works right
// res.redirect('/login');
// }
// })
} else { //still playing, so reload question page
res.redirect('/question');
}
});
// //Postgame page
// app.get('/postgame',function(req,res){
// res.render('gameover.ejs',{ejsGameSummary:req.session.gameSummary});
// });
app.get('/startover', function(req,res){
console.log("Hello from startover route");
res.redirect('/pregame');
});
//Start the server listening on port 3000
app.listen(process.env.PORT || 3000, function (){ //This allows app to run either via Heroku *or * locally
console.log("Don't blame me. I'm an interpreter. I'm not supposed to know a power socket from a computer terminal. ");
});
| {
if (playerAnswer[i] === fullAnswer[i][0]) {
correctScore++;
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],"Correct"]);
} else {
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],playerAnswer[i]]);
}
} | conditional_block |
app.js | //WorldStats
//My first real project
//Started 4-11-15
/** Packages to install with NPM
bcrypt -- for password encryption (but require in user.js file)
body-parser -- for response handling
ejs -- for view files
express
express-session
method-override -- to enable PUT/PATCH and DELETE verbs
pg
pg-hstore
request
sequelize -- for database interaction
sequelize-cli
session
**/
//REQUIRE the needed libraries
var bodyParser = require('body-parser');
var express = require('express');
var methodOverride = require('method-override');
var pg = require('pg');
var request = require('request'); //needed for HTTP API access
var session = require('express-session');
var sql = require('./models'); //include the PostgreSQL database ***
var app = express(); //begin express **
//This is secondary JS file with most of the game logic
var gameStuff = require('./game');
//APP.SET to set main settings
app.set("view engine","ejs");
//APP.USE for middleware elements
app.use(bodyParser.urlencoded({extended: true})); //***
app.use(methodOverride("_method")); //***
app.use(express.static('public')); //the 'static' directory holds CSS files, images, etc.
app.use(session({ //***
secret: 'only for Worldstats',
resave: false,
saveUninitialized: true
}));
app.use('/', function(req,res,next){
req.login = function(user){
req.session.userId = user.id;
req.session.screen_name = user.screen_name;
};
req.currentUser = function() { ///***** WHY DOES THIS HAVE TWO RETURN STATEMENTS
return sql.Player.find({
where: { id: req.session.userId }
}).then(function(user) {
req.user = user;
return user;
})
};
req.logout = function() {
req.session.userId = null;
req.user = null;
};
next(); //move on to next middleware
},
gameStuff); //can only have one '/' level app to use, so put in gameStuff here
var loggedIn = function(req,res,next) {
if (req.session.userId) {
next();
} else {
res.redirect('/');
}
};
//For every page that should be limited when a Player is logged in, apply loggedIn middleware below:
app.use('/profile',loggedIn);
//app.use('/pregame',loggedIn); //FOR SOME REASON THIS MAY BE CAUSING CONFLICT WITH pregame's own authorization
app.use('/question', loggedIn);
app.use('/answer',loggedIn);
app.use('/nextquestion',loggedIn);
//Define the various ROUTES
//Public routes available without login are: 1) welcome page, 2) high scores page, 3) signup page
//ROOT route for welcome page
app.get('/', function(req,res){
console.log("Hello world!");
res.render('index.ejs');
});
//High scores page
app.get('/highscores', function(req,res){
console.log("Hello from highscores route")
sql.Score.findAll({limit: 10, order: '"game_score" DESC', include:[{model:sql.Player}]}).then(function(scoreData){
//console.log(scoreData);
var highScoreArray = [];
for (var i = 0; i < scoreData.length; i++) {
var highScore = scoreData[i].dataValues.game_score;
var highDate = scoreData[i].dataValues.date_played;
var highPlayer = scoreData[i].dataValues.Player.dataValues.screen_name;
highScoreArray.push([highScore,highDate,highPlayer]);
};
console.log("This is highScoreArray")
console.log(highScoreArray);
res.render('highscores.ejs',{ejsScoreArray:highScoreArray});
})
});
//Create new player page
app.get('/players/new', function(req,res){
res.render('signup.ejs');
// res.send("Be a player!");
});
//Once user submits form on create new player page, process info with route below:
app.post('/players', function(req,res){
var newScreenName = req.body.signupScreenName;
var newEmail = req.body.signupEmail;
var newPassword = req.body.signupPassword;
console.log(newScreenName,newEmail,newPassword);
console.log(typeof sql.Player.createSecure);
sql.Player.createSecure(newScreenName,newEmail,newPassword).then(
function(newUser){
res.redirect('/login');
});
});
//Login page
app.get('/login', function(req,res){
res.render('login.ejs');
});
app.post('/login', function(req,res){
var uScreenName = req.body.loginScreenName;
var uPassword = req.body.loginPassword;
console.log(uScreenName,uPassword);
sql.Player.authenticate(uScreenName,uPassword).then(
function(user){
if (user) {
//this means a user was returned by authenticate function, so password valid
req.login(user);
res.redirect('/pregame');
} else {
//this means no user wa`s returned (false was returned), so login credentials invalid
res.render('login.ejs'); //*** Add "login failed" error message to user
}
})
});
//Logout page
app.get('/logout', function(req,res){
req.logout();
res.redirect('/');
});
//Startgame with different levels
app.get('/startgame/:level', function(req, res){
console.log("Hello from startgame");
var level = req.params.level;
console.log("Level");
console.log(level);
req.setupGame(level);
res.redirect('/question');
});
//Private routes that are only available to players after log-in
//Authorization is handled via LoggedIn middleware function
//Profile page -- for edit
// app.get('/players/:id', function(req,res){ //*** STILL TO CONFIRM THIS WORKS AS URL PARAM
// console.log("Hello from profile page")
// console.log(req.currentUser);
// res.send(currentUser);
// })
//Profile page
app.get('/profile', function(req,res){
console.log("Hello from profile route");
var scoreObject = {};
var profileObject = {};
req.currentUser().then(function(foundPlayer){
//console.log("This is found player",foundPlayer);
if (foundPlayer) {
sql.Score.findAll({where: {PlayerId:req.session.userId}, limit: 3, order: '"game_score" DESC'}).then(function(myScores){
scoreObject = myScores;
profileObject = {"score":scoreObject,"player":foundPlayer};
res.render('profile',{ejsProfile:profileObject});
}) //end of myScores function
} else {
res.redirect('/login');
}
})
});
/** Testing the shift of game code to separate file
// and shift of game-consistent variables to req.session.varName
app.get('/teststeven',function(req,res){
req.setupGame();
//console.log()
res.redirect('/isworking');
});
app.get('/isworking', function(req,res){
console.log(req.session.gameScore);
});
**/
//Pregame page
app.get('/pregame', function(req,res){
req.currentUser().then(function(foundPlayer){
if (foundPlayer) { //This if is redundant -- but will leave it to have Player name on pregame page
//req.setupGame(); //initialize what is needed for the game MOVE TO startgame
res.render('pregame',{ejsFoundPlayer:foundPlayer});
} else {
res.redirect('/login');
}
})
});
//Question page
app.get('/question', function(req,res){
var renderIt = function(data){
res.render('question.ejs',{ejsQuestionData:data});
};
req.playBall(renderIt);
});
//Answer page | var answerCountryandValue = req.session.countryAndValueData;
for (var id in req.query) {
playerAnswer.push(req.query[id]);
// console.log("This is the answer value",id, req.query[id]);
};
for (var i = 0; i < answerCountryandValue.length; i++) {
correctAnswer.push(answerCountryandValue[i][0]);
};
console.log("playerAnswer",playerAnswer);
console.log("correctAnswer",correctAnswer);
//Send player answer and real answer to get scored
playerResults = compareAnswers(playerAnswer,answerCountryandValue);
//Use the score results to update score
req.session.gameScore += playerResults.numCorrect;
//Add info to gameSummary which is in req.session
req.session.gameSummary.push([req.session.currentRound,req.session.metricShortName,req.session.gameScore]);
console.log("this is gameSummary")
console.log(req.session.gameSummary);
console.log("playerResults")
console.log(playerResults);
res.render('answer.ejs',{ejsAnswer:playerResults});
});
var compareAnswers = function(playerAnswer,fullAnswer){
//this takes two arrays and compares how many items are the same and provides an object back with:
// {"numCorrect":#,
// "whichWrong":[index #s of wrong answer, wrong answer ];
console.log("Hello from compareAnswers");
var correctScore = 0;
var answerMatrix = [];
for (var i = 0; i < fullAnswer.length; i++) {
if (playerAnswer[i] === fullAnswer[i][0]) {
correctScore++;
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],"Correct"]);
} else {
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],playerAnswer[i]]);
}
};
var scoreKey = {
"numCorrect":correctScore,
"answerKey":answerMatrix
};
return scoreKey;
};
app.get('/nextquestion', function(req,res){
if (req.session.nextRound >= req.session.maxRounds) {
//nextRound was already incremented in game.js -- so if nextRound is already beyond maxRounds, then game over!
console.log("Start gameover process");
console.log(req.session.gameSummary);
//Assemble object with data to present on gameover page
//FOR NOW JUST PASS FINAL SCORE AS MVP APPROACH ***
var gameFinalStats = {"finalScore":req.session.gameScore}; //NEED TO ADD ADDITIONAL DATA TO OBJECT
console.log("req.session.gameScore is",req.session.gameScore);
console.log("req.session.maxRounds is ",req.session.maxRounds);
console.log("req.session.countriesPerRound is ",req.session.countriesPerRound);
//Increment this player's CUMULATIVE score in Player table
sql.Player.find({where:{id:req.session.userId}}).then(function(whoPlayed){
console.log("Hello from cumulative update")
var lifetime = whoPlayed.cumulative_score;
console.log("Prior lifetime score ",lifetime);
lifetime += req.session.gameScore;
console.log("New lifetime score ",lifetime);
whoPlayed.cumulative_score = lifetime; whoPlayed.save();})
//Post scores to Score table in SQL and then render gameover page
//POSTING DIRECTLY, RATHER THAN CALLING AN INSTANCE METHOD
var now = new Date();
sql.Score.create({
game_score:req.session.gameScore,
rounds_played:req.session.maxRounds,
date_played:now,
PlayerId: req.session.userId,
countries_per_round: req.session.countriesPerRound
}).then(function(){
res.render('gameover.ejs',{ejsGameStats:gameFinalStats});
})
// console.log("About to call addNewScore")
// req.currentUser().then(function(who){
// if (who) {
// who.addNewScore(req.session.gameScore,req.session.maxRounds).then(function(){
// res.render('gameover.ejs',{ejsGameStats:gameFinalStats});
// })
// } else {
// //here if no current user found... should not happen if authorization works right
// res.redirect('/login');
// }
// })
} else { //still playing, so reload question page
res.redirect('/question');
}
});
// //Postgame page
// app.get('/postgame',function(req,res){
// res.render('gameover.ejs',{ejsGameSummary:req.session.gameSummary});
// });
app.get('/startover', function(req,res){
console.log("Hello from startover route");
res.redirect('/pregame');
});
//Start the server listening on port 3000
app.listen(process.env.PORT || 3000, function (){ //This allows app to run either via Heroku *or * locally
console.log("Don't blame me. I'm an interpreter. I'm not supposed to know a power socket from a computer terminal. ");
}); | app.get('/answer', function(req,res){
console.log("Hello from answer page");
var playerAnswer = [];
var correctAnswer = []; | random_line_split |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn | () -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if !app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if !output.status.success() {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
}
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
}
| parse_xctrace_template_list | identifier_name |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if !app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if !output.status.success() |
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
}
| {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
} | conditional_block |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str |
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if !app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if !output.status.success() {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
}
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
}
| {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
} | identifier_body |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
| "Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if !app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if !output.status.success() {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
}
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
} | /// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name { | random_line_split |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]: ../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo , 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else |
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| {
Err(FullscreenWMError::UnknownWindow(window))
} | conditional_block |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]: ../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo , 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> |
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else {
Err(FullscreenWMError::UnknownWindow(window))
}
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
} | identifier_body |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]: ../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo , 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>, | /// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else {
Err(FullscreenWMError::UnknownWindow(window))
}
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
} | }
/// The errors that this window manager can return.
/// | random_line_split |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]: ../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo , 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn | (&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else {
Err(FullscreenWMError::UnknownWindow(window))
}
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| description | identifier_name |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> |
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn load_cosmogony(
reader: impl std::io::BufRead,
format: OutputFormat,
) -> Result<Cosmogony, Error> {
match format {
OutputFormat::JsonGz => {
let r = flate2::read::GzDecoder::new(reader);
serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::Json => {
serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::JsonStream => from_json_stream(reader),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(reader);
let r = std::io::BufReader::new(r);
from_json_stream(r)
}
}
}
| {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
} | identifier_body |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn | (
reader: impl std::io::BufRead,
format: OutputFormat,
) -> Result<Cosmogony, Error> {
match format {
OutputFormat::JsonGz => {
let r = flate2::read::GzDecoder::new(reader);
serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::Json => {
serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::JsonStream => from_json_stream(reader),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(reader);
let r = std::io::BufReader::new(r);
from_json_stream(r)
}
}
}
| load_cosmogony | identifier_name |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => |
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn load_cosmogony(
reader: impl std::io::BufRead,
format: OutputFormat,
) -> Result<Cosmogony, Error> {
match format {
OutputFormat::JsonGz => {
let r = flate2::read::GzDecoder::new(reader);
serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::Json => {
serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::JsonStream => from_json_stream(reader),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(reader);
let r = std::io::BufReader::new(r);
from_json_stream(r)
}
}
}
| {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
} | conditional_block |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
} | }
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn load_cosmogony(
reader: impl std::io::BufRead,
format: OutputFormat,
) -> Result<Cosmogony, Error> {
match format {
OutputFormat::JsonGz => {
let r = flate2::read::GzDecoder::new(reader);
serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::Json => {
serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::JsonStream => from_json_stream(reader),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(reader);
let r = std::io::BufReader::new(r);
from_json_stream(r)
}
}
} | random_line_split | |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like ..#.. => . means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## => . means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like .##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0: ...#..#.#..##......###...###...........
1: ...#...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn | (state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a != 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... => .",
]
.iter()
.map(|x| x.to_string())
.collect()
}
#[test]
fn test_convert_state_str_to_vec() {
let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
}
| convert_state_str_to_vec | identifier_name |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like ..#.. => . means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## => . means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like .##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0: ...#..#.#..##......###...###...........
1: ...#...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a != 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... => .",
]
.iter()
.map(|x| x.to_string()) | let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
} | .collect()
}
#[test]
fn test_convert_state_str_to_vec() { | random_line_split |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like ..#.. => . means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## => . means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like .##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0: ...#..#.#..##......###...###...........
1: ...#...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 |
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... => .",
]
.iter()
.map(|x| x.to_string())
.collect()
}
#[test]
fn test_convert_state_str_to_vec() {
let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
}
| {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a != 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
} | identifier_body |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like ..#.. => . means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## => . means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like .##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0: ...#..#.#..##......###...###...........
1: ...#...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' | else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a != 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... => .",
]
.iter()
.map(|x| x.to_string())
.collect()
}
#[test]
fn test_convert_state_str_to_vec() {
let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
}
| {
PotState::HasPlant
} | conditional_block |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn from_registry(client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev, .. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need | Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
fn split_blob(haystack: &[u8]) -> impl Iterator<Item = &[u8]> {
struct Split<'a> {
haystack: &'a [u8],
}
impl<'a> Iterator for Split<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
if self.haystack.is_empty() {
return None;
}
let (ret, remaining) = match memchr::memchr(b'\n', self.haystack) {
Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]),
None => (self.haystack, &[][..]),
};
self.haystack = remaining;
Some(ret)
}
}
Split { haystack }
}
split_blob(blob).filter_map(|line| {
std::str::from_utf8(line).ok().and_then(|lstr| {
// We need to get the version, as each entry in the .cache
// entry is a tuple of the version and the summary
lstr.find("\"vers\":")
.map(|ind| ind + 7)
.and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
.and_then(|bind| {
lstr[bind..]
.find('"')
.map(|eind| (&line[bind..bind + eind], line))
})
})
})
}
#[cfg(test)]
mod test {
use super::iter_index_entries;
#[test]
fn parses_unpretty() {
const BLOB: &[u8] = include_bytes!("../tests/unpretty-wasi");
let expected = [
"0.0.0",
"0.3.0",
"0.4.0",
"0.5.0",
"0.6.0",
"0.7.0",
"0.9.0+wasi-snapshot-preview1",
"0.10.0+wasi-snapshot-preview1",
];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
#[test]
fn parses_pretty() {
const BLOB: &[u8] = include_bytes!("../tests/pretty-crate");
let expected = ["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
} | repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
| random_line_split |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn from_registry(client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev, .. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> |
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
fn split_blob(haystack: &[u8]) -> impl Iterator<Item = &[u8]> {
struct Split<'a> {
haystack: &'a [u8],
}
impl<'a> Iterator for Split<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
if self.haystack.is_empty() {
return None;
}
let (ret, remaining) = match memchr::memchr(b'\n', self.haystack) {
Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]),
None => (self.haystack, &[][..]),
};
self.haystack = remaining;
Some(ret)
}
}
Split { haystack }
}
split_blob(blob).filter_map(|line| {
std::str::from_utf8(line).ok().and_then(|lstr| {
// We need to get the version, as each entry in the .cache
// entry is a tuple of the version and the summary
lstr.find("\"vers\":")
.map(|ind| ind + 7)
.and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
.and_then(|bind| {
lstr[bind..]
.find('"')
.map(|eind| (&line[bind..bind + eind], line))
})
})
})
}
#[cfg(test)]
mod test {
use super::iter_index_entries;
#[test]
fn parses_unpretty() {
const BLOB: &[u8] = include_bytes!("../tests/unpretty-wasi");
let expected = [
"0.0.0",
"0.3.0",
"0.4.0",
"0.5.0",
"0.6.0",
"0.7.0",
"0.9.0+wasi-snapshot-preview1",
"0.10.0+wasi-snapshot-preview1",
];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
#[test]
fn parses_pretty() {
const BLOB: &[u8] = include_bytes!("../tests/pretty-crate");
let expected = ["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
}
| {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
} | identifier_body |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn | (client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev, .. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
fn split_blob(haystack: &[u8]) -> impl Iterator<Item = &[u8]> {
struct Split<'a> {
haystack: &'a [u8],
}
impl<'a> Iterator for Split<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
if self.haystack.is_empty() {
return None;
}
let (ret, remaining) = match memchr::memchr(b'\n', self.haystack) {
Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]),
None => (self.haystack, &[][..]),
};
self.haystack = remaining;
Some(ret)
}
}
Split { haystack }
}
split_blob(blob).filter_map(|line| {
std::str::from_utf8(line).ok().and_then(|lstr| {
// We need to get the version, as each entry in the .cache
// entry is a tuple of the version and the summary
lstr.find("\"vers\":")
.map(|ind| ind + 7)
.and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
.and_then(|bind| {
lstr[bind..]
.find('"')
.map(|eind| (&line[bind..bind + eind], line))
})
})
})
}
#[cfg(test)]
mod test {
use super::iter_index_entries;
#[test]
fn parses_unpretty() {
const BLOB: &[u8] = include_bytes!("../tests/unpretty-wasi");
let expected = [
"0.0.0",
"0.3.0",
"0.4.0",
"0.5.0",
"0.6.0",
"0.7.0",
"0.9.0+wasi-snapshot-preview1",
"0.10.0+wasi-snapshot-preview1",
];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
#[test]
fn parses_pretty() {
const BLOB: &[u8] = include_bytes!("../tests/pretty-crate");
let expected = ["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
}
| from_registry | identifier_name |
repooler.py | #!/usr/bin/env python2.7
import couchdb
import re
import math
from collections import defaultdict, Counter, OrderedDict
import unicodedata
import csv
import copy
import click
from time import time
from datetime import datetime
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims
from genologics.entities import Process
#Assumes ind. sample conc measurements have failed. As such it relies on changing relative volume on already normalized samples and structure
#Structure are retained as conc measurements failure means there's no way to know conc. delta between samples from seperate poolss
def connection():
user = ''
pw = ''
couch = couchdb.Server('http://' + user + ':' + pw + '@tools.scilifelab.se:5984')
return couch
#Fetches the structure of a project
def proj_struct(couch, project, target_clusters):
db = couch['x_flowcells']
view = db.view('names/project_ids_list')
fc_track = defaultdict(set)
#Adds flowcells to ALL projects. Due to intractions its easier to just get FCs for ALL projects
for rec in view.rows:
fc = ''.join(rec.key)
fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
id = ''.join(rec.id)
id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
for projs in rec.value:
projs = ''.join(projs)
projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
if fc_track[projs] == set([]):
fc_track[projs] = dict()
fc_track[projs][fc] = id
#Adds lanes and samples to flowcells, includes samples from other projects if they share lane
if fc_track[project] == set([]):
raise Exception('Error: Project not logged in x_flowcells database!')
for fc, id in fc_track[project].items():
try:
entry = db[id]['illumina']
except KeyError:
print "Error: Illumina table for db entry" , id, "doesn't exist!"
entry = db[id]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
for index in xrange(0, len(entry)):
lane = entry[index]['Lane']
sample = entry[index]['Sample']
if 'Clusters' in entry[index]:
clusters = entry[index]['Clusters']
else:
clusters = entry[index]['PF Clusters']
clusters = int(re.sub(r",", "", clusters))
if not isinstance(fc_track[project][fc], dict):
fc_track[project][fc] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)):
if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList)
for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!')
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
for p in pros:
#For all artifacts in process
for o in p.all_outputs():
#If artifact is analyte type and has project name in sample
if o.type=="Analyte" and project in o.name:
location[o.name.split()[0]] = list()
location[o.name.split()[0]].append(o.location[0].id)
location[o.name.split()[0]].append(o.location[1])
#PRINT section
#Print stats including duplicates
timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
sumName = projName, "_summary_", timestamp,".txt"
sumName = ''.join(sumName)
with open(sumName, "w") as summary:
if sum(req_lanes.values()) != 0:
OPT = sum(total_lanes)/sum(req_lanes.values())
else:
OPT = 0
output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
output = ''.join(output)
summary.write( output )
output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
output = ''.join(output)
summary.write( output )
bin = 0
for index in xrange(1, len(lane_maps)+1):
bin += 1
summary.write('\n')
output = "Wells ", str(bin) , '-' , str(bin+int(total_lanes[index-1])-1),':','\n'
output = ''.join(output)
summary.write( output )
bin += int(total_lanes[index-1]-1)
for counter in xrange(1, len(lane_maps[index])):
output = str(lane_maps[index][counter]),' ', str(acc_ratios[index][counter]), "%",'\n'
output = ''.join(output)
summary.write( output )
#Creates csv
name = projName,"_repool_",timestamp,".csv"
name = ''.join(name)
wells = ['Empty','A','B','C','D','E','F','G','H']
#Index 0 is number, index 1 is Letter
wellIndex = [1, 1]
destNo = 0
with open(name, 'w') as csvfile:
writer = csv.writer(csvfile)
for index in xrange(1, len(lane_maps)+1):
for dupes in xrange(1, int(total_lanes[index-1])+1):
if lane_maps[index] == 0:
raise Exception('Error: Project not logged in x_flowcells database!')
for counter in xrange(1, len(lane_maps[index])):
#<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
#Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
sample = lane_maps[index][counter]
position = wells[wellIndex[1]],':',str(wellIndex[0])
position = ''.join(position)
try:
output = location[sample][0],location[sample][1],str(int(acc_ratios[index][counter]*2)),str(destid[destNo]),position
except KeyError:
print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
if not acc_ratios[index][counter] == 0:
writer.writerow(output)
#Increment wellsindex
if not acc_ratios[index][counter] == 0:
if not wellIndex[1] >= 8:
wellIndex[1] += 1
else:
wellIndex[1] = 1
if not wellIndex[0] >= 12:
wellIndex[0] += 1
else:
wellIndex[0] = 1
destNo += 1
try:
destid[destNo]
except IndexError:
print "Critical error; not enough destination plates provided"
@click.command()
@click.option('--project_id', required=True,help='REQUIRED: ID of project to repool. Examples:P2652, P1312 etc.')
@click.option('--dest_plate_list', default=['dp_1','dp_2','dp_3','dp_4','dp_5'],
help='List of destination plates for the robot\'s csv file. Include too many rather than too few; excess will be unused Default:[dp_1,dp_2,dp_3,dp_4,dp_5]')
@click.option('--target_clusters', default=320*1000000, help='Threshold of clusters per sample. \nDefault:320*1000000')
@click.option('--clusters_per_lane', default=380*1000000, help='Expected clusters generated by a single lane/well. \nDefault:380*1000000')
@click.option('--allow_non_dupl_struct', is_flag=True, help='Allow for samples to be present in different types of flowcells')
def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct):
"""Application that calculates samples under threshold for a project, then calculate the optimal composition for reaching the threshold
without altering concentrations nor the structure of the pools. Outputs both a summary as well as a functional csv file."""
couch = connection()
structure = proj_struct(couch, project_id, target_clusters)
[lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)
if allow_non_dupl_struct:
aggregator(lane_maps,clusters_rem,clusters_per_lane)
else:
simple_unique_set(lane_maps)
[ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)
acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)
generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)
if __name__ == '__main__':
| main() | conditional_block | |
repooler.py | #!/usr/bin/env python2.7
import couchdb
import re
import math
from collections import defaultdict, Counter, OrderedDict
import unicodedata
import csv
import copy
import click
from time import time
from datetime import datetime
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims
from genologics.entities import Process
#Assumes ind. sample conc measurements have failed. As such it relies on changing relative volume on already normalized samples and structure
#Structure are retained as conc measurements failure means there's no way to know conc. delta between samples from seperate poolss
def | ():
user = ''
pw = ''
couch = couchdb.Server('http://' + user + ':' + pw + '@tools.scilifelab.se:5984')
return couch
#Fetches the structure of a project
def proj_struct(couch, project, target_clusters):
    """Map a project's flowcells -> lanes -> samples -> cluster counts.

    Reads the 'x_flowcells' CouchDB database through the
    'names/project_ids_list' view and returns a nested dict of the form
    {flowcell: {lane: {sample: clusters}}} for *project* only.

    Args:
        couch: couchdb.Server handle (see connection()).
        project: project id string, e.g. 'P2652'.
        target_clusters: cluster threshold; used as a stand-in cluster count
            for samples belonging to OTHER projects that share a lane.

    Raises:
        Exception: if the project is not present in x_flowcells.
    """
    db = couch['x_flowcells']
    view = db.view('names/project_ids_list')
    # Keyed by project id; the defaultdict's set() default is replaced by a
    # {flowcell: doc_id} dict the first time a project is seen.
    fc_track = defaultdict(set)
    #Adds flowcells to ALL projects. Due to intractions its easier to just get FCs for ALL projects
    for rec in view.rows:
        fc = ''.join(rec.key)
        fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
        id = ''.join(rec.id)  # NOTE: shadows the builtin id()
        id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
        for projs in rec.value:
            projs = ''.join(projs)
            projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
            if fc_track[projs] == set([]):
                fc_track[projs] = dict()
            fc_track[projs][fc] = id
    #Adds lanes and samples to flowcells, includes samples from other projects if they share lane
    if fc_track[project] == set([]):
        raise Exception('Error: Project not logged in x_flowcells database!')
    for fc, id in fc_track[project].items():
        try:
            entry = db[id]['illumina']
        except KeyError:
            print "Error: Illumina table for db entry" , id, "doesn't exist!"
        # NOTE(review): if the KeyError above fired, the next line repeats the
        # failing lookup and raises anyway -- the handler only adds a message.
        entry = db[id]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
        for index in xrange(0, len(entry)):
            lane = entry[index]['Lane']
            sample = entry[index]['Sample']
            # Column name differs between demultiplexing report versions.
            if 'Clusters' in entry[index]:
                clusters = entry[index]['Clusters']
            else:
                clusters = entry[index]['PF Clusters']
            clusters = int(re.sub(r",", "", clusters))  # strip thousands separators
            if not isinstance(fc_track[project][fc], dict):
                fc_track[project][fc] = dict()
            if not lane in fc_track[project][fc]:
                fc_track[project][fc][lane] = dict()
            #Only counts samples for the given project, other samples are "auto-filled"
            if project in sample:
                fc_track[project][fc][lane][sample] = clusters
            else:
                fc_track[project][fc][lane][sample] = target_clusters
    #Removes any lanes that don't have any part project samples
    # (Python 2: items() returns a list, so deleting while iterating is safe.)
    for fc, lanes in fc_track[project].items():
        for lane,sample in lanes.items():
            if not any(project in s for s in sample.keys()):
                del fc_track[project][fc][lane]
    return fc_track[project]
def parse_indata(struct, target_clusters):
    """Digest a project structure into unique lane layouts and cluster tallies.

    Args:
        struct: {flowcell: {lane: {sample: clusters}}} from proj_struct().
        target_clusters: desired clusters per sample.

    Returns:
        [lane_maps, clusters_rem, clusters_expr] where lane_maps numbers each
        distinct (reverse-sorted) sample layout from 1, clusters_rem holds the
        remaining clusters needed per sample (target minus produced) and
        clusters_expr the clusters already produced per sample.
    """
    layouts = {}
    remaining = {}
    expressed = {}
    next_id = 1
    for lanes_of_fc in struct.values():
        for sample_counts in lanes_of_fc.values():
            # Collapse identical lane compositions into one numbered layout.
            layout = sorted(sample_counts, reverse=True)
            if layout not in layouts.values():
                layouts[next_id] = layout
                next_id += 1
            # Tally produced clusters and the shortfall against the target.
            for name, reads in sample_counts.items():
                if name not in remaining:
                    remaining[name] = target_clusters
                    expressed[name] = 0
                remaining[name] -= reads
                expressed[name] += reads
    return [layouts, remaining, expressed]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
    """Filter lane_maps down to layouts in which each sample appears exactly
    once, then validate the result with validate_template_struct().

    NOTE(review): the final 'lane_maps = unique_lane_maps' only rebinds the
    LOCAL name -- the caller's dict is not mutated and nothing is returned, so
    the filtering currently only affects the validation call below. Looks
    unintentional; confirm against main()'s usage.
    """
    unique_lane_maps = dict()
    for keyz, valz in lane_maps.items():
        #Fetch what lanes inclusion of given lane excludes
        excluded = list()
        for sample in valz:
            for k, v in lane_maps.items():
                for dupe in v:
                    if dupe == sample and keyz != k and sample != 'Undetermined':
                        excluded.append(k)
                        break
        #Check that none of the excluded lanes have uniquely present samples
        acceptable = True
        for key in excluded:
            total_duplicates = list()
            for values in lane_maps[key]:
                duplicate = 0
                for k, v in lane_maps.items():
                    for dupe in v:
                        # NOTE(review): 'sample' here is the leftover variable
                        # from the loop above; 'values' was presumably
                        # intended -- confirm before changing.
                        if dupe == values and k != key and sample != 'Undetermined':
                            duplicate +=1
                            break
                total_duplicates.append(duplicate)
            if 0 in total_duplicates:
                acceptable = False
                break
        if acceptable:
            #Check that the lane doesn't have sample dupes in the accepted set already
            for entries in valz:
                for kuyz, vulz in unique_lane_maps.items():
                    for things in vulz:
                        if things == entries and entries != 'Undetermined':
                            acceptable = False
                            break
        if acceptable:
            unique_lane_maps[keyz] = valz
    lane_maps = unique_lane_maps
    #ALL SAMPLES PRESENT CHECK
    # summap = []
    # for k in lane_maps.keys():
    #     summap += lane_maps[k]
    # print len(set(summap))
    validate_template_struct(lane_maps)
def aggregator(lane_maps, clusters_rem, clusters_per_lane):
    """Planned (unimplemented) allocator for samples spread over differing
    lane structures.

    Sketch from the original author:
      * find every sample that is also expressed in another structure,
      * sort those structures by duplication and fill them to floor(dups),
        or ceil(dups) when the fractional part exceeds some cutoff,
      * note what remains, then fill the remaining structures to ceil(dups).

    Raises:
        Exception: always -- this path is not yet implemented.
    """
    raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
    """For each pool, compute every sample's ideal fraction of a lane and the
    (fractional and rounded-up) number of lanes the pool needs.

    Returns:
        [ideal_ratios, req_lanes, total_lanes]: per-pool fraction lists,
        per-pool fractional lane requirement, and the ceil'd lane counts.
    """
    ideal_ratios = dict()
    req_lanes = dict()
    for pool_id, samples in lane_maps.items():
        # Total outstanding clusters for this pool; samples already over
        # target (<= 0 remaining) contribute nothing.
        deficit = sum(clusters_rem[s] for s in samples if clusters_rem[s] > 0)
        shares = []
        for s in samples:
            outstanding = clusters_rem[s]
            shares.append(outstanding / float(deficit) if outstanding > 0 else 0.0)
        if shares:
            ideal_ratios[pool_id] = shares
        #Minimal number of required lanes per pool
        req_lanes[pool_id] = deficit / float(clusters_per_lane)
    #Have to be rounded up, rounding down when only using duplicates makes no sense
    total_lanes = [math.ceil(need) for need in req_lanes.values()]
    return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
    """Raise if any sample occurs in more than one lane layout.

    Args:
        lane_maps: {pool_id: [sample, ...]} from parse_indata().

    Raises:
        Exception: when a (non-'Undetermined') sample appears in two or more
            distinct layouts, which this app cannot repool.
    """
    tempList = list()
    for k, v in lane_maps.items():
        # Index 0 is skipped: after the reverse sort upstream 'Undetermined'
        # sorts first -- presumably intentional; TODO confirm.
        for index in range(1, len(v)):
            if not v[index] == 'Undetermined':
                tempList.append(v[index])
    counter = Counter(tempList)
    # range()/.values() behave identically under Python 2 here and also run
    # under Python 3 (original used xrange()/itervalues()).
    for values in counter.values():
        if values > 1:
            # Bug fix: the two fragments previously concatenated without a
            # separating space ("...a sampleis present...").
            raise Exception('Error: This app does NOT handle situations where a sample '
                            'is present in lanes/well with differing structure!')
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
    """Turn ideal read fractions into whole-percent pipetting ratios.

    Mutates ideal_ratios and total_lanes in place; returns acc_ratios, a deep
    copy rounded to integer percent so that each pool sums to 100 without
    under-expressing any sample.

    Args:
        lane_maps: {pool_id: [sample, ...]} (pool ids run 1..N).
        clusters_expr: clusters already produced per sample.
        ideal_ratios: {pool_id: [fraction, ...]} from sample_distributor().
        req_lanes: {pool_id: fractional lanes needed}.
        total_lanes: ceil'd lane counts per pool (incremented when rounding
            gets stuck).
    """
    # Since some samples are strong and some weaksauce, 10% in ideal_ratios
    # does not mean 10% of lane volume: scale by expected/actual reads.
    # Ignores undetermined clusters in the lane total; assumes sample conc
    # can't be altered, i.e. only volume is modified.
    for ind in xrange(1, len(lane_maps.keys())+1):
        #Bases w/o sample are not expected
        if len(lane_maps[ind]) != 1:
            # Even share across the real samples (slot 0 is assumed to be
            # 'Undetermined' after the upstream reverse sort -- confirm).
            exp = 1/float(len(lane_maps[ind])-1)
        else:
            exp = 1
        laneTypeExpr = 0
        counter = 0
        for sample in lane_maps[ind]:
            if not sample == 'Undetermined':
                laneTypeExpr += clusters_expr[sample]
        for sample in lane_maps[ind]:
            # NOTE(review): divides unconditionally -- a pool whose real
            # samples produced zero clusters would raise ZeroDivisionError,
            # and 'Undetermined' is scaled here too.
            act = clusters_expr[sample]/float(laneTypeExpr)
            ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
            counter += 1
    #Normalizes numbers so each pool's ratios sum to 100 (or stay all zero)
    for index in xrange(1, len(ideal_ratios.keys())+1):
        curSum = sum(ideal_ratios[index])
        for sample in xrange(0, len(ideal_ratios[index])):
            if curSum == 0:
                ideal_ratios[index][sample] = 0
            else:
                ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
    # Iteratively rounds to whole percent (min pipette for volume) to reach 100%
    # ideal_ratio * req_lanes.values() = needed
    # acc_ratio * total_lanes = current
    # means a sample can take any whole number between the two
    acc_ratios = copy.deepcopy(ideal_ratios)
    for index in xrange(1, len(ideal_ratios.keys())+1):
        for sample in xrange(0, len(ideal_ratios[index])):
            acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
        if sum(acc_ratios[index]) == 100:
            # NOTE(review): this break exits the POOL loop, leaving later
            # pools un-adjusted -- 'continue' may have been intended; confirm.
            break
        else:
            while sum(acc_ratios[index]) > 100:
                stuck = True
                # Slot 0 ('Undetermined') is never decremented.
                for sample in xrange(1, len(ideal_ratios[index])):
                    # NOTE(review): req_lanes.values()[index-1] indexes a list
                    # (Python 2) and relies on dict order matching pool ids.
                    need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
                    cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
                    if sum(acc_ratios[index]) > 100 and cur >= need:
                        acc_ratios[index][sample] -= 1
                        stuck = False
                    if sum(acc_ratios[index])== 100:
                        break
                # No sample could give up a percent: add a lane and retry.
                if(stuck):
                    total_lanes[index-1] += 1
    return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
    """Write a human-readable summary (.txt) and a robot csv for repooling.

    Queries the LIMS for each sample's source container/well, then emits:
      * <project>_summary_<timestamp>.txt -- lane/pool statistics,
      * <project>_repool_<timestamp>.csv  -- rows of
        <source plate>,<source well>,<volume>,<dest plate>,<dest well>.

    Args:
        project: project id string.
        destid: list of destination plate names (robot csv).
        total_lanes, req_lanes, lane_maps, acc_ratios: outputs of the
            distribution/rounding steps upstream.
    """
    #Gathers the container id and well name for all samples in project
    #Cred to Denis for providing a base epp
    location = dict()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    allProjects = lims.get_projects()
    # NOTE(review): if no project matches, projName stays unbound and the
    # get_processes() call below raises NameError.
    for proj in allProjects:
        if proj.id == project:
            projName = proj.name
            break
    #All normalization processes for project
    norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
    pros=lims.get_processes(type=norms, projectname=projName)
    #For all processes
    for p in pros:
        #For all artifacts in process
        for o in p.all_outputs():
            #If artifact is analyte type and has project name in sample
            if o.type=="Analyte" and project in o.name:
                # location[sample] == [container id, well name]
                location[o.name.split()[0]] = list()
                location[o.name.split()[0]].append(o.location[0].id)
                location[o.name.split()[0]].append(o.location[1])
    #PRINT section
    #Print stats including duplicates
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
    sumName = projName, "_summary_", timestamp,".txt"
    sumName = ''.join(sumName)
    with open(sumName, "w") as summary:
        # OPT: how many actual lanes per ideally-needed lane (>= 1.0).
        if sum(req_lanes.values()) != 0:
            OPT = sum(total_lanes)/sum(req_lanes.values())
        else:
            OPT = 0
        output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
        output = ''.join(output)
        summary.write( output )
        output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
        output = ''.join(output)
        summary.write( output )
        # bin tracks the running well number (NOTE: shadows the builtin bin()).
        bin = 0
        for index in xrange(1, len(lane_maps)+1):
            bin += 1
            summary.write('\n')
            output = "Wells ", str(bin) , '-' , str(bin+int(total_lanes[index-1])-1),':','\n'
            output = ''.join(output)
            summary.write( output )
            bin += int(total_lanes[index-1]-1)
            # Index 0 ('Undetermined') is skipped in the per-sample listing.
            for counter in xrange(1, len(lane_maps[index])):
                output = str(lane_maps[index][counter]),' ', str(acc_ratios[index][counter]), "%",'\n'
                output = ''.join(output)
                summary.write( output )
    #Creates csv
    name = projName,"_repool_",timestamp,".csv"
    name = ''.join(name)
    wells = ['Empty','A','B','C','D','E','F','G','H']
    #Index 0 is number, index 1 is Letter
    wellIndex = [1, 1]
    destNo = 0
    with open(name, 'w') as csvfile:
        writer = csv.writer(csvfile)
        for index in xrange(1, len(lane_maps)+1):
            for dupes in xrange(1, int(total_lanes[index-1])+1):
                if lane_maps[index] == 0:
                    raise Exception('Error: Project not logged in x_flowcells database!')
                for counter in xrange(1, len(lane_maps[index])):
                    #<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
                    #Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
                    sample = lane_maps[index][counter]
                    position = wells[wellIndex[1]],':',str(wellIndex[0])
                    position = ''.join(position)
                    try:
                        output = location[sample][0],location[sample][1],str(int(acc_ratios[index][counter]*2)),str(destid[destNo]),position
                    except KeyError:
                        # NOTE(review): on KeyError 'output' keeps its PREVIOUS
                        # value (or is unbound on the first row), yet the
                        # writerow below may still run -- confirm intent.
                        print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
                    if not acc_ratios[index][counter] == 0:
                        writer.writerow(output)
                    #Increment wellsindex: advance row A..H, then column 1..12,
                    #then move on to the next destination plate.
                    if not acc_ratios[index][counter] == 0:
                        if not wellIndex[1] >= 8:
                            wellIndex[1] += 1
                        else:
                            wellIndex[1] = 1
                            if not wellIndex[0] >= 12:
                                wellIndex[0] += 1
                            else:
                                wellIndex[0] = 1
                                destNo += 1
                                try:
                                    destid[destNo]
                                except IndexError:
                                    print "Critical error; not enough destination plates provided"
@click.command()
@click.option('--project_id', required=True,help='REQUIRED: ID of project to repool. Examples:P2652, P1312 etc.')
@click.option('--dest_plate_list', default=['dp_1','dp_2','dp_3','dp_4','dp_5'],
              help='List of destination plates for the robot\'s csv file. Include too many rather than too few; excess will be unused Default:[dp_1,dp_2,dp_3,dp_4,dp_5]')
@click.option('--target_clusters', default=320*1000000, help='Threshold of clusters per sample. \nDefault:320*1000000')
@click.option('--clusters_per_lane', default=380*1000000, help='Expected clusters generated by a single lane/well. \nDefault:380*1000000')
@click.option('--allow_non_dupl_struct', is_flag=True, help='Allow for samples to be present in different types of flowcells')
def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct):
    """Application that calculates samples under threshold for a project, then calculate the optimal composition for reaching the threshold
    without altering concentrations nor the structure of the pools. Outputs both a summary as well as a functional csv file."""
    # Pipeline: fetch project structure from StatusDB, derive unique lane
    # layouts and per-sample cluster deficits, pick/validate a lane set,
    # compute whole-percent ratios, and write the summary + robot csv.
    couch = connection()
    structure = proj_struct(couch, project_id, target_clusters)
    [lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)
    if allow_non_dupl_struct:
        # aggregator() is a stub and always raises.
        aggregator(lane_maps,clusters_rem,clusters_per_lane)
    else:
        # NOTE(review): simple_unique_set() only validates -- it neither
        # returns nor mutates the filtered lane_maps (see that function).
        simple_unique_set(lane_maps)
    [ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)
    acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)
    generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)
if __name__ == '__main__':
main() | connection | identifier_name |
repooler.py | #!/usr/bin/env python2.7
import couchdb
import re
import math
from collections import defaultdict, Counter, OrderedDict
import unicodedata
import csv
import copy
import click
from time import time
from datetime import datetime
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims
from genologics.entities import Process
#Assumes ind. sample conc measurements have failed. As such it relies on changing relative volume on already normalized samples and structure
#Structure are retained as conc measurements failure means there's no way to know conc. delta between samples from seperate poolss
def connection():
    """Return a couchdb.Server handle to the StatusDB instance.

    NOTE(review): user/pw are empty placeholders and the URL embeds them in a
    plain-http basic-auth string -- fill in real credentials (and prefer
    https) before production use.
    """
    user = ''
    pw = ''
    couch = couchdb.Server('http://' + user + ':' + pw + '@tools.scilifelab.se:5984')
    return couch
#Fetches the structure of a project
def proj_struct(couch, project, target_clusters):
db = couch['x_flowcells']
view = db.view('names/project_ids_list')
fc_track = defaultdict(set)
#Adds flowcells to ALL projects. Due to intractions its easier to just get FCs for ALL projects
for rec in view.rows:
fc = ''.join(rec.key)
fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
id = ''.join(rec.id)
id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
for projs in rec.value:
projs = ''.join(projs)
projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
if fc_track[projs] == set([]):
fc_track[projs] = dict()
fc_track[projs][fc] = id
#Adds lanes and samples to flowcells, includes samples from other projects if they share lane
if fc_track[project] == set([]):
raise Exception('Error: Project not logged in x_flowcells database!')
for fc, id in fc_track[project].items():
try:
entry = db[id]['illumina']
except KeyError:
print "Error: Illumina table for db entry" , id, "doesn't exist!"
entry = db[id]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
for index in xrange(0, len(entry)):
lane = entry[index]['Lane']
sample = entry[index]['Sample']
if 'Clusters' in entry[index]:
clusters = entry[index]['Clusters']
else:
clusters = entry[index]['PF Clusters']
clusters = int(re.sub(r",", "", clusters))
if not isinstance(fc_track[project][fc], dict):
fc_track[project][fc] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
|
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
for p in pros:
#For all artifacts in process
for o in p.all_outputs():
#If artifact is analyte type and has project name in sample
if o.type=="Analyte" and project in o.name:
location[o.name.split()[0]] = list()
location[o.name.split()[0]].append(o.location[0].id)
location[o.name.split()[0]].append(o.location[1])
#PRINT section
#Print stats including duplicates
timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
sumName = projName, "_summary_", timestamp,".txt"
sumName = ''.join(sumName)
with open(sumName, "w") as summary:
if sum(req_lanes.values()) != 0:
OPT = sum(total_lanes)/sum(req_lanes.values())
else:
OPT = 0
output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
output = ''.join(output)
summary.write( output )
output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
output = ''.join(output)
summary.write( output )
bin = 0
for index in xrange(1, len(lane_maps)+1):
bin += 1
summary.write('\n')
output = "Wells ", str(bin) , '-' , str(bin+int(total_lanes[index-1])-1),':','\n'
output = ''.join(output)
summary.write( output )
bin += int(total_lanes[index-1]-1)
for counter in xrange(1, len(lane_maps[index])):
output = str(lane_maps[index][counter]),' ', str(acc_ratios[index][counter]), "%",'\n'
output = ''.join(output)
summary.write( output )
#Creates csv
name = projName,"_repool_",timestamp,".csv"
name = ''.join(name)
wells = ['Empty','A','B','C','D','E','F','G','H']
#Index 0 is number, index 1 is Letter
wellIndex = [1, 1]
destNo = 0
with open(name, 'w') as csvfile:
writer = csv.writer(csvfile)
for index in xrange(1, len(lane_maps)+1):
for dupes in xrange(1, int(total_lanes[index-1])+1):
if lane_maps[index] == 0:
raise Exception('Error: Project not logged in x_flowcells database!')
for counter in xrange(1, len(lane_maps[index])):
#<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
#Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
sample = lane_maps[index][counter]
position = wells[wellIndex[1]],':',str(wellIndex[0])
position = ''.join(position)
try:
output = location[sample][0],location[sample][1],str(int(acc_ratios[index][counter]*2)),str(destid[destNo]),position
except KeyError:
print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
if not acc_ratios[index][counter] == 0:
writer.writerow(output)
#Increment wellsindex
if not acc_ratios[index][counter] == 0:
if not wellIndex[1] >= 8:
wellIndex[1] += 1
else:
wellIndex[1] = 1
if not wellIndex[0] >= 12:
wellIndex[0] += 1
else:
wellIndex[0] = 1
destNo += 1
try:
destid[destNo]
except IndexError:
print "Critical error; not enough destination plates provided"
@click.command()
@click.option('--project_id', required=True,help='REQUIRED: ID of project to repool. Examples:P2652, P1312 etc.')
@click.option('--dest_plate_list', default=['dp_1','dp_2','dp_3','dp_4','dp_5'],
help='List of destination plates for the robot\'s csv file. Include too many rather than too few; excess will be unused Default:[dp_1,dp_2,dp_3,dp_4,dp_5]')
@click.option('--target_clusters', default=320*1000000, help='Threshold of clusters per sample. \nDefault:320*1000000')
@click.option('--clusters_per_lane', default=380*1000000, help='Expected clusters generated by a single lane/well. \nDefault:380*1000000')
@click.option('--allow_non_dupl_struct', is_flag=True, help='Allow for samples to be present in different types of flowcells')
def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct):
"""Application that calculates samples under threshold for a project, then calculate the optimal composition for reaching the threshold
without altering concentrations nor the structure of the pools. Outputs both a summary as well as a functional csv file."""
couch = connection()
structure = proj_struct(couch, project_id, target_clusters)
[lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)
if allow_non_dupl_struct:
aggregator(lane_maps,clusters_rem,clusters_per_lane)
else:
simple_unique_set(lane_maps)
[ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)
acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)
generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)
if __name__ == '__main__':
main() | tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)):
if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList)
for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!') | identifier_body |
repooler.py | #!/usr/bin/env python2.7
import couchdb
import re
import math
from collections import defaultdict, Counter, OrderedDict
import unicodedata
import csv
import copy
import click
from time import time
from datetime import datetime
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims
from genologics.entities import Process
#Assumes ind. sample conc measurements have failed. As such it relies on changing relative volume on already normalized samples and structure
#Structure are retained as conc measurements failure means there's no way to know conc. delta between samples from seperate poolss
def connection():
user = ''
pw = ''
couch = couchdb.Server('http://' + user + ':' + pw + '@tools.scilifelab.se:5984')
return couch
#Fetches the structure of a project
def proj_struct(couch, project, target_clusters):
db = couch['x_flowcells']
view = db.view('names/project_ids_list')
fc_track = defaultdict(set)
#Adds flowcells to ALL projects. Due to intractions its easier to just get FCs for ALL projects
for rec in view.rows:
fc = ''.join(rec.key)
fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
id = ''.join(rec.id)
id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
for projs in rec.value:
projs = ''.join(projs)
projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
if fc_track[projs] == set([]):
fc_track[projs] = dict()
fc_track[projs][fc] = id
#Adds lanes and samples to flowcells, includes samples from other projects if they share lane
if fc_track[project] == set([]):
raise Exception('Error: Project not logged in x_flowcells database!')
for fc, id in fc_track[project].items():
try:
entry = db[id]['illumina']
except KeyError:
print "Error: Illumina table for db entry" , id, "doesn't exist!"
entry = db[id]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
for index in xrange(0, len(entry)):
lane = entry[index]['Lane']
sample = entry[index]['Sample']
if 'Clusters' in entry[index]:
clusters = entry[index]['Clusters']
else:
clusters = entry[index]['PF Clusters']
clusters = int(re.sub(r",", "", clusters))
if not isinstance(fc_track[project][fc], dict):
fc_track[project][fc] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)): | for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!')
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
for p in pros:
#For all artifacts in process
for o in p.all_outputs():
#If artifact is analyte type and has project name in sample
if o.type=="Analyte" and project in o.name:
location[o.name.split()[0]] = list()
location[o.name.split()[0]].append(o.location[0].id)
location[o.name.split()[0]].append(o.location[1])
#PRINT section
#Print stats including duplicates
timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
sumName = projName, "_summary_", timestamp,".txt"
sumName = ''.join(sumName)
with open(sumName, "w") as summary:
if sum(req_lanes.values()) != 0:
OPT = sum(total_lanes)/sum(req_lanes.values())
else:
OPT = 0
output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
output = ''.join(output)
summary.write( output )
output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
output = ''.join(output)
summary.write( output )
bin = 0
for index in xrange(1, len(lane_maps)+1):
bin += 1
summary.write('\n')
output = "Wells ", str(bin) , '-' , str(bin+int(total_lanes[index-1])-1),':','\n'
output = ''.join(output)
summary.write( output )
bin += int(total_lanes[index-1]-1)
for counter in xrange(1, len(lane_maps[index])):
output = str(lane_maps[index][counter]),' ', str(acc_ratios[index][counter]), "%",'\n'
output = ''.join(output)
summary.write( output )
#Creates csv
name = projName,"_repool_",timestamp,".csv"
name = ''.join(name)
wells = ['Empty','A','B','C','D','E','F','G','H']
#Index 0 is number, index 1 is Letter
wellIndex = [1, 1]
destNo = 0
with open(name, 'w') as csvfile:
writer = csv.writer(csvfile)
for index in xrange(1, len(lane_maps)+1):
for dupes in xrange(1, int(total_lanes[index-1])+1):
if lane_maps[index] == 0:
raise Exception('Error: Project not logged in x_flowcells database!')
for counter in xrange(1, len(lane_maps[index])):
#<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
#Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
sample = lane_maps[index][counter]
position = wells[wellIndex[1]],':',str(wellIndex[0])
position = ''.join(position)
try:
output = location[sample][0],location[sample][1],str(int(acc_ratios[index][counter]*2)),str(destid[destNo]),position
except KeyError:
print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
if not acc_ratios[index][counter] == 0:
writer.writerow(output)
#Increment wellsindex
if not acc_ratios[index][counter] == 0:
if not wellIndex[1] >= 8:
wellIndex[1] += 1
else:
wellIndex[1] = 1
if not wellIndex[0] >= 12:
wellIndex[0] += 1
else:
wellIndex[0] = 1
destNo += 1
try:
destid[destNo]
except IndexError:
print "Critical error; not enough destination plates provided"
@click.command()
@click.option('--project_id', required=True,help='REQUIRED: ID of project to repool. Examples:P2652, P1312 etc.')
@click.option('--dest_plate_list', default=['dp_1','dp_2','dp_3','dp_4','dp_5'],
help='List of destination plates for the robot\'s csv file. Include too many rather than too few; excess will be unused Default:[dp_1,dp_2,dp_3,dp_4,dp_5]')
@click.option('--target_clusters', default=320*1000000, help='Threshold of clusters per sample. \nDefault:320*1000000')
@click.option('--clusters_per_lane', default=380*1000000, help='Expected clusters generated by a single lane/well. \nDefault:380*1000000')
@click.option('--allow_non_dupl_struct', is_flag=True, help='Allow for samples to be present in different types of flowcells')
def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct):
"""Application that calculates samples under threshold for a project, then calculate the optimal composition for reaching the threshold
without altering concentrations nor the structure of the pools. Outputs both a summary as well as a functional csv file."""
couch = connection()
structure = proj_struct(couch, project_id, target_clusters)
[lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)
if allow_non_dupl_struct:
aggregator(lane_maps,clusters_rem,clusters_per_lane)
else:
simple_unique_set(lane_maps)
[ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)
acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)
generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)
if __name__ == '__main__':
main() | if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList) | random_line_split |
coltest4.py | #!/usr/bin/python
# coding: utf-8
#----------------------------------------------------------
#
# Light Painting Programm for NeoPixel Strip
#
# Based on several examples found on the Adafruit Learning System
# Thanks to Tony DiCola , Phillip Burgess and others
#
# This version adapted and expanded by Peter K. Boxler, Switzerland
# January 2015
#
# NeoPixel Products here:
# ----> https://www.adafruit.com/products/1506
#-----------------------------------------------------------
#
import sys, getopt, os
import time, math
from time import sleep
import RPi.GPIO as GPIO
from PIL import Image
import struct
from neopixel import *
#Button handling:
red_button = 4
black_button = 17
BLACK=1
REDSHORT=2
REDLONG=3
but={1:'Black', 2:'Red-short', 3:'Red-long'}
led_rot=23
led_green=25
striplen=144
# LED strip configuration:
LED_COUNT = striplen # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Configurable values
column_delay_time = 0.100
xmlfile="a.xml" #default input file name
onoff={1:'ON', 0:'OFF'}
debug=0 # debug print out
do_term=0 # term signal in mainloop
gamma =2.8 # gamma correction
gamma_a=(list) # list of gamma corrections for every brightness 0-255
debugg=0
delay=20
pattern1=[0,50,80,120,180,220,250,250,250,250,250,250,220,180,120,80,50,0]
pattern2=[0,50,60,80,100,120,180,220,250,250,250,250,250,250]
pattern3=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,195,10],
[255,133,0],
[255,34,0],
[255,0,0],
[144,0,0]
]
pattern4=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,19,10],
]
pattern5=[ [32,0,194],
[0,97,255],
[0,255,212],
[200,30,20]
]
BLACK=[0,0,0]
BOTTOM=-1
TOP=-1
UP=1
DOWN=0
FULL=-2
UPDOWN={1:'UP', 0:'DOWN'}
#
# ***** Function Parse commandline arguments ***********************
# get and parse commandline args
def arguments(argv):
global debug,file_path
try:
opts, args=getopt.getopt(argv,"hdDp:")
except getopt.GetoptError:
myPrint ("Parameter Error")
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print ("App neopix1 Actions -----------------------------------")
print ("usage: neopix1 [-d ]")
sys.exit(2)
elif opt == '-d': debug = 1
elif opt == '-D':
debug = 1
debugg = 1
elif opt == '-p': file_path = arg
# ***********************************************
#------- Funktion initpgm ---------------------------------
#
def initpgm():
global gamma_a, gamma
g_maxin = 255.0
g_maxout = 255.0
# setup General Input/Output-Pins of Raspberry
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
|
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color):
global gamma_a
if debug: print "now in colorwipe"
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
#------------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe2(strip):
global gamma_a
if debug: print "Draw colorwipe2"
step=int(255/(striplen/3)) # Helligkeits step
if debug: print "step: %d " % step
if debug: print "now red"
max=255
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(i*step)
# print "i: %d max: %d" % (i,max)
max=255
y=0
if debug: print "now blue"
for i in range(striplen/3,2*striplen/3):
color=Color(0,0,gamma_a[max])
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print i,max
max=255
y=0
if debug: print "now green"
for i in range(2*striplen/3,striplen):
color=Color(0,gamma_a[max],0)
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print "i: %d max: %d" % (i,max)
#
clearled(BLACK)
if debug: print "now red umgekehrt"
i=0
max=0
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
i=i+1
time.sleep(delay/1000.0)
max=0+(i*step)
time.sleep(delay/1000.0)
# print "i: %d max: %d" % (i,max)
if debug: print "Anzahl show %d: " % i
return(0)
# ---------------------
def colorWipe3(strip):
global gamma_a
if debug: print "Draw colorwipe3, color and then fade"
max=255
for i in range(striplen):
color=Color(max,0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
bright=255
for z in range (15):
# if debug: print "bright %d" % bright
strip.setBrightness(bright)
strip.show()
bright=255-(z*255/15)
sleep(0.2)
#------------------------------------------------------
def colorWipe6(strip, pattern, dir, patdir, oben, unten, colorback, bright=60):
global gamma_a, debug
i=0 # count for number of show() calls
lenpat=len(pattern) # length of pattern
if (unten > oben and oben !=TOP) or (oben > striplen):
if debug: print "Error - unten/oben fehlerhaft (striplen %d %s/%s)" % (striplen, unten,oben)
return(2)
# setup
# define startposition
if dir==DOWN: # direction Down
if oben == TOP: # the full lenght
startpos=striplen-1 # start here
else:
startpos=oben-lenpat # not at bottom
# setup
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = startpos-unten+2
elif oben == TOP and unten == BOTTOM:
anzahl=striplen+lenpat
elif oben != TOP and unten == BOTTOM:
anzahl=oben+1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
else: # direction is Up
# define startposition
if unten == BOTTOM: # the full lenght
startpos=-lenpat+1 # start here
else:
startpos=unten-1 # not at bottom
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = (striplen-unten+1) +1
elif oben == TOP and unten == BOTTOM:
anzahl= (striplen+lenpat-1) +1
elif oben != TOP and unten == BOTTOM:
anzahl= (oben-1) +1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
for z in range( 0, anzahl) :
set=0
if dir==DOWN:
pos=startpos-z
else:
pos=startpos+z # count backwards
# if pos1== 130: sleep(3)
for pattind in range(lenpat):
if patdir==UP:
pattindex=lenpat-pattind-1
else:
pattindex=pattind
pos1=pos+pattind
if (pos1 >= 0) and (pos1 < striplen):
if debugg: print "z: %d position: %d patternindex: %d" % (z,pos1, pattindex)
color=Color( pattern[pattindex][0] , pattern[pattindex][1], pattern[pattindex][2])
strip.setPixelColor(pos1, color)
set=1
if (pos1+1) < striplen and dir==DOWN: # set trailing led to black
strip.setPixelColor(pos1+1, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1+1)
set=1
elif dir==UP and pos1-lenpat>=0:
strip.setPixelColor(pos1-lenpat, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1-lenpat)
set=1
if set: # if a pixel was set
if bright<90:
strip.setBrightness(bright)
strip.show() # light them up
i=i+1 # increment show counter
set=0
if debugg: print "show"
time.sleep(delay/1000.0) # delay
# sleep(1)
if debugg: print "Anzahl show %d: " % i
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
return()
#----------------------------------------------
# Main Loop
#---------------------------------------------
def main_loop():
global do_term
i=0 # i contains image number
# images are in list images
background=[0,0,0]
# der loop läuft, bis ein Keyboard interrupt kommt, ctrl-c ----
try:
patt=make_rainbow(40)
if debugg: print patt
startpos=20
for i in range(len(patt)):
strip.setPixelColor(i+startpos, Color(patt[i][0],patt[i][1],patt[i][2]))
strip.setBrightness(60)
strip.show()
sleep(0.1)
sleep(2)
clearled(BLACK)
write_rainbow(strip,100, 30,0) # paint rainbow at pos 90 in length 20
write_rainbow(strip,20,30)
sleep(10)
clearled(BLACK)
colorWipe2(strip) # Red wipe
sleep(2)
clearled(BLACK)
colorWipe3(strip) # Red wipe
sleep(2)
strip.setBrightness(80)
clearled(BLACK)
colorWipe6(strip,patt,UP,UP,60,BOTTOM,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,DOWN,UP,60,BOTTOM,background) # Red wipe
sleep(0.4)
colorWipe6(strip,patt,DOWN,DOWN,TOP,60,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,UP,DOWN,TOP,40,background) # Red wipe
sleep(0.1)
clearled(BLACK)
background=[0,0,0]
background=[30,10,20]
clearled(background)
colorWipe6(strip,pattern3,DOWN,UP,TOP,19,background) # Red wipe
sleep(0.3)
clearled(BLACK)
rainbow(strip,20,100)
sleep(3)
clearled(BLACK)
rainbowCycle(strip)
sleep(1)
clearled(BLACK)
rainbowCycle(strip,80,130)
sleep(1)
clearled(BLACK)
except KeyboardInterrupt:
# aufräumem
print ("\nKeyboard Interrupt in butest")
do_term=1
pass
clearled(BLACK)
return(0)
#---- End Main Loop -------------------------------------------------------------
# *************************************************
# Program starts here
# *************************************************
if __name__ == '__main__':
#
arguments(sys.argv[1:]) # get commandline arguments
if debug: print "Run with debug"
initpgm()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
blink_led(led_green,2) # blink led 2 mal while waiting
main_loop() # work is done in main_loop()
# return from mainloop if Ctrl-C on Keyboard
#
# Clean-Up and terminate
GPIO.cleanup(led_rot)
GPIO.cleanup(led_green)
GPIO.cleanup(red_button)
GPIO.cleanup(black_button)
print ("Program terminated....")
sys.exit(0)
#**************************************************************
# That is the end
#***************************************************************
#
| print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass | identifier_body |
coltest4.py | #!/usr/bin/python
# coding: utf-8
#----------------------------------------------------------
#
# Light Painting Programm for NeoPixel Strip
#
# Based on several examples found on the Adafruit Learning System
# Thanks to Tony DiCola , Phillip Burgess and others
#
# This version adapted and expanded by Peter K. Boxler, Switzerland
# January 2015
#
# NeoPixel Products here:
# ----> https://www.adafruit.com/products/1506
#-----------------------------------------------------------
#
import sys, getopt, os
import time, math
from time import sleep
import RPi.GPIO as GPIO
from PIL import Image
import struct
from neopixel import *
#Button handling:
red_button = 4
black_button = 17
BLACK=1
REDSHORT=2
REDLONG=3
but={1:'Black', 2:'Red-short', 3:'Red-long'}
led_rot=23
led_green=25
striplen=144
# LED strip configuration:
LED_COUNT = striplen # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Configurable values
column_delay_time = 0.100
xmlfile="a.xml" #default input file name
onoff={1:'ON', 0:'OFF'}
debug=0 # debug print out
do_term=0 # term signal in mainloop
gamma =2.8 # gamma correction
gamma_a=(list) # list of gamma corrections for every brightness 0-255
debugg=0
delay=20
pattern1=[0,50,80,120,180,220,250,250,250,250,250,250,220,180,120,80,50,0]
pattern2=[0,50,60,80,100,120,180,220,250,250,250,250,250,250]
pattern3=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,195,10],
[255,133,0],
[255,34,0],
[255,0,0],
[144,0,0]
]
pattern4=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,19,10],
]
pattern5=[ [32,0,194],
[0,97,255],
[0,255,212],
[200,30,20]
]
BLACK=[0,0,0]
BOTTOM=-1
TOP=-1
UP=1
DOWN=0
FULL=-2
UPDOWN={1:'UP', 0:'DOWN'}
#
# ***** Function Parse commandline arguments ***********************
# get and parse commandline args
def arguments(argv):
global debug,file_path
try:
opts, args=getopt.getopt(argv,"hdDp:")
except getopt.GetoptError:
myPrint ("Parameter Error")
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print ("App neopix1 Actions -----------------------------------")
print ("usage: neopix1 [-d ]")
sys.exit(2)
elif opt == '-d': debug = 1
elif opt == '-D':
debug = 1
debugg = 1
elif opt == '-p': file_path = arg
# ***********************************************
#------- Funktion initpgm ---------------------------------
#
def initpgm():
global gamma_a, gamma
g_maxin = 255.0
g_maxout = 255.0
# setup General Input/Output-Pins of Raspberry
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else: | return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color):
global gamma_a
if debug: print "now in colorwipe"
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
#------------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe2(strip):
global gamma_a
if debug: print "Draw colorwipe2"
step=int(255/(striplen/3)) # Helligkeits step
if debug: print "step: %d " % step
if debug: print "now red"
max=255
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(i*step)
# print "i: %d max: %d" % (i,max)
max=255
y=0
if debug: print "now blue"
for i in range(striplen/3,2*striplen/3):
color=Color(0,0,gamma_a[max])
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print i,max
max=255
y=0
if debug: print "now green"
for i in range(2*striplen/3,striplen):
color=Color(0,gamma_a[max],0)
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print "i: %d max: %d" % (i,max)
#
clearled(BLACK)
if debug: print "now red umgekehrt"
i=0
max=0
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
i=i+1
time.sleep(delay/1000.0)
max=0+(i*step)
time.sleep(delay/1000.0)
# print "i: %d max: %d" % (i,max)
if debug: print "Anzahl show %d: " % i
return(0)
# ---------------------
def colorWipe3(strip):
global gamma_a
if debug: print "Draw colorwipe3, color and then fade"
max=255
for i in range(striplen):
color=Color(max,0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
bright=255
for z in range (15):
# if debug: print "bright %d" % bright
strip.setBrightness(bright)
strip.show()
bright=255-(z*255/15)
sleep(0.2)
#------------------------------------------------------
def colorWipe6(strip, pattern, dir, patdir, oben, unten, colorback, bright=60):
global gamma_a, debug
i=0 # count for number of show() calls
lenpat=len(pattern) # length of pattern
if (unten > oben and oben !=TOP) or (oben > striplen):
if debug: print "Error - unten/oben fehlerhaft (striplen %d %s/%s)" % (striplen, unten,oben)
return(2)
# setup
# define startposition
if dir==DOWN: # direction Down
if oben == TOP: # the full lenght
startpos=striplen-1 # start here
else:
startpos=oben-lenpat # not at bottom
# setup
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = startpos-unten+2
elif oben == TOP and unten == BOTTOM:
anzahl=striplen+lenpat
elif oben != TOP and unten == BOTTOM:
anzahl=oben+1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
else: # direction is Up
# define startposition
if unten == BOTTOM: # the full lenght
startpos=-lenpat+1 # start here
else:
startpos=unten-1 # not at bottom
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = (striplen-unten+1) +1
elif oben == TOP and unten == BOTTOM:
anzahl= (striplen+lenpat-1) +1
elif oben != TOP and unten == BOTTOM:
anzahl= (oben-1) +1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
for z in range( 0, anzahl) :
set=0
if dir==DOWN:
pos=startpos-z
else:
pos=startpos+z # count backwards
# if pos1== 130: sleep(3)
for pattind in range(lenpat):
if patdir==UP:
pattindex=lenpat-pattind-1
else:
pattindex=pattind
pos1=pos+pattind
if (pos1 >= 0) and (pos1 < striplen):
if debugg: print "z: %d position: %d patternindex: %d" % (z,pos1, pattindex)
color=Color( pattern[pattindex][0] , pattern[pattindex][1], pattern[pattindex][2])
strip.setPixelColor(pos1, color)
set=1
if (pos1+1) < striplen and dir==DOWN: # set trailing led to black
strip.setPixelColor(pos1+1, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1+1)
set=1
elif dir==UP and pos1-lenpat>=0:
strip.setPixelColor(pos1-lenpat, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1-lenpat)
set=1
if set: # if a pixel was set
if bright<90:
strip.setBrightness(bright)
strip.show() # light them up
i=i+1 # increment show counter
set=0
if debugg: print "show"
time.sleep(delay/1000.0) # delay
# sleep(1)
if debugg: print "Anzahl show %d: " % i
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
return()
#----------------------------------------------
# Main Loop
#---------------------------------------------
def main_loop():
global do_term
i=0 # i contains image number
# images are in list images
background=[0,0,0]
# der loop läuft, bis ein Keyboard interrupt kommt, ctrl-c ----
try:
patt=make_rainbow(40)
if debugg: print patt
startpos=20
for i in range(len(patt)):
strip.setPixelColor(i+startpos, Color(patt[i][0],patt[i][1],patt[i][2]))
strip.setBrightness(60)
strip.show()
sleep(0.1)
sleep(2)
clearled(BLACK)
write_rainbow(strip,100, 30,0) # paint rainbow at pos 90 in length 20
write_rainbow(strip,20,30)
sleep(10)
clearled(BLACK)
colorWipe2(strip) # Red wipe
sleep(2)
clearled(BLACK)
colorWipe3(strip) # Red wipe
sleep(2)
strip.setBrightness(80)
clearled(BLACK)
colorWipe6(strip,patt,UP,UP,60,BOTTOM,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,DOWN,UP,60,BOTTOM,background) # Red wipe
sleep(0.4)
colorWipe6(strip,patt,DOWN,DOWN,TOP,60,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,UP,DOWN,TOP,40,background) # Red wipe
sleep(0.1)
clearled(BLACK)
background=[0,0,0]
background=[30,10,20]
clearled(background)
colorWipe6(strip,pattern3,DOWN,UP,TOP,19,background) # Red wipe
sleep(0.3)
clearled(BLACK)
rainbow(strip,20,100)
sleep(3)
clearled(BLACK)
rainbowCycle(strip)
sleep(1)
clearled(BLACK)
rainbowCycle(strip,80,130)
sleep(1)
clearled(BLACK)
except KeyboardInterrupt:
# aufräumem
print ("\nKeyboard Interrupt in butest")
do_term=1
pass
clearled(BLACK)
return(0)
#---- End Main Loop -------------------------------------------------------------
# *************************************************
# Program starts here
# *************************************************
if __name__ == '__main__':
#
arguments(sys.argv[1:]) # get commandline arguments
if debug: print "Run with debug"
initpgm()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
blink_led(led_green,2) # blink led 2 mal while waiting
main_loop() # work is done in main_loop()
# return from mainloop if Ctrl-C on Keyboard
#
# Clean-Up and terminate
GPIO.cleanup(led_rot)
GPIO.cleanup(led_green)
GPIO.cleanup(red_button)
GPIO.cleanup(black_button)
print ("Program terminated....")
sys.exit(0)
#**************************************************************
# That is the end
#***************************************************************
# |
if gamma: | random_line_split |
coltest4.py | #!/usr/bin/python
# coding: utf-8
#----------------------------------------------------------
#
# Light Painting Programm for NeoPixel Strip
#
# Based on several examples found on the Adafruit Learning System
# Thanks to Tony DiCola , Phillip Burgess and others
#
# This version adapted and expanded by Peter K. Boxler, Switzerland
# January 2015
#
# NeoPixel Products here:
# ----> https://www.adafruit.com/products/1506
#-----------------------------------------------------------
#
import sys, getopt, os
import time, math
from time import sleep
import RPi.GPIO as GPIO
from PIL import Image
import struct
from neopixel import *
#Button handling:
red_button = 4
black_button = 17
BLACK=1
REDSHORT=2
REDLONG=3
but={1:'Black', 2:'Red-short', 3:'Red-long'}
led_rot=23
led_green=25
striplen=144
# LED strip configuration:
LED_COUNT = striplen # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Configurable values
column_delay_time = 0.100
xmlfile="a.xml" #default input file name
onoff={1:'ON', 0:'OFF'}
debug=0 # debug print out
do_term=0 # term signal in mainloop
gamma =2.8 # gamma correction
gamma_a=(list) # list of gamma corrections for every brightness 0-255
debugg=0
delay=20
pattern1=[0,50,80,120,180,220,250,250,250,250,250,250,220,180,120,80,50,0]
pattern2=[0,50,60,80,100,120,180,220,250,250,250,250,250,250]
pattern3=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,195,10],
[255,133,0],
[255,34,0],
[255,0,0],
[144,0,0]
]
pattern4=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,19,10],
]
pattern5=[ [32,0,194],
[0,97,255],
[0,255,212],
[200,30,20]
]
BLACK=[0,0,0]
BOTTOM=-1
TOP=-1
UP=1
DOWN=0
FULL=-2
UPDOWN={1:'UP', 0:'DOWN'}
#
# ***** Function Parse commandline arguments ***********************
# get and parse commandline args
def arguments(argv):
global debug,file_path
try:
opts, args=getopt.getopt(argv,"hdDp:")
except getopt.GetoptError:
myPrint ("Parameter Error")
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print ("App neopix1 Actions -----------------------------------")
print ("usage: neopix1 [-d ]")
sys.exit(2)
elif opt == '-d': debug = 1
elif opt == '-D':
debug = 1
debugg = 1
elif opt == '-p': file_path = arg
# ***********************************************
#------- Funktion initpgm ---------------------------------
#
def initpgm():
global gamma_a, gamma
g_maxin = 255.0
g_maxout = 255.0
# setup General Input/Output-Pins of Raspberry
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
|
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color):
global gamma_a
if debug: print "now in colorwipe"
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
#------------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe2(strip):
global gamma_a
if debug: print "Draw colorwipe2"
step=int(255/(striplen/3)) # Helligkeits step
if debug: print "step: %d " % step
if debug: print "now red"
max=255
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(i*step)
# print "i: %d max: %d" % (i,max)
max=255
y=0
if debug: print "now blue"
for i in range(striplen/3,2*striplen/3):
color=Color(0,0,gamma_a[max])
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print i,max
max=255
y=0
if debug: print "now green"
for i in range(2*striplen/3,striplen):
color=Color(0,gamma_a[max],0)
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print "i: %d max: %d" % (i,max)
#
clearled(BLACK)
if debug: print "now red umgekehrt"
i=0
max=0
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
i=i+1
time.sleep(delay/1000.0)
max=0+(i*step)
time.sleep(delay/1000.0)
# print "i: %d max: %d" % (i,max)
if debug: print "Anzahl show %d: " % i
return(0)
# ---------------------
def colorWipe3(strip):
global gamma_a
if debug: print "Draw colorwipe3, color and then fade"
max=255
for i in range(striplen):
color=Color(max,0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
bright=255
for z in range (15):
# if debug: print "bright %d" % bright
strip.setBrightness(bright)
strip.show()
bright=255-(z*255/15)
sleep(0.2)
#------------------------------------------------------
def colorWipe6(strip, pattern, dir, patdir, oben, unten, colorback, bright=60):
global gamma_a, debug
i=0 # count for number of show() calls
lenpat=len(pattern) # length of pattern
if (unten > oben and oben !=TOP) or (oben > striplen):
if debug: print "Error - unten/oben fehlerhaft (striplen %d %s/%s)" % (striplen, unten,oben)
return(2)
# setup
# define startposition
if dir==DOWN: # direction Down
if oben == TOP: # the full lenght
startpos=striplen-1 # start here
else:
startpos=oben-lenpat # not at bottom
# setup
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = startpos-unten+2
elif oben == TOP and unten == BOTTOM:
anzahl=striplen+lenpat
elif oben != TOP and unten == BOTTOM:
anzahl=oben+1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
else: # direction is Up
# define startposition
if unten == BOTTOM: # the full lenght
startpos=-lenpat+1 # start here
else:
startpos=unten-1 # not at bottom
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = (striplen-unten+1) +1
elif oben == TOP and unten == BOTTOM:
anzahl= (striplen+lenpat-1) +1
elif oben != TOP and unten == BOTTOM:
anzahl= (oben-1) +1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
for z in range( 0, anzahl) :
set=0
if dir==DOWN:
pos=startpos-z
else:
pos=startpos+z # count backwards
# if pos1== 130: sleep(3)
for pattind in range(lenpat):
if patdir==UP:
pattindex=lenpat-pattind-1
else:
pattindex=pattind
pos1=pos+pattind
if (pos1 >= 0) and (pos1 < striplen):
if debugg: print "z: %d position: %d patternindex: %d" % (z,pos1, pattindex)
color=Color( pattern[pattindex][0] , pattern[pattindex][1], pattern[pattindex][2])
strip.setPixelColor(pos1, color)
set=1
if (pos1+1) < striplen and dir==DOWN: # set trailing led to black
strip.setPixelColor(pos1+1, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1+1)
set=1
elif dir==UP and pos1-lenpat>=0:
strip.setPixelColor(pos1-lenpat, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1-lenpat)
set=1
if set: # if a pixel was set
if bright<90:
strip.setBrightness(bright)
strip.show() # light them up
i=i+1 # increment show counter
set=0
if debugg: print "show"
time.sleep(delay/1000.0) # delay
# sleep(1)
if debugg: print "Anzahl show %d: " % i
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
return()
#----------------------------------------------
# Main Loop
#---------------------------------------------
def main_loop():
global do_term
i=0 # i contains image number
# images are in list images
background=[0,0,0]
# der loop läuft, bis ein Keyboard interrupt kommt, ctrl-c ----
try:
patt=make_rainbow(40)
if debugg: print patt
startpos=20
for i in range(len(patt)):
strip.setPixelColor(i+startpos, Color(patt[i][0],patt[i][1],patt[i][2]))
strip.setBrightness(60)
strip.show()
sleep(0.1)
sleep(2)
clearled(BLACK)
write_rainbow(strip,100, 30,0) # paint rainbow at pos 90 in length 20
write_rainbow(strip,20,30)
sleep(10)
clearled(BLACK)
colorWipe2(strip) # Red wipe
sleep(2)
clearled(BLACK)
colorWipe3(strip) # Red wipe
sleep(2)
strip.setBrightness(80)
clearled(BLACK)
colorWipe6(strip,patt,UP,UP,60,BOTTOM,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,DOWN,UP,60,BOTTOM,background) # Red wipe
sleep(0.4)
colorWipe6(strip,patt,DOWN,DOWN,TOP,60,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,UP,DOWN,TOP,40,background) # Red wipe
sleep(0.1)
clearled(BLACK)
background=[0,0,0]
background=[30,10,20]
clearled(background)
colorWipe6(strip,pattern3,DOWN,UP,TOP,19,background) # Red wipe
sleep(0.3)
clearled(BLACK)
rainbow(strip,20,100)
sleep(3)
clearled(BLACK)
rainbowCycle(strip)
sleep(1)
clearled(BLACK)
rainbowCycle(strip,80,130)
sleep(1)
clearled(BLACK)
except KeyboardInterrupt:
# aufräumem
print ("\nKeyboard Interrupt in butest")
do_term=1
pass
clearled(BLACK)
return(0)
#---- End Main Loop -------------------------------------------------------------
# *************************************************
# Program starts here
# *************************************************
if __name__ == '__main__':
#
arguments(sys.argv[1:]) # get commandline arguments
if debug: print "Run with debug"
initpgm()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
blink_led(led_green,2) # blink led 2 mal while waiting
main_loop() # work is done in main_loop()
# return from mainloop if Ctrl-C on Keyboard
#
# Clean-Up and terminate
GPIO.cleanup(led_rot)
GPIO.cleanup(led_green)
GPIO.cleanup(red_button)
GPIO.cleanup(black_button)
print ("Program terminated....")
sys.exit(0)
#**************************************************************
# That is the end
#***************************************************************
#
| return Color(start * 3, 255 - start * 3, 0) | conditional_block |
coltest4.py | #!/usr/bin/python
# coding: utf-8
#----------------------------------------------------------
#
# Light Painting Programm for NeoPixel Strip
#
# Based on several examples found on the Adafruit Learning System
# Thanks to Tony DiCola , Phillip Burgess and others
#
# This version adapted and expanded by Peter K. Boxler, Switzerland
# January 2015
#
# NeoPixel Products here:
# ----> https://www.adafruit.com/products/1506
#-----------------------------------------------------------
#
import sys, getopt, os
import time, math
from time import sleep
import RPi.GPIO as GPIO
from PIL import Image
import struct
from neopixel import *
#Button handling:
red_button = 4
black_button = 17
BLACK=1
REDSHORT=2
REDLONG=3
but={1:'Black', 2:'Red-short', 3:'Red-long'}
led_rot=23
led_green=25
striplen=144
# LED strip configuration:
LED_COUNT = striplen # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Configurable values
column_delay_time = 0.100
xmlfile="a.xml" #default input file name
onoff={1:'ON', 0:'OFF'}
debug=0 # debug print out
do_term=0 # term signal in mainloop
gamma =2.8 # gamma correction
gamma_a=(list) # list of gamma corrections for every brightness 0-255
debugg=0
delay=20
pattern1=[0,50,80,120,180,220,250,250,250,250,250,250,220,180,120,80,50,0]
pattern2=[0,50,60,80,100,120,180,220,250,250,250,250,250,250]
pattern3=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,195,10],
[255,133,0],
[255,34,0],
[255,0,0],
[144,0,0]
]
pattern4=[ [32,0,194],
[0,97,255],
[0,255,212],
[90,160,40],
[127,125,20],
[177,19,10],
]
pattern5=[ [32,0,194],
[0,97,255],
[0,255,212],
[200,30,20]
]
BLACK=[0,0,0]
BOTTOM=-1
TOP=-1
UP=1
DOWN=0
FULL=-2
UPDOWN={1:'UP', 0:'DOWN'}
#
# ***** Function Parse commandline arguments ***********************
# get and parse commandline args
def arguments(argv):
global debug,file_path
try:
opts, args=getopt.getopt(argv,"hdDp:")
except getopt.GetoptError:
myPrint ("Parameter Error")
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print ("App neopix1 Actions -----------------------------------")
print ("usage: neopix1 [-d ]")
sys.exit(2)
elif opt == '-d': debug = 1
elif opt == '-D':
debug = 1
debugg = 1
elif opt == '-p': file_path = arg
# ***********************************************
#------- Funktion initpgm ---------------------------------
#
def initpgm():
global gamma_a, gamma
g_maxin = 255.0
g_maxout = 255.0
# setup General Input/Output-Pins of Raspberry
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color):
global gamma_a
if debug: print "now in colorwipe"
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
#------------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe2(strip):
global gamma_a
if debug: print "Draw colorwipe2"
step=int(255/(striplen/3)) # Helligkeits step
if debug: print "step: %d " % step
if debug: print "now red"
max=255
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(i*step)
# print "i: %d max: %d" % (i,max)
max=255
y=0
if debug: print "now blue"
for i in range(striplen/3,2*striplen/3):
color=Color(0,0,gamma_a[max])
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print i,max
max=255
y=0
if debug: print "now green"
for i in range(2*striplen/3,striplen):
color=Color(0,gamma_a[max],0)
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print "i: %d max: %d" % (i,max)
#
clearled(BLACK)
if debug: print "now red umgekehrt"
i=0
max=0
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
i=i+1
time.sleep(delay/1000.0)
max=0+(i*step)
time.sleep(delay/1000.0)
# print "i: %d max: %d" % (i,max)
if debug: print "Anzahl show %d: " % i
return(0)
# ---------------------
def | (strip):
global gamma_a
if debug: print "Draw colorwipe3, color and then fade"
max=255
for i in range(striplen):
color=Color(max,0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
bright=255
for z in range (15):
# if debug: print "bright %d" % bright
strip.setBrightness(bright)
strip.show()
bright=255-(z*255/15)
sleep(0.2)
#------------------------------------------------------
def colorWipe6(strip, pattern, dir, patdir, oben, unten, colorback, bright=60):
global gamma_a, debug
i=0 # count for number of show() calls
lenpat=len(pattern) # length of pattern
if (unten > oben and oben !=TOP) or (oben > striplen):
if debug: print "Error - unten/oben fehlerhaft (striplen %d %s/%s)" % (striplen, unten,oben)
return(2)
# setup
# define startposition
if dir==DOWN: # direction Down
if oben == TOP: # the full lenght
startpos=striplen-1 # start here
else:
startpos=oben-lenpat # not at bottom
# setup
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = startpos-unten+2
elif oben == TOP and unten == BOTTOM:
anzahl=striplen+lenpat
elif oben != TOP and unten == BOTTOM:
anzahl=oben+1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
else: # direction is Up
# define startposition
if unten == BOTTOM: # the full lenght
startpos=-lenpat+1 # start here
else:
startpos=unten-1 # not at bottom
# define loop counts
if oben == TOP and unten != BOTTOM:
anzahl = (striplen-unten+1) +1
elif oben == TOP and unten == BOTTOM:
anzahl= (striplen+lenpat-1) +1
elif oben != TOP and unten == BOTTOM:
anzahl= (oben-1) +1
elif oben != TOP and unten != BOTTOM:
anzahl=oben-unten-lenpat+2
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
for z in range( 0, anzahl) :
set=0
if dir==DOWN:
pos=startpos-z
else:
pos=startpos+z # count backwards
# if pos1== 130: sleep(3)
for pattind in range(lenpat):
if patdir==UP:
pattindex=lenpat-pattind-1
else:
pattindex=pattind
pos1=pos+pattind
if (pos1 >= 0) and (pos1 < striplen):
if debugg: print "z: %d position: %d patternindex: %d" % (z,pos1, pattindex)
color=Color( pattern[pattindex][0] , pattern[pattindex][1], pattern[pattindex][2])
strip.setPixelColor(pos1, color)
set=1
if (pos1+1) < striplen and dir==DOWN: # set trailing led to black
strip.setPixelColor(pos1+1, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1+1)
set=1
elif dir==UP and pos1-lenpat>=0:
strip.setPixelColor(pos1-lenpat, Color(colorback[0],colorback[1],colorback[2]))
if debugg: print "black at %d" % (pos1-lenpat)
set=1
if set: # if a pixel was set
if bright<90:
strip.setBrightness(bright)
strip.show() # light them up
i=i+1 # increment show counter
set=0
if debugg: print "show"
time.sleep(delay/1000.0) # delay
# sleep(1)
if debugg: print "Anzahl show %d: " % i
if debug: print "colorwipe6 %s startpos %d loopcount %d lenpat %d" % (UPDOWN[dir],startpos, anzahl, lenpat)
return()
#----------------------------------------------
# Main Loop
#---------------------------------------------
def main_loop():
global do_term
i=0 # i contains image number
# images are in list images
background=[0,0,0]
# der loop läuft, bis ein Keyboard interrupt kommt, ctrl-c ----
try:
patt=make_rainbow(40)
if debugg: print patt
startpos=20
for i in range(len(patt)):
strip.setPixelColor(i+startpos, Color(patt[i][0],patt[i][1],patt[i][2]))
strip.setBrightness(60)
strip.show()
sleep(0.1)
sleep(2)
clearled(BLACK)
write_rainbow(strip,100, 30,0) # paint rainbow at pos 90 in length 20
write_rainbow(strip,20,30)
sleep(10)
clearled(BLACK)
colorWipe2(strip) # Red wipe
sleep(2)
clearled(BLACK)
colorWipe3(strip) # Red wipe
sleep(2)
strip.setBrightness(80)
clearled(BLACK)
colorWipe6(strip,patt,UP,UP,60,BOTTOM,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,DOWN,UP,60,BOTTOM,background) # Red wipe
sleep(0.4)
colorWipe6(strip,patt,DOWN,DOWN,TOP,60,background) # Red wipe
sleep(0.2)
clearled(BLACK)
colorWipe6(strip,patt,UP,DOWN,TOP,40,background) # Red wipe
sleep(0.1)
clearled(BLACK)
background=[0,0,0]
background=[30,10,20]
clearled(background)
colorWipe6(strip,pattern3,DOWN,UP,TOP,19,background) # Red wipe
sleep(0.3)
clearled(BLACK)
rainbow(strip,20,100)
sleep(3)
clearled(BLACK)
rainbowCycle(strip)
sleep(1)
clearled(BLACK)
rainbowCycle(strip,80,130)
sleep(1)
clearled(BLACK)
except KeyboardInterrupt:
# aufräumem
print ("\nKeyboard Interrupt in butest")
do_term=1
pass
clearled(BLACK)
return(0)
#---- End Main Loop -------------------------------------------------------------
# *************************************************
# Program starts here
# *************************************************
if __name__ == '__main__':
#
arguments(sys.argv[1:]) # get commandline arguments
if debug: print "Run with debug"
initpgm()
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
blink_led(led_green,2) # blink led 2 mal while waiting
main_loop() # work is done in main_loop()
# return from mainloop if Ctrl-C on Keyboard
#
# Clean-Up and terminate
GPIO.cleanup(led_rot)
GPIO.cleanup(led_green)
GPIO.cleanup(red_button)
GPIO.cleanup(black_button)
print ("Program terminated....")
sys.exit(0)
#**************************************************************
# That is the end
#***************************************************************
#
| colorWipe3 | identifier_name |
python_module.py | # # -*- coding:utf-8 -*-
import sys
print('\n---------------------module:__builtin__--------------------')
print(vars())
print(dir(sys.modules['__builtin__']))
print('\n---------------------module:time--------------------')
import time
print(time.asctime()) # 返回时间格式:Sun May 7 21:46:15 2017
print(time.time()) # 返回时间戳 ‘1494164954.6677325’
print(time.gmtime()) # 返回本地时间 的struct time对象格式,time.struct_time(tm_year=2017, tm_mon=5, tm_mday=7, tm_hour=22, tm_min=4, tm_sec=53, tm_wday=6, tm_yday=127, tm_isdst=0)
print(time.localtime()) # 返回本地时间 的struct time对象格式,time.struct_time(tm_year=2017, tm_mon=5, tm_mday=7, tm_hour=22, tm_min=4, tm_sec=53, tm_wday=6, tm_yday=127, tm_isdst=0)
print(time.gmtime(time.time()-800000)) # 返回utc时间的struc时间对象格式
print(time.asctime(time.localtime())) # 返回时间格式Sun May 7 22:15:09 2017
print(time.ctime()) # 返回时间格式Sun May 7 22:15:09 2017
print(time.strftime('%Y-%m-%d')) #默认当前时间 2017-05-07
print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())) #默认当前时间 2017-05-07
string_struct = time.strptime("2016/05/22","%Y/%m/%d") # 将日期字符串 转成 struct时间对象格式
print(string_struct) # 返回struct time对象格式 time.struct_time(tm_year=2016, tm_mon=5, tm_mday=22, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=6, tm_yday=143, tm_isdst=-1)
# 将日期字符串转成时间戳
struct_stamp = time.mktime(string_struct) # 将struct time时间对象转成时间戳
print(struct_stamp) # 返回时间戳 ‘1463846400.0’
# 将时间戳转为字符串格式
print(time.gmtime(time.time()-86640)) # 将utc时间戳转换成struct_time格式
print(time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime()) ) # 将utc struct_time格式转成指定的字符串格式
print('\n---------------------module:datetime--------------------')
import datetime
# 时间加减
print(datetime.datetime.now()) # 返回当前时间 2017-05-07 22:36:45.179732
print(datetime.date.fromtimestamp(time.time())) # 时间戳直接转换成日期格式 2017-05-07
print(datetime.datetime.now() + datetime.timedelta(3)) # 返回时间在当前日期上 +3 天
print(datetime.datetime.now() + datetime.timedelta(-3)) # 返回时间在当前日期上 -3 天
print(datetime.datetime.now() + datetime.timedelta(hours= 3)) # 返回时间在当前时间上 +3 小时
print(datetime.datetime.now() + datetime.timedelta(minutes= 30)) # 返回时间在当前时间上 +30 分钟
c_time = datetime.datetime.now()
print(c_time) # 当前时间为 2017-05-07 22:52:44.016732
print(c_time.replace(minute=3,hour=2)) # 时间替换 替换时间为‘2017-05-07 02:03:18.181732’
print(datetime.timedelta) # 表示时间间隔,即两个时间点之间的长度
print (datetime.datetime.now() - datetime.timedelta(days=5)) # 返回时间在当前时间上 -5 天
print('\n---------------------module:calendar--------------------')
# python 日历模块
import calendar
print(calendar.calendar(theyear= 2017)) # 返回2017年整年日历
print(calendar.month(2017,5)) # 返回某年某月的日历,返回类型为字符串类型
calendar.setfirstweekday(calendar.WEDNESDAY) # 设置日历的第一天(第一天以星期三开始)
cal = calendar.month(2017, 4)
print (cal)
print(calendar.monthrange(2017,5)) # 返回某个月的第一天和这个月的所有天数
print(calendar.monthcalendar(2017,5)) # 返回某个月以每一周为元素的序列
cal = calendar.HTMLCalendar(calendar.MONDAY)
print(cal.formatmonth(2017, 5)) # 在html中打印某年某月的日历
print(calendar.isleap(2017)) # 判断是否为闰年
print(calendar.leapdays(2000,2017)) # 判断两个年份间闰年的个数
print('\n---------------------module:random--------------------')
import random
# 随机数
print(random.random()) # 返回一个随机小数'0.4800545746046827'
print(random.randint(1,5)) # 返回(1-5)随机整型数据
print(random.randrange(1,10)) # 返回(1-10)随机数据
# 生成随机验证码
code = ''
for i in range(4):
current = random.randrange(0,4)
if current != i:
temp = chr(random.randint(65,90))
else:
temp = random.randint(0,9)
code += str(temp)
print(code)
print('\n---------------------module:os--------------------')
import os
print(os.getcwd()) # 获得当前工作目录
print(os.chdir(os.getcwd())) # 改变当前脚本的工作路径,相当于shell下的cd
print(os.curdir) # 返回当前目录‘.'
print(os.pardir) # 获取当前目录的父目录字符串名‘..'
print(os.makedirs('dirname1/dirname2')) # 可生成多层递归目录
print(os.removedirs('dirname1/dirname2')) # 若目录为空,则删除,并递归到上一级目录,如若也为空,则删除,依此类推
print(os.mkdir('test4')) # 生成单级目录;相当于shell中mkdir dirname
print(os.rmdir('test4')) # 删除单级空目录,若目录不为空则无法删除,报错;相当于shell中rmdir dirname
print(os.listdir(os.getcwd())) # 列出指定目录下的所有文件和子目录,包括隐藏文件,并以列表方式打印
# print(os.remove('log.log')) # 删除一个指定的文件
os.mkdir('oldname')
print(os.rename("oldname","newname")) # 重命名文件/目录)
print(os.stat(os.getcwd())) # 获取文件/目录信息
print(os.pathsep) # 输出用于分割文件路径的字符串';'
print(os.name) # 输出字符串指示当前使用平台。win->'nt'; Linux->'posix'
# print(os.system(command='ls')) # 运行shell命令,直接显示
print(os.environ) # 获得系统的环境变量
print(os.path.abspath(os.getcwd())) # 返回path规范化的绝对路径
print(os.path.split(os.getcwd())) # 将path分割成目录和文件名二元组返回
print(os.path.dirname(os.getcwd())) # 返回path的目录。其实就是os.path.split(path)的第一个元素
print(os.path.basename(os.getcwd())) # 返回path最后的文件名。如果path以/或\结尾,那么就会返回空值。即os.path.split(path)的第二个元素
print(os.path.exists('test')) # 判断path是否存在
print(os.path.isabs(os.getcwd())) # 如果path是绝对路径,返回True
print(os.path.isfile('test')) # 如果path是一个存在的文件,返回True。否则返回False
print(os.path.isdir(os.getcwd())) # 如果path是一个存在的目录,则返回True。否则返回False
print(os.path.getatime(os.getcwd())) # 返回path所指向的文件或者目录的最后存取时间
print(os.path.getmtime(os.getcwd())) # 返回path所指向的文件或者目录的最后修改时间
print('\n---------------------module:sys--------------------')
import sys
print(sys.argv) # 命令行参数List,第一个元素是程序本身路径
# print(sys.exit(0)) # 退出程序,正常退出时exit(0)
print(sys.version) # 获取python的版本信息
print(sys.path) # 返回模块的搜索路径,初始化时使用PYTHONPATH环境变量的值
print(sys.platform) # 返回操作平台的名称
print('\n---------------------module:shutil--------------------')
import shutil
fsrc = open("fsrc.txt", 'w+') #直接打开一个文件,如果文件不存在则创建文件
fdst = open("fdst.txt", 'w') #直接打开一个文件,如果文件不存在则创建文件
fsrc.write('fsrc')
src = "fsrc.txt"
dst = "fdst.txt"
shutil.copyfileobj(fsrc, fdst, length=16*1024) # 将文件内容拷贝到另一个文件中,可以是部分内容
shutil.copyfile(src, dst) # 拷贝文件
shutil.copymode(src, dst) # 仅拷贝权限。内容、组、用户均不变
shutil.copystat(src, dst) # 拷贝状态的信息,包括:mode bits, atime, mtime, flags
shutil.copy(src, dst) # 拷贝文件和权限
shutil.copy2(src, dst) # 拷贝文件和状态信息
shutil.move(src, dst) # 递归的去移动文件
# base_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径
# format: 压缩包种类,“zip”, “tar”, “bztar”,“gztar”
# root_dir: 要压缩的文件夹路径(默认当前目录)
# owner: 用户,默认当前用户
# group: 组,默认当前组
# logger: 用于记录日志,通常是logging.Logger对象
shutil.make_archive('archive_base_name', 'zip',os.getcwd()) # 创建压缩包并返回文件路径,例如:zip、tar
print('\n---------------------module:zipfile--------------------')
#shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的:
# zipfile 压缩解压
import zipfile
# 压缩
z = zipfile.ZipFile('archive_base_name.zip', 'w')
z.write('fdst.txt')
z.close()
# 解压
z = zipfile.ZipFile('archive_base_name.zip', 'r')
z.extractall()
z.close()
print('\n---------------------module:tarfile--------------------')
# tarfile 压缩解压
import tarfile
# 压缩
tar = tarfile.open('your.tar','w')
tar.add('archive_base_name.zip')
# tar.add('/Users/wupeiqi/PycharmProjects/cmdb.zip', arcname='cmdb.zip')
tar.close()
# 解压
tar = tarfile.open('your.tar','r')
tar.extractall() # 可设置解压地址
tar.close()
print('\n---------------------module:xml--------------------')
# xml的格式如下,就是通过<>节点来区别数据结构的:
xmlstr = r'''<?xml version="1.0"?>
<data>
<country name="Liechtenstein">
<rank updated="yes">2</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
<country name="Singapore">
<rank updated="yes">5</rank>
<year>2011</year>
<gdppc>59900</gdppc>
<neighbor name="Malaysia" direction="N"/>
</country>
<country name="Panama">
| <gdppc>13600</gdppc>
<neighbor name="Costa Rica" direction="W"/>
<neighbor name="Colombia" direction="E"/>
</country>
</data>
'''
# xml协议在各个语言里的都 是支持的,在python中可以用以下模块操作xml
import xml.etree.ElementTree as ET
fpxml = open('xmltest.xml', 'w+')
fpxml.write(xmlstr)
fpxml.close()
tree = ET.parse("xmltest.xml")
root = tree.getroot()
print(root.tag)
#遍历xml文档
for child in root:
print(child.tag, child.attrib)
for i in child:
print(i.tag,i.text)
#只遍历year 节点
for node in root.iter('year'):
print(node.tag,node.text)
# 修改和删除xml文档内容
import xml.etree.ElementTree as ET
tree = ET.parse("xmltest.xml")
root = tree.getroot()
#修改
for node in root.iter('year'):
new_year = int(node.text) + 1
node.text = str(new_year)
node.set("updated","yes")
tree.write("xmltest.xml")
#删除node
for country in root.findall('country'):
rank = int(country.find('rank').text)
if rank > 50:
root.remove(country)
tree.write('output.xml')
# 自己创建xml文档
import xml.etree.ElementTree as ET
new_xml = ET.Element("namelist")
name = ET.SubElement(new_xml, "name", attrib={"enrolled": "yes"})
age = ET.SubElement(name, "age", attrib={"checked": "no"})
age = ET.SubElement(name, "age")
age.text = '33'
name2 = ET.SubElement(new_xml, "name", attrib={"enrolled": "no"})
age = ET.SubElement(name2, "age")
age.text = '19'
et = ET.ElementTree(new_xml) # 生成文档对象
et.write("test.xml", encoding="utf-8", xml_declaration=True)
ET.dump(new_xml) # 打印生成的格式
print('\n---------------------module:configparser--------------------')
# 好多软件的常见文档格式如下
'''
[DEFAULT]
compressionlevel = 9
serveraliveinterval = 45
compression = yes
forwardx11 = yes
[bitbucket.org]
user = hg
[topsecret.server.com]
host port = 50022
forwardx11 = no
'''
# python 生成一个这样的文档
try:
import configparser
except ImportError as e:
pass
else:
config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022'
topsecret['ForwardX11'] = 'no'
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
# 写完了还可以再读出来
config = configparser.ConfigParser()
config.sections()
file = config.read('example.ini')
print(file) # ['example.ini']
title = config.sections()
print(title) # ['bitbucket.org', 'topsecret.server.com']
print('bitbucket.org' in config) # True
print('bytebong.com' in config) # False
print(config['bitbucket.org']['User']) # hg
print(config['DEFAULT']['Compression']) # yes
topsecret = config['topsecret.server.com']
print(topsecret['ForwardX11']) # no
print(topsecret['Host Port']) # 50022
for key in config['topsecret.server.com']:
print(key)
'''
输出结果:
host port
forwardx11
compressionlevel
serveraliveinterval
compression
'''
print(config['topsecret.server.com']['Compression']) # yes
# configparser增删改查语法
config = configparser.ConfigParser()
config.read('i.cfg')
secs = config.sections() # 返回配置文件中的主节点
print (secs)
options = config.options('bitbucket.org')
print(options) # 返回所有子节点信息
item_list = config.items('bitbucket.org')
print(item_list) # 列出所有子节点详细信息
val = config.get('topsecret.server.com','host port')
print(val) # 返回单个子节点信息
val2 = config.getint('topsecret.server.com','host port')
print(val2)
# 删除'bitbucket.org'
sec = config.remove_section('bitbucket.org')
config.write(open('i.cfg','w'))
sec2 = config.add_section('huhuan2') # 添加主节点
config.set('huhuan2','k','1111') # 添加子节点
config.set('huhuan','kk','2222')
config.remove_option('huhuan','kk') # 删除子节点
config.write(open('i.cfg','w'))
print('\n---------------------module:logging--------------------')
import logging
# %(message)s 日志信息
# %(levelno)s 日志级别
# datefmt 设置时间格式
# filename 设置日志保存的路径
# level=loggin.INFO意思是,把日志纪录级别设置为INFO,也就是说,只有比日志是INFO或比INFO级别更高的日志才会被纪录到文件里,
# 在这个例子, 第一条日志是不会被纪录的,如果希望纪录debug的日志,那把日志级别改成DEBUG就行了。
logging.basicConfig(format='%(asctime)s %(message)s %(levelno)s', datefmt='%m/%d/%Y %I:%M:%S %p',filename='example.log',level=logging.INFO)
logging.debug('This message should go to the log file')
logging.info('So should this')
logger = logging.getLogger('TEST_LOG') # 获得一个Logger
logger.setLevel(logging.DEBUG) # 设置日志级别
ch = logging.StreamHandler() # logging.StreamHandler这个Handler可以向类似与sys.stdout或者sys.stderr的任何文件对象(file object)输出信息。
ch.setLevel(logging.DEBUG)
fh = logging.FileHandler("access.log") # 用于向一个文件输出日志信息。不过FileHandler会帮你打开这个文件
fh.setLevel(logging.WARNING)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # 设置日志记录的最终输出格式
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# 添加ch,fh到logger
logger.addHandler(ch)
logger.addHandler(fh)
logger.debug('debug message')
logger.info('info message')
logger.warn('warn message')
logger.error('error message')
logger.critical('critical message')
print('\n---------------------module:pickle--------------------')
try:
import pickle
except ImportError as e:
pass
else:
date = {'k1':'123','k2':'hello'}
str = pickle.dumps(date) # pickle.dumps 将数据通过特殊的形式转换为只有python认识的字符串
print(str)
with open('result.pk','w') as fp: # pickle.dump 将数据通过特殊的形式转换为只有python认识的字符串并写入文件
pickle.dump(date,fp)
print('\n---------------------module:json--------------------')
import json
str1 = json.dumps(date) # json.dumps 将数据通过特殊形式转换为所有程序语言都认识的字符串
print(str1)
with open('result1.json','w') as fp: #json.dump 将数据通过特殊的形式转换为只有python认识的字符串并写入文件
json.dump(date,fp) | <rank updated="yes">69</rank>
<year>2011</year>
| random_line_split |
python_module.py | # # -*- coding:utf-8 -*-
import sys
print('\n---------------------module:__builtin__--------------------')
print(vars())
print(dir(sys.modules['__builtin__']))
print('\n---------------------module:time--------------------')
import time
print(time.asctime()) # 返回时间格式:Sun May 7 21:46:15 2017
print(time.time()) # 返回时间戳 ‘1494164954.6677325’
print(time.gmtime()) # 返回本地时间 的struct time对象格式,time.struct_time(tm_year=2017, tm_mon=5, tm_mday=7, tm_hour=22, tm_min=4, tm_sec=53, tm_wday=6, tm_yday=127, tm_isdst=0)
print(time.localtime()) # 返回本地时间 的struct time对象格式,time.struct_time(tm_year=2017, tm_mon=5, tm_mday=7, tm_hour=22, tm_min=4, tm_sec=53, tm_wday=6, tm_yday=127, tm_isdst=0)
print(time.gmtime(time.time()-800000)) # 返回utc时间的struc时间对象格式
print(time.asctime(time.localtime())) # 返回时间格式Sun May 7 22:15:09 2017
print(time.ctime()) # 返回时间格式Sun May 7 22:15:09 2017
print(time.strftime('%Y-%m-%d')) #默认当前时间 2017-05-07
print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())) #默认当前时间 2017-05-07
string_struct = time.strptime("2016/05/22","%Y/%m/%d") # 将日期字符串 转成 struct时间对象格式
print(string_struct) # 返回struct time对象格式 time.struct_time(tm_year=2016, tm_mon=5, tm_mday=22, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=6, tm_yday=143, tm_isdst=-1)
# 将日期字符串转成时间戳
struct_stamp = time.mktime(string_struct) # 将struct time时间对象转成时间戳
print(struct_stamp) # 返回时间戳 ‘1463846400.0’
# 将时间戳转为字符串格式
print(time.gmtime(time.time()-86640)) # 将utc时间戳转换成struct_time格式
print(time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime()) ) # 将utc struct_time格式转成指定的字符串格式
print('\n---------------------module:datetime--------------------')
import datetime
# 时间加减
print(datetime.datetime.now()) # 返回当前时间 2017-05-07 22:36:45.179732
print(datetime.date.fromtimestamp(time.time())) # 时间戳直接转换成日期格式 2017-05-07
print(datetime.datetime.now() + datetime.timedelta(3)) # 返回时间在当前日期上 +3 天
print(datetime.datetime.now() + datetime.timedelta(-3)) # 返回时间在当前日期上 -3 天
print(datetime.datetime.now() + datetime.timedelta(hours= 3)) # 返回时间在当前时间上 +3 小时
print(datetime.datetime.now() + datetime.timedelta(minutes= 30)) # 返回时间在当前时间上 +30 分钟
c_time = datetime.datetime.now()
print(c_time) # 当前时间为 2017-05-07 22:52:44.016732
print(c_time.replace(minute=3,hour=2)) # 时间替换 替换时间为‘2017-05-07 02:03:18.181732’
print(datetime.timedelta) # 表示时间间隔,即两个时间点之间的长度
print (datetime.datetime.now() - datetime.timedelta(days=5)) # 返回时间在当前时间上 -5 天
print('\n---------------------module:calendar--------------------')
# python 日历模块
import calendar
print(calendar.calendar(theyear= 2017)) # 返回2017年整年日历
print(calendar.month(2017,5)) # 返回某年某月的日历,返回类型为字符串类型
calendar.setfirstweekday(calendar.WEDNESDAY) # 设置日历的第一天(第一天以星期三开始)
cal = calendar.month(2017, 4)
print (cal)
print(calendar.monthrange(2017,5)) # 返回某个月的第一天和这个月的所有天数
print(calendar.monthcalendar(2017,5)) # 返回某个月以每一周为元素的序列
cal = calendar.HTMLCalendar(calendar.MONDAY)
print(cal.formatmonth(2017, 5)) # 在html中打印某年某月的日历
print(calendar.isleap(2017)) # 判断是否为闰年
print(calendar.leapdays(2000,2017)) # 判断两个年份间闰年的个数
print('\n---------------------module:random--------------------')
import random
# 随机数
print(random.random()) # 返回一个随机小数'0.4800545746046827'
print(random.randint(1,5)) # 返回(1-5)随机整型数据
print(random.randrange(1,10)) # 返回(1-10)随机数据
# 生成随机验证码
code = ''
for i in range(4):
current = random.randrange(0,4)
if current != i:
temp = chr(random.randint(65,90))
else:
temp = random.randint(0,9)
code += str(temp)
print(code)
print('\n---------------------module:os--------------------')
import os
print(os.getcwd()) # 获得当前工作目录
print(os.chdir(os.getcwd())) # 改变当前脚本的工作路径,相当于shell下的cd
print(os.curdir) # 返回当前目录‘.'
print(os.pardir) # 获取当前目录的父目录字符串名‘..'
print(os.makedirs('dirname1/dirname2')) # 可生成多层递归目录
print(os.removedirs('dirname1/dirname2')) # 若目录为空,则删除,并递归到上一级目录,如若也为空,则删除,依此类推
print(os.mkdir('test4')) # 生成单级目录;相当于shell中mkdir dirname
print(os.rmdir('test4')) # 删除单级空目录,若目录不为空则无法删除,报错;相当于shell中rmdir dirname
print(os.listdir(os.getcwd())) # 列出指定目录下的所有文件和子目录,包括隐藏文件,并以列表方式打印
# print(os.remove('log.log')) # 删除一个指定的文件
os.mkdir('oldname')
print(os.rename("oldname","newname")) # 重命名文件/目录)
print(os.stat(os.getcwd())) # 获取文件/目录信息
print(os.pathsep) # 输出用于分割文件路径的字符串';'
print(os.name) # 输出字符串指示当前使用平台。win->'nt'; Linux->'posix'
# print(os.system(command='ls')) # 运行shell命令,直接显示
print(os.environ) # 获得系统的环境变量
print(os.path.abspath(os.getcwd())) # 返回path规范化的绝对路径
print(os.path.split(os.getcwd())) # 将path分割成目录和文件名二元组返回
print(os.path.dirname(os.getcwd())) # 返回path的目录。其实就是os.path.split(path)的第一个元素
print(os.path.basename(os.getcwd())) # 返回path最后的文件名。如果path以/或\结尾,那么就会返回空值。即os.path.split(path)的第二个元素
print(os.path.exists('test')) # 判断path是否存在
print(os.path.isabs(os.getcwd())) # 如果path是绝对路径,返回True
print(os.path.isfile('test')) # 如果path是一个存在的文件,返回True。否则返回False
print(os.path.isdir(os.getcwd())) # 如果path是一个存在的目录,则返回True。否则返回False
print(os.path.getatime(os.getcwd())) # 返回path所指向的文件或者目录的最后存取时间
print(os.path.getmtime(os.getcwd())) # 返回path所指向的文件或者目录的最后修改时间
print('\n---------------------module:sys--------------------')
import sys
print(sys.argv) # 命令行参数List,第一个元素是程序本身路径
# print(sys.exit(0)) # 退出程序,正常退出时exit(0)
print(sys.version) # 获取python的版本信息
print(sys.path) # 返回模块的搜索路径,初始化时使用PYTHONPATH环境变量的值
print(sys.platform) # 返回操作平台的名称
print('\n---------------------module:shutil--------------------')
import shutil
fsrc = open("fsrc.txt", 'w+') #直接打开一个文件,如果文件不存在则创建文件
fdst = open("fdst.txt", 'w') #直接打开一个文件,如果文件不存在则创建文件
fsrc.write('fsrc')
src = "fsrc.txt"
dst = "fdst.txt"
shutil.copyfileobj(fsrc, fdst, length=16*1024) # 将文件内容拷贝到另一个文件中,可以是部分内容
shutil.copyfile(src, dst) # 拷贝文件
shutil.copymode(src, dst) # 仅拷贝权限。内容、组、用户均不变
shutil.copystat(src, dst) # 拷贝状态的信息,包括:mode bits, atime, mtime, flags
shutil.copy(src, dst) # 拷贝文件和权限
shutil.copy2(src, dst) # 拷贝文件和状态信息
shutil.move(src, dst) # 递归的去移动文件
# base_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径
# format: 压缩包种类,“zip”, “tar”, “bztar”,“gztar”
# root_dir: 要压缩的文件夹路径(默认当前目录)
# owner: 用户,默认当前用户
# group: 组,默认当前组
# logger: 用于记录日志,通常是logging.Logger对象
shutil.make_archive('archive_base_name', 'zip',os.getcwd()) # 创建压缩包并返回文件路径,例如:zip、tar
print('\n---------------------module:zipfile--------------------')
#shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的:
# zipfile 压缩解压
import zipfile
# 压缩
z = zipfile.ZipFile('archive_base_name.zip', 'w')
z.write('fdst.txt')
z.close()
# 解压
z = zipfile.ZipFile('archive_base_name.zip', 'r')
z.extractall()
z.close()
print('\n---------------------module:tarfile--------------------')
# tarfile 压缩解压
import tarfile
# 压缩
tar = tarfile.open('your.tar','w')
tar.add('archive_base_name.zip')
# tar.add('/Users/wupeiqi/PycharmProjects/cmdb.zip', arcname='cmdb.zip')
tar.close()
# 解压
tar = tarfile.open('your.tar','r')
tar.extractall() # 可设置解压地址
tar.close()
print('\n---------------------module:xml--------------------')
# xml的格式如下,就是通过<>节点来区别数据结构的:
xmlstr = r'''<?xml version="1.0"?>
<data>
<country name="Liechtenstein">
<rank updated="yes">2</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
<country name="Singapore">
<rank updated="yes">5</rank>
<year>2011</year>
<gdppc>59900</gdppc>
<neighbor name="Malaysia" direction="N"/>
</country>
<country name="Panama">
<rank updated="yes">69</rank>
<year>2011</year>
<gdppc>13600</gdppc>
<neighbor name="Costa Rica" direction="W"/>
<neighbor name="Colombia" direction="E"/>
</country>
</data>
'''
# xml协议在各个语言里的都 是支持的,在python中可以用以下模块操作xml
import xml.etree.ElementTree as ET
fpxml = open('xmltest.xml', 'w+')
fpxml.write(xmlstr)
fpxml.close()
tree = ET.parse("xmltest.xml")
root = tree.getroot()
print(root.tag)
#遍历xml文档
for child in root:
print(child.tag, child.attrib)
for i in child:
print(i.tag,i.text)
#只遍历year 节点
for node in root.iter('year'):
print(node.tag,node.text)
# 修改和删除xml文档内容
import xml.etree.ElementTree as ET
tree = ET.parse("xmltest.xml")
root = tree.getroot()
#修改
for node in root.iter('year'):
new_year = int(node.text) + 1
node.text = str(new_year)
node.set("updated","yes")
tree.write("xmltest.xml")
#删除node
for country in root.findall('country'):
rank = int(country.find('rank').text)
if rank > 50:
root.remove(country)
tree.write('output.xml')
# 自己创建xml文档
import xml.etree.ElementTree as ET
new_xml = ET.Element("namelist")
name = ET.SubElement(new_xml, "name", attrib={"enrolled": "yes"})
age = ET.SubElement(name, "age", attrib={"checked": "no"})
age = ET.SubElement(name, "age")
age.text = '33'
name2 = ET.SubElement(new_xml, "name", attrib={"enrolled": "no"})
age = ET.SubElement(name2, "age")
age.text = '19'
et = ET.ElementTree(new_xml) # 生成文档对象
et.write("test.xml", encoding="utf-8", xml_declaration=True)
ET.dump(new_xml) # 打印生成的格式
print('\n---------------------module:configparser--------------------')
# 好多软件的常见文档格式如下
'''
[DEFAULT]
compressionlevel = 9
serveraliveinterval = 45
compression = yes
forwardx11 = yes
[bitbucket.org]
user = hg
[topsecret.server.com]
host port = 50022
forwardx11 = no
'''
# python 生成一个这样的文档
try:
import configparser
except ImportError as e:
pass
else:
config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022'
topsecret['ForwardX11'] = 'no'
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
# 写完了还可以再读出来
config = configparser.ConfigParser()
config.sections()
file = config.read('example.ini')
print(file) # ['example.ini']
title = config.sections()
print(title) # ['bitbucket.org', 'topsecret.server.com']
print('bitbucket.org' in config) # True
print('bytebong.com' in config) # False
print(config['bitbucket.org']['User']) # hg
print(config['DEFAULT']['Compression']) # yes
topsecret = config['topsecret.server.com']
print(topsecret['ForwardX11']) # no
print(topsecret['Host Port']) # 50022
for key in config['topsecret.server.com']:
print(key)
'''
输出结果:
host port
forwardx11
compressionlevel
serveraliveinterval
compression
'''
print(config['topsecret.server.com']['Compression']) # yes
# configparser增删改查语法
config = configparser.ConfigParser()
config.read('i.cfg')
secs = config.sections() # 返回配置文件中的主节点
print (secs)
options = config.options('bitbucket.org')
pr | # 返回所有子节点信息
item_list = config.items('bitbucket.org')
print(item_list) # 列出所有子节点详细信息
val = config.get('topsecret.server.com','host port')
print(val) # 返回单个子节点信息
val2 = config.getint('topsecret.server.com','host port')
print(val2)
# 删除'bitbucket.org'
sec = config.remove_section('bitbucket.org')
config.write(open('i.cfg','w'))
sec2 = config.add_section('huhuan2') # 添加主节点
config.set('huhuan2','k','1111') # 添加子节点
config.set('huhuan','kk','2222')
config.remove_option('huhuan','kk') # 删除子节点
config.write(open('i.cfg','w'))
print('\n---------------------module:logging--------------------')
import logging
# %(message)s 日志信息
# %(levelno)s 日志级别
# datefmt 设置时间格式
# filename 设置日志保存的路径
# level=loggin.INFO意思是,把日志纪录级别设置为INFO,也就是说,只有比日志是INFO或比INFO级别更高的日志才会被纪录到文件里,
# 在这个例子, 第一条日志是不会被纪录的,如果希望纪录debug的日志,那把日志级别改成DEBUG就行了。
logging.basicConfig(format='%(asctime)s %(message)s %(levelno)s', datefmt='%m/%d/%Y %I:%M:%S %p',filename='example.log',level=logging.INFO)
logging.debug('This message should go to the log file')
logging.info('So should this')
logger = logging.getLogger('TEST_LOG') # 获得一个Logger
logger.setLevel(logging.DEBUG) # 设置日志级别
ch = logging.StreamHandler() # logging.StreamHandler这个Handler可以向类似与sys.stdout或者sys.stderr的任何文件对象(file object)输出信息。
ch.setLevel(logging.DEBUG)
fh = logging.FileHandler("access.log") # 用于向一个文件输出日志信息。不过FileHandler会帮你打开这个文件
fh.setLevel(logging.WARNING)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # 设置日志记录的最终输出格式
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# 添加ch,fh到logger
logger.addHandler(ch)
logger.addHandler(fh)
logger.debug('debug message')
logger.info('info message')
logger.warn('warn message')
logger.error('error message')
logger.critical('critical message')
print('\n---------------------module:pickle--------------------')
try:
import pickle
except ImportError as e:
pass
else:
date = {'k1':'123','k2':'hello'}
str = pickle.dumps(date) # pickle.dumps 将数据通过特殊的形式转换为只有python认识的字符串
print(str)
with open('result.pk','w') as fp: # pickle.dump 将数据通过特殊的形式转换为只有python认识的字符串并写入文件
pickle.dump(date,fp)
print('\n---------------------module:json--------------------')
import json
str1 = json.dumps(date) # json.dumps 将数据通过特殊形式转换为所有程序语言都认识的字符串
print(str1)
with open('result1.json','w') as fp: #json.dump 将数据通过特殊的形式转换为只有python认识的字符串并写入文件
json.dump(date,fp) | int(options) | conditional_block |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min .. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
pub timer: Option<tokio_timer::Sleep>,
pub measurements: Measurements,
pub tx_high_water_mark: TSN,
}
impl State {
pub fn new(tx_high_water_mark: TSN) -> State {
State {
timer: None,
measurements: Measurements::new(),
tx_high_water_mark,
}
}
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
fn on_outgoing_data(&mut self, chunk_tsn: TSN);
fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
fn on_timeout(&mut self);
}
impl Retransmission for Association {
fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
// On fresh transmissions, perform RTT measurements.
if chunk_tsn > self.rtx.tx_high_water_mark {
// This is a newly sent chunk (not a retransmission), so take a measurement if needed.
self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
// Raise the high water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk. | if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn complete_rtt_measurement(&mut self) {
// We have received acknowledgement of the receipt of the measurement TSN, so calculate the
// RTT and related variables.
let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
let rtt = rtt_start.elapsed();
let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
match self.rtt_smoothing {
Some(SmoothingState {
mut srtt,
mut rttvar,
}) => {
// Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
#[inline]
fn duration_difference(a: &Duration, b: &Duration) -> Duration {
if *a > *b {
*a - *b
} else {
*b - *a
}
}
let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
// RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
rttvar = rttvar * (beta.1 - beta.0) / beta.1
+ duration_difference(&srtt, &rtt) * beta.0 / beta.1;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
None => {
// No current SRTT/RTTVAR has yet been established, so initialize these according
// to RFC 4960 6.3.1 C2.
// SRTT <- R
let srtt = rtt;
// RTTVAR <- R/2
let mut rttvar = rtt / 2;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
}
if self.rto < min {
self.rto = min;
} else if self.rto > max {
self.rto = max;
}
fn duration_to_us(duration: Duration) -> u32 {
duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
}
trace!(
"New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
duration_to_us(rtt),
duration_to_us(self.rtt_smoothing.unwrap().srtt),
duration_to_us(self.rtt_smoothing.unwrap().rttvar),
duration_to_us(self.rto),
);
// TODO: [6.3.1] C4-C7 ?
}
} | let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone()); | random_line_split |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min .. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
pub timer: Option<tokio_timer::Sleep>,
pub measurements: Measurements,
pub tx_high_water_mark: TSN,
}
impl State {
pub fn new(tx_high_water_mark: TSN) -> State {
State {
timer: None,
measurements: Measurements::new(),
tx_high_water_mark,
}
}
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
fn on_outgoing_data(&mut self, chunk_tsn: TSN);
fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
fn on_timeout(&mut self);
}
impl Retransmission for Association {
fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
// On fresh transmissions, perform RTT measurements.
if chunk_tsn > self.rtx.tx_high_water_mark {
// This is a newly sent chunk (not a retransmission), so take a measurement if needed.
self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
// Raise the high water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk.
let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone());
if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) |
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn complete_rtt_measurement(&mut self) {
// We have received acknowledgement of the receipt of the measurement TSN, so calculate the
// RTT and related variables.
let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
let rtt = rtt_start.elapsed();
let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
match self.rtt_smoothing {
Some(SmoothingState {
mut srtt,
mut rttvar,
}) => {
// Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
#[inline]
fn duration_difference(a: &Duration, b: &Duration) -> Duration {
if *a > *b {
*a - *b
} else {
*b - *a
}
}
let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
// RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
rttvar = rttvar * (beta.1 - beta.0) / beta.1
+ duration_difference(&srtt, &rtt) * beta.0 / beta.1;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
None => {
// No current SRTT/RTTVAR has yet been established, so initialize these according
// to RFC 4960 6.3.1 C2.
// SRTT <- R
let srtt = rtt;
// RTTVAR <- R/2
let mut rttvar = rtt / 2;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
}
if self.rto < min {
self.rto = min;
} else if self.rto > max {
self.rto = max;
}
fn duration_to_us(duration: Duration) -> u32 {
duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
}
trace!(
"New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
duration_to_us(rtt),
duration_to_us(self.rtt_smoothing.unwrap().srtt),
duration_to_us(self.rtt_smoothing.unwrap().rttvar),
duration_to_us(self.rto),
);
// TODO: [6.3.1] C4-C7 ?
}
}
| {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
} | identifier_body |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min .. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
pub timer: Option<tokio_timer::Sleep>,
pub measurements: Measurements,
pub tx_high_water_mark: TSN,
}
impl State {
pub fn new(tx_high_water_mark: TSN) -> State {
State {
timer: None,
measurements: Measurements::new(),
tx_high_water_mark,
}
}
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
fn on_outgoing_data(&mut self, chunk_tsn: TSN);
fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
fn on_timeout(&mut self);
}
impl Retransmission for Association {
fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
// On fresh transmissions, perform RTT measurements.
if chunk_tsn > self.rtx.tx_high_water_mark {
// This is a newly sent chunk (not a retransmission), so take a measurement if needed.
self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
// Raise the high water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk.
let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone());
if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn | (&mut self) {
// We have received acknowledgement of the receipt of the measurement TSN, so calculate the
// RTT and related variables.
let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
let rtt = rtt_start.elapsed();
let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
match self.rtt_smoothing {
Some(SmoothingState {
mut srtt,
mut rttvar,
}) => {
// Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
#[inline]
fn duration_difference(a: &Duration, b: &Duration) -> Duration {
if *a > *b {
*a - *b
} else {
*b - *a
}
}
let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
// RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
rttvar = rttvar * (beta.1 - beta.0) / beta.1
+ duration_difference(&srtt, &rtt) * beta.0 / beta.1;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
None => {
// No current SRTT/RTTVAR has yet been established, so initialize these according
// to RFC 4960 6.3.1 C2.
// SRTT <- R
let srtt = rtt;
// RTTVAR <- R/2
let mut rttvar = rtt / 2;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
}
if self.rto < min {
self.rto = min;
} else if self.rto > max {
self.rto = max;
}
fn duration_to_us(duration: Duration) -> u32 {
duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
}
trace!(
"New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
duration_to_us(rtt),
duration_to_us(self.rtt_smoothing.unwrap().srtt),
duration_to_us(self.rtt_smoothing.unwrap().rttvar),
duration_to_us(self.rto),
);
// TODO: [6.3.1] C4-C7 ?
}
}
| complete_rtt_measurement | identifier_name |
file.rs | //! File operations
//!
//! - read, pread, readv
//! - write, pwrite, writev
//! - lseek
//! - truncate, ftruncate
//! - sendfile, copy_file_range
//! - sync, fsync, fdatasync
//! - ioctl, fcntl
//! - access, faccessat
use super::*;
use linux_object::{process::FsInfo, time::TimeSpec};
impl Syscall<'_> {
/// Reads from a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the opensyscall. Returns bytes read successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer to fill with read contents
/// - len – number of bytes to read
pub async fn sys_read(&self, fd: FileDesc, mut base: UserOutPtr<u8>, len: usize) -> SysResult {
info!("read: fd={:?}, base={:?}, len={:#x}", fd, base, len);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate | : UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow = !flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec != UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec != UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
f_bfree: info.bfree as _,
f_bavail: info.bavail as _,
f_files: info.files as _,
f_ffree: info.ffree as _,
// 一个由 OS 决定的号码,用于区分文件系统
f_fsid: (0, 0),
f_namelen: info.namemax as _,
f_frsize: info.frsize as _,
// TODO 需要先实现挂载
f_flags: 0,
f_spare: [0; 4],
}
}
}
numeric_enum_macro::numeric_enum! {
#[repr(usize)]
#[allow(non_camel_case_types)]
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
/// fcntl flags
pub enum FcntlCmd {
/// dup
DUPFD = 0,
/// get close_on_exec
GETFD = 1,
/// set/clear close_on_exec
SETFD = 2,
/// get file->f_flags
GETFL = 3,
/// set file->f_flags
SETFL = 4,
/// Get record locking info.
GETLK = 5,
/// Set record locking info (non-blocking).
SETLK = 6,
/// Set record locking info (blocking).
SETLKW = 7,
/// like F_DUPFD, but additionally set the close-on-exec flag
DUPFD_CLOEXEC = F_LINUX_SPECIFIC_BASE + 6,
}
}
| (&self, path | identifier_name |
file.rs | //! File operations
//!
//! - read, pread, readv
//! - write, pwrite, writev
//! - lseek
//! - truncate, ftruncate
//! - sendfile, copy_file_range
//! - sync, fsync, fdatasync
//! - ioctl, fcntl
//! - access, faccessat
use super::*;
use linux_object::{process::FsInfo, time::TimeSpec};
impl Syscall<'_> {
/// Reads from a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the opensyscall. Returns bytes read successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer to fill with read contents
/// - len – number of bytes to read
pub async fn sys_read(&self, fd: FileDesc, mut base: UserOutPtr<u8>, len: usize) -> SysResult {
info!("read: fd={:?}, base={:?}, len={:#x}", fd, base, len);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64; | let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow = !flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec != UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec != UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
f_bfree: info.bfree as _,
f_bavail: info.bavail as _,
f_files: info.files as _,
f_ffree: info.ffree as _,
// 一个由 OS 决定的号码,用于区分文件系统
f_fsid: (0, 0),
f_namelen: info.namemax as _,
f_frsize: info.frsize as _,
// TODO 需要先实现挂载
f_flags: 0,
f_spare: [0; 4],
}
}
}
numeric_enum_macro::numeric_enum! {
#[repr(usize)]
#[allow(non_camel_case_types)]
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
/// fcntl flags
pub enum FcntlCmd {
/// dup
DUPFD = 0,
/// get close_on_exec
GETFD = 1,
/// set/clear close_on_exec
SETFD = 2,
/// get file->f_flags
GETFL = 3,
/// set file->f_flags
SETFL = 4,
/// Get record locking info.
GETLK = 5,
/// Set record locking info (non-blocking).
SETLK = 6,
/// Set record locking info (blocking).
SETLKW = 7,
/// like F_DUPFD, but additionally set the close-on-exec flag
DUPFD_CLOEXEC = F_LINUX_SPECIFIC_BASE + 6,
}
} | random_line_split | |
file.rs | //! File operations
//!
//! - read, pread, readv
//! - write, pwrite, writev
//! - lseek
//! - truncate, ftruncate
//! - sendfile, copy_file_range
//! - sync, fsync, fdatasync
//! - ioctl, fcntl
//! - access, faccessat
use super::*;
use linux_object::{process::FsInfo, time::TimeSpec};
impl Syscall<'_> {
/// Reads from a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the opensyscall. Returns bytes read successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer to fill with read contents
/// - len – number of bytes to read
pub async fn sys_read(&self, fd: FileDesc, mut base: UserOutPtr<u8>, len: usize) -> SysResult {
info!("read: fd={:?}, base={:?}, len={:#x}", fd, base, len);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
le | use the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow = !flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec != UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec != UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
f_bfree: info.bfree as _,
f_bavail: info.bavail as _,
f_files: info.files as _,
f_ffree: info.ffree as _,
// 一个由 OS 决定的号码,用于区分文件系统
f_fsid: (0, 0),
f_namelen: info.namemax as _,
f_frsize: info.frsize as _,
// TODO 需要先实现挂载
f_flags: 0,
f_spare: [0; 4],
}
}
}
numeric_enum_macro::numeric_enum! {
#[repr(usize)]
#[allow(non_camel_case_types)]
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
/// fcntl flags
pub enum FcntlCmd {
/// dup
DUPFD = 0,
/// get close_on_exec
GETFD = 1,
/// set/clear close_on_exec
SETFD = 2,
/// get file->f_flags
GETFL = 3,
/// set file->f_flags
SETFL = 4,
/// Get record locking info.
GETLK = 5,
/// Set record locking info (non-blocking).
SETLK = 6,
/// Set record locking info (blocking).
SETLKW = 7,
/// like F_DUPFD, but additionally set the close-on-exec flag
DUPFD_CLOEXEC = F_LINUX_SPECIFIC_BASE + 6,
}
}
| t path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// ca | identifier_body |
file.rs | //! File operations
//!
//! - read, pread, readv
//! - write, pwrite, writev
//! - lseek
//! - truncate, ftruncate
//! - sendfile, copy_file_range
//! - sync, fsync, fdatasync
//! - ioctl, fcntl
//! - access, faccessat
use super::*;
use linux_object::{process::FsInfo, time::TimeSpec};
impl Syscall<'_> {
/// Reads from a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the opensyscall. Returns bytes read successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer to fill with read contents
/// - len – number of bytes to read
pub async fn sys_read(&self, fd: FileDesc, mut base: UserOutPtr<u8>, len: usize) -> SysResult {
info!("read: fd={:?}, base={:?}, len={:#x}", fd, base, len);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
| flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow = !flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec != UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec != UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
f_bfree: info.bfree as _,
f_bavail: info.bavail as _,
f_files: info.files as _,
f_ffree: info.ffree as _,
// 一个由 OS 决定的号码,用于区分文件系统
f_fsid: (0, 0),
f_namelen: info.namemax as _,
f_frsize: info.frsize as _,
// TODO 需要先实现挂载
f_flags: 0,
f_spare: [0; 4],
}
}
}
numeric_enum_macro::numeric_enum! {
#[repr(usize)]
#[allow(non_camel_case_types)]
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
/// fcntl flags
pub enum FcntlCmd {
/// dup
DUPFD = 0,
/// get close_on_exec
GETFD = 1,
/// set/clear close_on_exec
SETFD = 2,
/// get file->f_flags
GETFL = 3,
/// set file->f_flags
SETFL = 4,
/// Get record locking info.
GETLK = 5,
/// Set record locking info (non-blocking).
SETLK = 6,
/// Set record locking info (blocking).
SETLKW = 7,
/// like F_DUPFD, but additionally set the close-on-exec flag
DUPFD_CLOEXEC = F_LINUX_SPECIFIC_BASE + 6,
}
}
| flags |= OpenFlags::CLOEXEC;
} else {
| conditional_block |
cluster.go | package cluster
import (
"context"
"encoding/json"
errors2 "errors"
"fmt"
"reflect"
"github.com/rancher/rancher/pkg/kontainer-engine/logstream"
"github.com/rancher/rancher/pkg/kontainer-engine/types"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
PreCreating = "Pre-Creating"
Creating = "Creating"
PostCheck = "Post-Checking"
Running = "Running"
Error = "Error"
Updating = "Updating"
Init = "Init"
)
var (
// ErrClusterExists This error is checked in rancher, don't change the string
ErrClusterExists = errors2.New("cluster already exists")
)
// Cluster represents a kubernetes cluster
type Cluster struct {
// The cluster driver to provision cluster
Driver types.CloseableDriver `json:"-"`
// The name of the cluster driver
DriverName string `json:"driverName,omitempty" yaml:"driver_name,omitempty"`
// The name of the cluster
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// The status of the cluster
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// specific info about kubernetes cluster
// Kubernetes cluster version
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Service account token to access kubernetes API
ServiceAccountToken string `json:"serviceAccountToken,omitempty" yaml:"service_account_token,omitempty"`
// Kubernetes API master endpoint
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey, | RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err := c.PersistStore.Get(c.Name)
if err != nil {
return err
}
info := toInfo(&cluster)
transformClusterInfo(c, info)
return nil
}
// NewCluster create a cluster interface to do operations
func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(driverName, addr)
if err != nil {
return nil, err
}
return &Cluster{
Driver: rpcClient,
DriverName: driverName,
Name: name,
ConfigGetter: configGetter,
PersistStore: persistStore,
}, nil
}
func FromCluster(cluster *Cluster, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(cluster.DriverName, addr)
if err != nil {
return nil, err
}
cluster.Driver = rpcClient
cluster.ConfigGetter = configGetter
cluster.PersistStore = persistStore
return cluster, nil
}
// flattenIfNotExist take a map into driverOptions, if the key not exist
func flattenIfNotExist(data map[string]interface{}, driverOptions *types.DriverOptions) {
for k, v := range data {
switch v.(type) {
case float64:
if _, exist := driverOptions.IntOptions[k]; !exist {
driverOptions.IntOptions[k] = int64(v.(float64))
}
case string:
if _, exist := driverOptions.StringOptions[k]; !exist {
driverOptions.StringOptions[k] = v.(string)
}
case bool:
if _, exist := driverOptions.BoolOptions[k]; !exist {
driverOptions.BoolOptions[k] = v.(bool)
}
case []interface{}:
// lists of strings come across as lists of interfaces, have to convert them manually
var stringArray []string
for _, stringInterface := range v.([]interface{}) {
switch stringInterface.(type) {
case string:
stringArray = append(stringArray, stringInterface.(string))
}
}
// if the length is 0 then it must not have been an array of strings
if len(stringArray) != 0 {
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: stringArray}
}
}
case []string:
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: v.([]string)}
}
case map[string]interface{}:
// hack for labels
if k == "tags" {
r := make([]string, 0, 4)
for key1, value1 := range v.(map[string]interface{}) {
r = append(r, fmt.Sprintf("%v=%v", key1, value1))
}
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: r}
}
} else {
flattenIfNotExist(v.(map[string]interface{}), driverOptions)
}
case nil:
logrus.Debugf("could not convert %v because value is nil %v=%v", reflect.TypeOf(v), k, v)
default:
logrus.Warnf("could not convert %v %v=%v", reflect.TypeOf(v), k, v)
}
}
} | random_line_split | |
cluster.go | package cluster
import (
"context"
"encoding/json"
errors2 "errors"
"fmt"
"reflect"
"github.com/rancher/rancher/pkg/kontainer-engine/logstream"
"github.com/rancher/rancher/pkg/kontainer-engine/types"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
PreCreating = "Pre-Creating"
Creating = "Creating"
PostCheck = "Post-Checking"
Running = "Running"
Error = "Error"
Updating = "Updating"
Init = "Init"
)
var (
// ErrClusterExists This error is checked in rancher, don't change the string
ErrClusterExists = errors2.New("cluster already exists")
)
// Cluster represents a kubernetes cluster
type Cluster struct {
// The cluster driver to provision cluster
Driver types.CloseableDriver `json:"-"`
// The name of the cluster driver
DriverName string `json:"driverName,omitempty" yaml:"driver_name,omitempty"`
// The name of the cluster
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// The status of the cluster
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// specific info about kubernetes cluster
// Kubernetes cluster version
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Service account token to access kubernetes API
ServiceAccountToken string `json:"serviceAccountToken,omitempty" yaml:"service_account_token,omitempty"`
// Kubernetes API master endpoint
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) | (ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err := c.PersistStore.Get(c.Name)
if err != nil {
return err
}
info := toInfo(&cluster)
transformClusterInfo(c, info)
return nil
}
// NewCluster create a cluster interface to do operations
func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(driverName, addr)
if err != nil {
return nil, err
}
return &Cluster{
Driver: rpcClient,
DriverName: driverName,
Name: name,
ConfigGetter: configGetter,
PersistStore: persistStore,
}, nil
}
func FromCluster(cluster *Cluster, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(cluster.DriverName, addr)
if err != nil {
return nil, err
}
cluster.Driver = rpcClient
cluster.ConfigGetter = configGetter
cluster.PersistStore = persistStore
return cluster, nil
}
// flattenIfNotExist take a map into driverOptions, if the key not exist
func flattenIfNotExist(data map[string]interface{}, driverOptions *types.DriverOptions) {
for k, v := range data {
switch v.(type) {
case float64:
if _, exist := driverOptions.IntOptions[k]; !exist {
driverOptions.IntOptions[k] = int64(v.(float64))
}
case string:
if _, exist := driverOptions.StringOptions[k]; !exist {
driverOptions.StringOptions[k] = v.(string)
}
case bool:
if _, exist := driverOptions.BoolOptions[k]; !exist {
driverOptions.BoolOptions[k] = v.(bool)
}
case []interface{}:
// lists of strings come across as lists of interfaces, have to convert them manually
var stringArray []string
for _, stringInterface := range v.([]interface{}) {
switch stringInterface.(type) {
case string:
stringArray = append(stringArray, stringInterface.(string))
}
}
// if the length is 0 then it must not have been an array of strings
if len(stringArray) != 0 {
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: stringArray}
}
}
case []string:
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: v.([]string)}
}
case map[string]interface{}:
// hack for labels
if k == "tags" {
r := make([]string, 0, 4)
for key1, value1 := range v.(map[string]interface{}) {
r = append(r, fmt.Sprintf("%v=%v", key1, value1))
}
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: r}
}
} else {
flattenIfNotExist(v.(map[string]interface{}), driverOptions)
}
case nil:
logrus.Debugf("could not convert %v because value is nil %v=%v", reflect.TypeOf(v), k, v)
default:
logrus.Warnf("could not convert %v %v=%v", reflect.TypeOf(v), k, v)
}
}
}
| createInner | identifier_name |
cluster.go | package cluster
import (
"context"
"encoding/json"
errors2 "errors"
"fmt"
"reflect"
"github.com/rancher/rancher/pkg/kontainer-engine/logstream"
"github.com/rancher/rancher/pkg/kontainer-engine/types"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
PreCreating = "Pre-Creating"
Creating = "Creating"
PostCheck = "Post-Checking"
Running = "Running"
Error = "Error"
Updating = "Updating"
Init = "Init"
)
var (
// ErrClusterExists This error is checked in rancher, don't change the string
ErrClusterExists = errors2.New("cluster already exists")
)
// Cluster represents a kubernetes cluster
type Cluster struct {
// The cluster driver to provision cluster
Driver types.CloseableDriver `json:"-"`
// The name of the cluster driver
DriverName string `json:"driverName,omitempty" yaml:"driver_name,omitempty"`
// The name of the cluster
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// The status of the cluster
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// specific info about kubernetes cluster
// Kubernetes cluster version
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Service account token to access kubernetes API
ServiceAccountToken string `json:"serviceAccountToken,omitempty" yaml:"service_account_token,omitempty"`
// Kubernetes API master endpoint
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error |
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err := c.PersistStore.Get(c.Name)
if err != nil {
return err
}
info := toInfo(&cluster)
transformClusterInfo(c, info)
return nil
}
// NewCluster create a cluster interface to do operations
func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(driverName, addr)
if err != nil {
return nil, err
}
return &Cluster{
Driver: rpcClient,
DriverName: driverName,
Name: name,
ConfigGetter: configGetter,
PersistStore: persistStore,
}, nil
}
func FromCluster(cluster *Cluster, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(cluster.DriverName, addr)
if err != nil {
return nil, err
}
cluster.Driver = rpcClient
cluster.ConfigGetter = configGetter
cluster.PersistStore = persistStore
return cluster, nil
}
// flattenIfNotExist take a map into driverOptions, if the key not exist
func flattenIfNotExist(data map[string]interface{}, driverOptions *types.DriverOptions) {
for k, v := range data {
switch v.(type) {
case float64:
if _, exist := driverOptions.IntOptions[k]; !exist {
driverOptions.IntOptions[k] = int64(v.(float64))
}
case string:
if _, exist := driverOptions.StringOptions[k]; !exist {
driverOptions.StringOptions[k] = v.(string)
}
case bool:
if _, exist := driverOptions.BoolOptions[k]; !exist {
driverOptions.BoolOptions[k] = v.(bool)
}
case []interface{}:
// lists of strings come across as lists of interfaces, have to convert them manually
var stringArray []string
for _, stringInterface := range v.([]interface{}) {
switch stringInterface.(type) {
case string:
stringArray = append(stringArray, stringInterface.(string))
}
}
// if the length is 0 then it must not have been an array of strings
if len(stringArray) != 0 {
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: stringArray}
}
}
case []string:
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: v.([]string)}
}
case map[string]interface{}:
// hack for labels
if k == "tags" {
r := make([]string, 0, 4)
for key1, value1 := range v.(map[string]interface{}) {
r = append(r, fmt.Sprintf("%v=%v", key1, value1))
}
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: r}
}
} else {
flattenIfNotExist(v.(map[string]interface{}), driverOptions)
}
case nil:
logrus.Debugf("could not convert %v because value is nil %v=%v", reflect.TypeOf(v), k, v)
default:
logrus.Warnf("could not convert %v %v=%v", reflect.TypeOf(v), k, v)
}
}
}
| {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
} | identifier_body |
cluster.go | package cluster
import (
"context"
"encoding/json"
errors2 "errors"
"fmt"
"reflect"
"github.com/rancher/rancher/pkg/kontainer-engine/logstream"
"github.com/rancher/rancher/pkg/kontainer-engine/types"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
PreCreating = "Pre-Creating"
Creating = "Creating"
PostCheck = "Post-Checking"
Running = "Running"
Error = "Error"
Updating = "Updating"
Init = "Init"
)
var (
// ErrClusterExists This error is checked in rancher, don't change the string
ErrClusterExists = errors2.New("cluster already exists")
)
// Cluster represents a kubernetes cluster
type Cluster struct {
// The cluster driver to provision cluster
Driver types.CloseableDriver `json:"-"`
// The name of the cluster driver
DriverName string `json:"driverName,omitempty" yaml:"driver_name,omitempty"`
// The name of the cluster
Name string `json:"name,omitempty" yaml:"name,omitempty"`
// The status of the cluster
Status string `json:"status,omitempty" yaml:"status,omitempty"`
// specific info about kubernetes cluster
// Kubernetes cluster version
Version string `json:"version,omitempty" yaml:"version,omitempty"`
// Service account token to access kubernetes API
ServiceAccountToken string `json:"serviceAccountToken,omitempty" yaml:"service_account_token,omitempty"`
// Kubernetes API master endpoint
Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil |
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err := c.PersistStore.Get(c.Name)
if err != nil {
return err
}
info := toInfo(&cluster)
transformClusterInfo(c, info)
return nil
}
// NewCluster create a cluster interface to do operations
func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(driverName, addr)
if err != nil {
return nil, err
}
return &Cluster{
Driver: rpcClient,
DriverName: driverName,
Name: name,
ConfigGetter: configGetter,
PersistStore: persistStore,
}, nil
}
func FromCluster(cluster *Cluster, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {
rpcClient, err := types.NewClient(cluster.DriverName, addr)
if err != nil {
return nil, err
}
cluster.Driver = rpcClient
cluster.ConfigGetter = configGetter
cluster.PersistStore = persistStore
return cluster, nil
}
// flattenIfNotExist take a map into driverOptions, if the key not exist
func flattenIfNotExist(data map[string]interface{}, driverOptions *types.DriverOptions) {
for k, v := range data {
switch v.(type) {
case float64:
if _, exist := driverOptions.IntOptions[k]; !exist {
driverOptions.IntOptions[k] = int64(v.(float64))
}
case string:
if _, exist := driverOptions.StringOptions[k]; !exist {
driverOptions.StringOptions[k] = v.(string)
}
case bool:
if _, exist := driverOptions.BoolOptions[k]; !exist {
driverOptions.BoolOptions[k] = v.(bool)
}
case []interface{}:
// lists of strings come across as lists of interfaces, have to convert them manually
var stringArray []string
for _, stringInterface := range v.([]interface{}) {
switch stringInterface.(type) {
case string:
stringArray = append(stringArray, stringInterface.(string))
}
}
// if the length is 0 then it must not have been an array of strings
if len(stringArray) != 0 {
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: stringArray}
}
}
case []string:
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: v.([]string)}
}
case map[string]interface{}:
// hack for labels
if k == "tags" {
r := make([]string, 0, 4)
for key1, value1 := range v.(map[string]interface{}) {
r = append(r, fmt.Sprintf("%v=%v", key1, value1))
}
if _, exist := driverOptions.StringSliceOptions[k]; !exist {
driverOptions.StringSliceOptions[k] = &types.StringSlice{Value: r}
}
} else {
flattenIfNotExist(v.(map[string]interface{}), driverOptions)
}
case nil:
logrus.Debugf("could not convert %v because value is nil %v=%v", reflect.TypeOf(v), k, v)
default:
logrus.Warnf("could not convert %v %v=%v", reflect.TypeOf(v), k, v)
}
}
}
| {
return err
} | conditional_block |
http.class.js | /**
* HttpBox - http functions
*
*
* JavaScript
*
* @author Sergii Beskorovainyi <bsa2657@yandex.ru>
* @license MIT <http://www.opensource.org/licenses/mit-license.php>
* @link https://github.com/bsa-git/vuetify-nuxt-start/
*/
import qs from 'qs'
import useragent from 'express-useragent'
class HttpBox {
constructor(context) {
this.context = context;
this.request = context.req;
this.response = context.res;
this.params = context.params;
this.query = context.query;
this.redirect = context.redirect;
this.route = context.route;
this.error = context.error;
}
static httpConst() {
return {
HTTP_CONTINUE: 100,
HTTP_SWITCHING_PROTOCOLS: 101,
HTTP_PROCESSING: 102, // RFC2518
HTTP_OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() |
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) {
postData = JSON.parse(body);
} else {
postData = qs.parse(body);
}
resolve(postData)
})
})
}
/**
* Get UserAgent info for server
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
getUserAgent() {
return useragent.parse(this.request.headers['user-agent']);
}
}
export default HttpBox
| {
return this.request.method
} | identifier_body |
http.class.js | /**
* HttpBox - http functions
*
*
* JavaScript
*
* @author Sergii Beskorovainyi <bsa2657@yandex.ru>
* @license MIT <http://www.opensource.org/licenses/mit-license.php>
* @link https://github.com/bsa-git/vuetify-nuxt-start/
*/
import qs from 'qs'
import useragent from 'express-useragent'
class HttpBox {
constructor(context) {
this.context = context;
this.request = context.req;
this.response = context.res;
this.params = context.params;
this.query = context.query;
this.redirect = context.redirect;
this.route = context.route;
this.error = context.error;
}
static httpConst() {
return {
HTTP_CONTINUE: 100,
HTTP_SWITCHING_PROTOCOLS: 101,
HTTP_PROCESSING: 102, // RFC2518
HTTP_OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() { | return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) {
postData = JSON.parse(body);
} else {
postData = qs.parse(body);
}
resolve(postData)
})
})
}
/**
* Get UserAgent info for server
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
getUserAgent() {
return useragent.parse(this.request.headers['user-agent']);
}
}
export default HttpBox | return (this.request.method === 'POST')
}
isPut() { | random_line_split |
http.class.js | /**
* HttpBox - http functions
*
*
* JavaScript
*
* @author Sergii Beskorovainyi <bsa2657@yandex.ru>
* @license MIT <http://www.opensource.org/licenses/mit-license.php>
* @link https://github.com/bsa-git/vuetify-nuxt-start/
*/
import qs from 'qs'
import useragent from 'express-useragent'
class HttpBox {
constructor(context) {
this.context = context;
this.request = context.req;
this.response = context.res;
this.params = context.params;
this.query = context.query;
this.redirect = context.redirect;
this.route = context.route;
this.error = context.error;
}
static httpConst() {
return {
HTTP_CONTINUE: 100,
HTTP_SWITCHING_PROTOCOLS: 101,
HTTP_PROCESSING: 102, // RFC2518
HTTP_OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) {
postData = JSON.parse(body);
} else {
postData = qs.parse(body);
}
resolve(postData)
})
})
}
/**
* Get UserAgent info for server
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
| () {
return useragent.parse(this.request.headers['user-agent']);
}
}
export default HttpBox
| getUserAgent | identifier_name |
http.class.js | /**
* HttpBox - http functions
*
*
* JavaScript
*
* @author Sergii Beskorovainyi <bsa2657@yandex.ru>
* @license MIT <http://www.opensource.org/licenses/mit-license.php>
* @link https://github.com/bsa-git/vuetify-nuxt-start/
*/
import qs from 'qs'
import useragent from 'express-useragent'
class HttpBox {
constructor(context) {
this.context = context;
this.request = context.req;
this.response = context.res;
this.params = context.params;
this.query = context.query;
this.redirect = context.redirect;
this.route = context.route;
this.error = context.error;
}
static httpConst() {
return {
HTTP_CONTINUE: 100,
HTTP_SWITCHING_PROTOCOLS: 101,
HTTP_PROCESSING: 102, // RFC2518
HTTP_OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) | else {
postData = qs.parse(body);
}
resolve(postData)
})
})
}
/**
* Get UserAgent info for server
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
getUserAgent() {
return useragent.parse(this.request.headers['user-agent']);
}
}
export default HttpBox
| {
postData = JSON.parse(body);
} | conditional_block |
generator.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package generator implements functionality to generate various patient related information, including, but not limited to:
// - person information, ie: name, surname, ethnicity, address, etc.,
// - patient type and class,
// - orders and test results,
// - allergies,
// - diagnosis,
// - procedures.
//
// The data is generated based on information provided in the pathway.
package generator
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/google/simhospital/pkg/clock"
"github.com/google/simhospital/pkg/config"
"github.com/google/simhospital/pkg/doctor"
"github.com/google/simhospital/pkg/gender"
"github.com/google/simhospital/pkg/generator/address"
"github.com/google/simhospital/pkg/generator/codedelement"
"github.com/google/simhospital/pkg/generator/document"
"github.com/google/simhospital/pkg/generator/header"
"github.com/google/simhospital/pkg/generator/id"
"github.com/google/simhospital/pkg/generator/names"
"github.com/google/simhospital/pkg/generator/notes"
"github.com/google/simhospital/pkg/generator/order"
"github.com/google/simhospital/pkg/generator/person"
"github.com/google/simhospital/pkg/generator/text"
"github.com/google/simhospital/pkg/ir"
"github.com/google/simhospital/pkg/logging"
"github.com/google/simhospital/pkg/message"
"github.com/google/simhospital/pkg/orderprofile"
"github.com/google/simhospital/pkg/pathway"
"github.com/google/simhospital/pkg/state"
)
var log = logging.ForCallerPackage()
type randomIDGenerator struct{}
func (g *randomIDGenerator) NewID() string {
return fmt.Sprintf("%d", rand.Uint32())
}
// Generator implements functionality to generate various patient related information based on the information provided
// in the pathway.
type Generator struct {
personGenerator *person.Generator
patientClassGenerator patientClassGenerator
allergyGenerator *codedelement.AllergyGenerator
diagnosisGenerator diagnosisOrProcedureGenerator
procedureGenerator diagnosisOrProcedureGenerator
messageConfig *config.HL7Config
doctors *doctor.Doctors
headerGenerator *header.Generator
orderGenerator *order.Generator
documentGenerator *document.Generator
}
type diagnosisOrProcedureGenerator interface {
RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
if updatePerson.Person != nil {
g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
}
g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
g.setProcedures(patientInfo, updatePerson.Procedures)
g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
p := &state.Patient{
PatientInfo: &ir.PatientInfo{
Class: g.messageConfig.PatientClass.Outpatient,
Person: person,
// The Hospital Service might be overridden later with the doctor's specialty.
HospitalService: g.messageConfig.HospitalService,
AttendingDoctor: doctor,
},
// The code downstream assumes that Orders exists.
Orders: make(map[string]*ir.Order),
Documents: make(map[string]*ir.Document),
}
// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire
// PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
if g.messageConfig.PrimaryFacility != nil |
if doctor != nil {
docWithSpecialty := g.doctors.GetByID(doctor.ID)
if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
p.PatientInfo.HospitalService = docWithSpecialty.Specialty
}
}
return p
}
// NewDoctor returns a new doctor based on the Consultant information from the pathway.
// If consultant is not specified, it returns a random doctor.
// Otherwise, it attempts to lookup an existic doctor basd on consultant ID. If any doctor is found, it returns it.
// Othwerise creates a new doctor from Consultant information, with the default speciality defined in
// messageConfig.HospitalService.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
if c == nil {
return g.doctors.GetRandomDoctor()
}
if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
return doctor
}
newDoctor := &ir.Doctor{
// A valid pathway.Consultant has all the fields set, so we can just dereference.
ID: *c.ID,
Surname: *c.Surname,
Prefix: *c.Prefix,
FirstName: *c.FirstName,
Specialty: g.messageConfig.HospitalService,
}
g.doctors.Add(newDoctor)
return newDoctor
}
// ResetPatient returns a Patient based on the given Patient.
// Medical History (Orders, Encounters) and general information is kept, but other
// information is cleared as if the patient was a new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
newP := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
newP.Orders = p.Orders
newP.PatientInfo.HospitalService = p.PatientInfo.HospitalService
newP.PatientInfo.Encounters = p.PatientInfo.Encounters
newP.PastVisits = p.PastVisits
newP.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
newP.PatientInfo.Allergies = p.PatientInfo.Allergies
return newP
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
switch {
case len(allergies) > 0:
// If the pathway allergies are set, add them to the existing ones.
if patientInfo.Allergies == nil {
patientInfo.Allergies = []*ir.Allergy{}
}
patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
case allergies == nil && patientInfo.Allergies == nil:
// Initialise the allergies to an empty slice so that they're not nil anymore.
patientInfo.Allergies = []*ir.Allergy{}
patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
case allergies != nil && len(allergies) == 0:
// Allergies were set explicitly as an empty slice in the pathway.
patientInfo.Allergies = []*ir.Allergy{}
}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after de-duplication:
// if the allergy is set twice in the pathway, it's added to the list only once. If the allergy
// already exists for the patient, it's not added to the list.
// Note: if the same Allergy is specified with eg. different severity or reaction, it'll be added to
// the list, as there is no way of deleting / amending existing pathwayAllergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
var dedupedAllergies []*ir.Allergy
existing := make(map[ir.Allergy]bool)
for _, a := range patientInfo.Allergies {
existing[*a] = true
}
for _, a := range pathwayAllergies {
code, description := g.allergyGenerator.DeriveCodeAndDescription(a.Code, a.Description)
idt := g.allergyGenerator.DeriveIdentificationDateTime(a)
cs := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, a)
allergy := &ir.Allergy{
Type: a.Type,
Description: ir.CodedElement{
ID: code,
Text: description,
CodingSystem: cs,
},
Severity: a.Severity,
Reaction: a.Reaction,
IdentificationDateTime: idt,
}
if !existing[*allergy] {
existing[*allergy] = true
dedupedAllergies = append(dedupedAllergies, allergy)
}
}
return dedupedAllergies
}
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not want to use the pathway's clinician.
diagnosisOrProcedure[i].Clinician = g.NewDoctor(nil)
}
}
// NewRegistrationPatientClassAndType returns a PatientClassAndType for a patient newly registered.
func (g Generator) NewRegistrationPatientClassAndType() *config.PatientClassAndType {
return g.patientClassGenerator.Random()
}
// NewOrder returns a new order based on order information from the pathway and eventTime.
func (g Generator) NewOrder(o *pathway.Order, eventTime time.Time) *ir.Order {
return g.orderGenerator.NewOrder(o, eventTime)
}
// OrderWithClinicalNote creates an order with a Clinical Note based on the pathway.
func (g Generator) OrderWithClinicalNote(ctx context.Context, o *ir.Order, n *pathway.ClinicalNote, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.OrderWithClinicalNote(ctx, o, n, eventTime)
}
// SetResults sets results on an existing Order based on the results information from the pathway.
// If order is nil, this also creates an Order using details in pathway.Result.
// Returns an error of the retults cannot be created.
func (g Generator) SetResults(o *ir.Order, r *pathway.Results, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.SetResults(o, r, eventTime)
}
// NewVisitID generates a new visit identifier.
func (g Generator) NewVisitID() uint64 {
return rand.Uint64()
}
// NewHeader returns a new header for the given step.
func (g *Generator) NewHeader(step *pathway.Step) *message.HeaderInfo {
return g.headerGenerator.NewHeader(step)
}
// NewDocument returns a NewDocument for MDM^T02 messages.
func (g Generator) NewDocument(eventTime time.Time, d *pathway.Document) *ir.Document {
return g.documentGenerator.Document(eventTime, d)
}
// UpdateDocumentContent updates the given document for MDM^T02 messages.
func (g Generator) UpdateDocumentContent(dm *ir.Document, dp *pathway.Document) error {
return g.documentGenerator.UpdateDocumentContent(dm, dp)
}
// Config contains the configuration for Generator.
type Config struct {
Clock clock.Clock
HL7Config *config.HL7Config
Header *config.Header
AddressGenerator person.AddressGenerator
MRNGenerator id.Generator
PlacerGenerator id.Generator
FillerGenerator id.Generator
textGenerator text.Generator
NotesGenerator order.NotesGenerator
DateGenerator codedelement.DateGenerator
Data *config.Data
Doctors *doctor.Doctors
MsgCtrlGenerator *header.MessageControlGenerator
OrderProfiles *orderprofile.OrderProfiles
}
// NewGenerator creates a new Generator.
func NewGenerator(cfg Config) *Generator {
ag := cfg.AddressGenerator
if ag == nil {
ag = &address.Generator{Nouns: cfg.Data.Nouns, Address: cfg.Data.Address}
}
mrnGenerator := cfg.MRNGenerator
if mrnGenerator == nil {
mrnGenerator = &randomIDGenerator{}
}
placerGenerator := cfg.PlacerGenerator
if placerGenerator == nil {
placerGenerator = &randomIDGenerator{}
}
fillerGenerator := cfg.FillerGenerator
if fillerGenerator == nil {
fillerGenerator = &randomIDGenerator{}
}
tg := cfg.textGenerator
if tg == nil {
tg = &text.NounGenerator{Nouns: cfg.Data.Nouns}
}
ng := cfg.NotesGenerator
if ng == nil {
ng = notes.NewGenerator(cfg.Data, tg)
}
dg := cfg.DateGenerator
if dg == nil {
dg = &codedelement.SimpleDateGenerator{}
}
personGenerator := &person.Generator{
Clock: cfg.Clock,
NameGenerator: &names.Generator{Data: cfg.Data},
GenderConvertor: gender.NewConvertor(cfg.HL7Config),
EthnicityGenerator: person.NewEthnicityGenerator(cfg.Data),
AddressGenerator: ag,
MRNGenerator: mrnGenerator,
Country: cfg.Data.Address.Country,
}
orderGenerator := &order.Generator{
MessageConfig: cfg.HL7Config,
OrderProfiles: cfg.OrderProfiles,
NoteGenerator: ng,
PlacerGenerator: placerGenerator,
FillerGenerator: fillerGenerator,
AbnormalFlagConvertor: order.NewAbnormalFlagConvertor(cfg.HL7Config),
Doctors: cfg.Doctors,
}
return &Generator{
personGenerator: personGenerator,
patientClassGenerator: newPatientClassAndTypeGenerator(cfg.Data),
messageConfig: cfg.HL7Config,
doctors: cfg.Doctors,
allergyGenerator: codedelement.NewAllergyGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
diagnosisGenerator: codedelement.NewDiagnosisGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
procedureGenerator: codedelement.NewProcedureGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
headerGenerator: &header.Generator{Header: cfg.Header, MsgCtrlGen: cfg.MsgCtrlGenerator},
orderGenerator: orderGenerator,
documentGenerator: &document.Generator{DocumentConfig: &cfg.HL7Config.Document, TextGenerator: tg},
}
}
| {
p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{
Organization: g.messageConfig.PrimaryFacility.OrganizationName,
ID: g.messageConfig.PrimaryFacility.IDNumber,
}
} | conditional_block |
generator.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package generator implements functionality to generate various patient related information, including, but not limited to:
// - person information, ie: name, surname, ethnicity, address, etc.,
// - patient type and class,
// - orders and test results,
// - allergies,
// - diagnosis,
// - procedures.
//
// The data is generated based on information provided in the pathway.
package generator
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/google/simhospital/pkg/clock"
"github.com/google/simhospital/pkg/config"
"github.com/google/simhospital/pkg/doctor"
"github.com/google/simhospital/pkg/gender"
"github.com/google/simhospital/pkg/generator/address"
"github.com/google/simhospital/pkg/generator/codedelement"
"github.com/google/simhospital/pkg/generator/document"
"github.com/google/simhospital/pkg/generator/header"
"github.com/google/simhospital/pkg/generator/id"
"github.com/google/simhospital/pkg/generator/names"
"github.com/google/simhospital/pkg/generator/notes"
"github.com/google/simhospital/pkg/generator/order"
"github.com/google/simhospital/pkg/generator/person"
"github.com/google/simhospital/pkg/generator/text"
"github.com/google/simhospital/pkg/ir"
"github.com/google/simhospital/pkg/logging"
"github.com/google/simhospital/pkg/message"
"github.com/google/simhospital/pkg/orderprofile"
"github.com/google/simhospital/pkg/pathway"
"github.com/google/simhospital/pkg/state"
)
var log = logging.ForCallerPackage()
type randomIDGenerator struct{}
func (g *randomIDGenerator) NewID() string {
return fmt.Sprintf("%d", rand.Uint32())
}
// Generator implements functionality to generate various patient related information based on the information provided
// in the pathway.
type Generator struct {
personGenerator *person.Generator
patientClassGenerator patientClassGenerator
allergyGenerator *codedelement.AllergyGenerator
diagnosisGenerator diagnosisOrProcedureGenerator
procedureGenerator diagnosisOrProcedureGenerator
messageConfig *config.HL7Config
doctors *doctor.Doctors
headerGenerator *header.Generator
orderGenerator *order.Generator
documentGenerator *document.Generator
}
type diagnosisOrProcedureGenerator interface {
RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
if updatePerson.Person != nil {
g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
}
g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
g.setProcedures(patientInfo, updatePerson.Procedures)
g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
p := &state.Patient{
PatientInfo: &ir.PatientInfo{
Class: g.messageConfig.PatientClass.Outpatient,
Person: person,
// The Hospital Service might be overridden later with the doctor's specialty.
HospitalService: g.messageConfig.HospitalService,
AttendingDoctor: doctor,
},
// The code downstream assumes that Orders exists.
Orders: make(map[string]*ir.Order),
Documents: make(map[string]*ir.Document),
}
// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire
// PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
if g.messageConfig.PrimaryFacility != nil {
p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{
Organization: g.messageConfig.PrimaryFacility.OrganizationName,
ID: g.messageConfig.PrimaryFacility.IDNumber,
}
}
if doctor != nil {
docWithSpecialty := g.doctors.GetByID(doctor.ID)
if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
p.PatientInfo.HospitalService = docWithSpecialty.Specialty
}
}
return p
}
// NewDoctor returns a new doctor based on the Consultant information from the pathway.
// If consultant is not specified, it returns a random doctor.
// Otherwise, it attempts to lookup an existic doctor basd on consultant ID. If any doctor is found, it returns it.
// Othwerise creates a new doctor from Consultant information, with the default speciality defined in
// messageConfig.HospitalService.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
if c == nil {
return g.doctors.GetRandomDoctor()
}
if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
return doctor
}
newDoctor := &ir.Doctor{
// A valid pathway.Consultant has all the fields set, so we can just dereference.
ID: *c.ID,
Surname: *c.Surname,
Prefix: *c.Prefix,
FirstName: *c.FirstName,
Specialty: g.messageConfig.HospitalService,
}
g.doctors.Add(newDoctor)
return newDoctor
}
// ResetPatient returns a Patient based on the given Patient.
// Medical History (Orders, Encounters) and general information is kept, but other
// information is cleared as if the patient was a new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
newP := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
newP.Orders = p.Orders
newP.PatientInfo.HospitalService = p.PatientInfo.HospitalService
newP.PatientInfo.Encounters = p.PatientInfo.Encounters
newP.PastVisits = p.PastVisits
newP.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
newP.PatientInfo.Allergies = p.PatientInfo.Allergies
return newP
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
switch {
case len(allergies) > 0:
// If the pathway allergies are set, add them to the existing ones.
if patientInfo.Allergies == nil {
patientInfo.Allergies = []*ir.Allergy{}
}
patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
case allergies == nil && patientInfo.Allergies == nil:
// Initialise the allergies to an empty slice so that they're not nil anymore.
patientInfo.Allergies = []*ir.Allergy{}
patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
case allergies != nil && len(allergies) == 0:
// Allergies were set explicitly as an empty slice in the pathway.
patientInfo.Allergies = []*ir.Allergy{}
}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after de-duplication:
// if the allergy is set twice in the pathway, it's added to the list only once. If the allergy
// already exists for the patient, it's not added to the list.
// Note: if the same Allergy is specified with eg. different severity or reaction, it'll be added to
// the list, as there is no way of deleting / amending existing pathwayAllergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
var dedupedAllergies []*ir.Allergy
existing := make(map[ir.Allergy]bool)
for _, a := range patientInfo.Allergies {
existing[*a] = true
}
for _, a := range pathwayAllergies {
code, description := g.allergyGenerator.DeriveCodeAndDescription(a.Code, a.Description)
idt := g.allergyGenerator.DeriveIdentificationDateTime(a)
cs := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, a)
allergy := &ir.Allergy{
Type: a.Type,
Description: ir.CodedElement{
ID: code,
Text: description,
CodingSystem: cs,
},
Severity: a.Severity,
Reaction: a.Reaction,
IdentificationDateTime: idt,
}
if !existing[*allergy] {
existing[*allergy] = true
dedupedAllergies = append(dedupedAllergies, allergy)
}
}
return dedupedAllergies
}
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not want to use the pathway's clinician.
diagnosisOrProcedure[i].Clinician = g.NewDoctor(nil)
}
}
// NewRegistrationPatientClassAndType returns a PatientClassAndType for a patient newly registered.
func (g Generator) NewRegistrationPatientClassAndType() *config.PatientClassAndType {
return g.patientClassGenerator.Random()
}
// NewOrder returns a new order based on order information from the pathway and eventTime.
func (g Generator) NewOrder(o *pathway.Order, eventTime time.Time) *ir.Order {
return g.orderGenerator.NewOrder(o, eventTime)
}
// OrderWithClinicalNote creates an order with a Clinical Note based on the pathway.
func (g Generator) OrderWithClinicalNote(ctx context.Context, o *ir.Order, n *pathway.ClinicalNote, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.OrderWithClinicalNote(ctx, o, n, eventTime)
}
// SetResults sets results on an existing Order based on the results information from the pathway.
// If order is nil, this also creates an Order using details in pathway.Result.
// Returns an error of the retults cannot be created.
func (g Generator) SetResults(o *ir.Order, r *pathway.Results, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.SetResults(o, r, eventTime)
}
// NewVisitID generates a new visit identifier.
func (g Generator) NewVisitID() uint64 {
return rand.Uint64()
}
// NewHeader returns a new header for the given step.
func (g *Generator) NewHeader(step *pathway.Step) *message.HeaderInfo {
return g.headerGenerator.NewHeader(step)
}
// NewDocument returns a NewDocument for MDM^T02 messages.
func (g Generator) NewDocument(eventTime time.Time, d *pathway.Document) *ir.Document {
return g.documentGenerator.Document(eventTime, d)
}
// UpdateDocumentContent updates the given document for MDM^T02 messages.
func (g Generator) UpdateDocumentContent(dm *ir.Document, dp *pathway.Document) error {
return g.documentGenerator.UpdateDocumentContent(dm, dp)
}
// Config contains the configuration for Generator.
type Config struct {
Clock clock.Clock
HL7Config *config.HL7Config
Header *config.Header
AddressGenerator person.AddressGenerator
MRNGenerator id.Generator
PlacerGenerator id.Generator
FillerGenerator id.Generator
textGenerator text.Generator
NotesGenerator order.NotesGenerator
DateGenerator codedelement.DateGenerator
Data *config.Data
Doctors *doctor.Doctors
MsgCtrlGenerator *header.MessageControlGenerator
OrderProfiles *orderprofile.OrderProfiles
}
// NewGenerator creates a new Generator.
func NewGenerator(cfg Config) *Generator | {
ag := cfg.AddressGenerator
if ag == nil {
ag = &address.Generator{Nouns: cfg.Data.Nouns, Address: cfg.Data.Address}
}
mrnGenerator := cfg.MRNGenerator
if mrnGenerator == nil {
mrnGenerator = &randomIDGenerator{}
}
placerGenerator := cfg.PlacerGenerator
if placerGenerator == nil {
placerGenerator = &randomIDGenerator{}
}
fillerGenerator := cfg.FillerGenerator
if fillerGenerator == nil {
fillerGenerator = &randomIDGenerator{}
}
tg := cfg.textGenerator
if tg == nil {
tg = &text.NounGenerator{Nouns: cfg.Data.Nouns}
}
ng := cfg.NotesGenerator
if ng == nil {
ng = notes.NewGenerator(cfg.Data, tg)
}
dg := cfg.DateGenerator
if dg == nil {
dg = &codedelement.SimpleDateGenerator{}
}
personGenerator := &person.Generator{
Clock: cfg.Clock,
NameGenerator: &names.Generator{Data: cfg.Data},
GenderConvertor: gender.NewConvertor(cfg.HL7Config),
EthnicityGenerator: person.NewEthnicityGenerator(cfg.Data),
AddressGenerator: ag,
MRNGenerator: mrnGenerator,
Country: cfg.Data.Address.Country,
}
orderGenerator := &order.Generator{
MessageConfig: cfg.HL7Config,
OrderProfiles: cfg.OrderProfiles,
NoteGenerator: ng,
PlacerGenerator: placerGenerator,
FillerGenerator: fillerGenerator,
AbnormalFlagConvertor: order.NewAbnormalFlagConvertor(cfg.HL7Config),
Doctors: cfg.Doctors,
}
return &Generator{
personGenerator: personGenerator,
patientClassGenerator: newPatientClassAndTypeGenerator(cfg.Data),
messageConfig: cfg.HL7Config,
doctors: cfg.Doctors,
allergyGenerator: codedelement.NewAllergyGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
diagnosisGenerator: codedelement.NewDiagnosisGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
procedureGenerator: codedelement.NewProcedureGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
headerGenerator: &header.Generator{Header: cfg.Header, MsgCtrlGen: cfg.MsgCtrlGenerator},
orderGenerator: orderGenerator,
documentGenerator: &document.Generator{DocumentConfig: &cfg.HL7Config.Document, TextGenerator: tg},
}
} | identifier_body | |
generator.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package generator implements functionality to generate various patient related information, including, but not limited to:
// - person information, ie: name, surname, ethnicity, address, etc.,
// - patient type and class,
// - orders and test results,
// - allergies,
// - diagnosis,
// - procedures.
//
// The data is generated based on information provided in the pathway.
package generator
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/google/simhospital/pkg/clock"
"github.com/google/simhospital/pkg/config"
"github.com/google/simhospital/pkg/doctor"
"github.com/google/simhospital/pkg/gender"
"github.com/google/simhospital/pkg/generator/address"
"github.com/google/simhospital/pkg/generator/codedelement"
"github.com/google/simhospital/pkg/generator/document"
"github.com/google/simhospital/pkg/generator/header"
"github.com/google/simhospital/pkg/generator/id"
"github.com/google/simhospital/pkg/generator/names"
"github.com/google/simhospital/pkg/generator/notes"
"github.com/google/simhospital/pkg/generator/order"
"github.com/google/simhospital/pkg/generator/person"
"github.com/google/simhospital/pkg/generator/text"
"github.com/google/simhospital/pkg/ir"
"github.com/google/simhospital/pkg/logging"
"github.com/google/simhospital/pkg/message"
"github.com/google/simhospital/pkg/orderprofile"
"github.com/google/simhospital/pkg/pathway"
"github.com/google/simhospital/pkg/state"
)
var log = logging.ForCallerPackage()
type randomIDGenerator struct{}
func (g *randomIDGenerator) NewID() string {
return fmt.Sprintf("%d", rand.Uint32())
}
// Generator implements functionality to generate various patient related information based on the information provided
// in the pathway.
type Generator struct {
personGenerator *person.Generator
patientClassGenerator patientClassGenerator
allergyGenerator *codedelement.AllergyGenerator
diagnosisGenerator diagnosisOrProcedureGenerator
procedureGenerator diagnosisOrProcedureGenerator
messageConfig *config.HL7Config
doctors *doctor.Doctors
headerGenerator *header.Generator
orderGenerator *order.Generator
documentGenerator *document.Generator
}
type diagnosisOrProcedureGenerator interface {
RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
if updatePerson.Person != nil {
g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
}
g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
g.setProcedures(patientInfo, updatePerson.Procedures)
g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
p := &state.Patient{
PatientInfo: &ir.PatientInfo{
Class: g.messageConfig.PatientClass.Outpatient,
Person: person,
// The Hospital Service might be overridden later with the doctor's specialty.
HospitalService: g.messageConfig.HospitalService,
AttendingDoctor: doctor,
},
// The code downstream assumes that Orders exists.
Orders: make(map[string]*ir.Order),
Documents: make(map[string]*ir.Document),
}
// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire | Organization: g.messageConfig.PrimaryFacility.OrganizationName,
ID: g.messageConfig.PrimaryFacility.IDNumber,
}
}
if doctor != nil {
docWithSpecialty := g.doctors.GetByID(doctor.ID)
if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
p.PatientInfo.HospitalService = docWithSpecialty.Specialty
}
}
return p
}
// NewDoctor returns a new doctor based on the Consultant information from the pathway.
// If consultant is not specified, it returns a random doctor.
// Otherwise, it attempts to lookup an existic doctor basd on consultant ID. If any doctor is found, it returns it.
// Othwerise creates a new doctor from Consultant information, with the default speciality defined in
// messageConfig.HospitalService.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
if c == nil {
return g.doctors.GetRandomDoctor()
}
if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
return doctor
}
newDoctor := &ir.Doctor{
// A valid pathway.Consultant has all the fields set, so we can just dereference.
ID: *c.ID,
Surname: *c.Surname,
Prefix: *c.Prefix,
FirstName: *c.FirstName,
Specialty: g.messageConfig.HospitalService,
}
g.doctors.Add(newDoctor)
return newDoctor
}
// ResetPatient returns a Patient based on the given Patient.
// Medical History (Orders, Encounters) and general information is kept, but other
// information is cleared as if the patient was a new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
newP := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
newP.Orders = p.Orders
newP.PatientInfo.HospitalService = p.PatientInfo.HospitalService
newP.PatientInfo.Encounters = p.PatientInfo.Encounters
newP.PastVisits = p.PastVisits
newP.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
newP.PatientInfo.Allergies = p.PatientInfo.Allergies
return newP
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
switch {
case len(allergies) > 0:
// If the pathway allergies are set, add them to the existing ones.
if patientInfo.Allergies == nil {
patientInfo.Allergies = []*ir.Allergy{}
}
patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
case allergies == nil && patientInfo.Allergies == nil:
// Initialise the allergies to an empty slice so that they're not nil anymore.
patientInfo.Allergies = []*ir.Allergy{}
patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
case allergies != nil && len(allergies) == 0:
// Allergies were set explicitly as an empty slice in the pathway.
patientInfo.Allergies = []*ir.Allergy{}
}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after de-duplication:
// if the allergy is set twice in the pathway, it's added to the list only once. If the allergy
// already exists for the patient, it's not added to the list.
// Note: if the same Allergy is specified with eg. different severity or reaction, it'll be added to
// the list, as there is no way of deleting / amending existing pathwayAllergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
var dedupedAllergies []*ir.Allergy
existing := make(map[ir.Allergy]bool)
for _, a := range patientInfo.Allergies {
existing[*a] = true
}
for _, a := range pathwayAllergies {
code, description := g.allergyGenerator.DeriveCodeAndDescription(a.Code, a.Description)
idt := g.allergyGenerator.DeriveIdentificationDateTime(a)
cs := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, a)
allergy := &ir.Allergy{
Type: a.Type,
Description: ir.CodedElement{
ID: code,
Text: description,
CodingSystem: cs,
},
Severity: a.Severity,
Reaction: a.Reaction,
IdentificationDateTime: idt,
}
if !existing[*allergy] {
existing[*allergy] = true
dedupedAllergies = append(dedupedAllergies, allergy)
}
}
return dedupedAllergies
}
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not want to use the pathway's clinician.
diagnosisOrProcedure[i].Clinician = g.NewDoctor(nil)
}
}
// NewRegistrationPatientClassAndType returns a PatientClassAndType for a patient newly registered.
func (g Generator) NewRegistrationPatientClassAndType() *config.PatientClassAndType {
return g.patientClassGenerator.Random()
}
// NewOrder returns a new order based on order information from the pathway and eventTime.
func (g Generator) NewOrder(o *pathway.Order, eventTime time.Time) *ir.Order {
return g.orderGenerator.NewOrder(o, eventTime)
}
// OrderWithClinicalNote creates an order with a Clinical Note based on the pathway.
func (g Generator) OrderWithClinicalNote(ctx context.Context, o *ir.Order, n *pathway.ClinicalNote, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.OrderWithClinicalNote(ctx, o, n, eventTime)
}
// SetResults sets results on an existing Order based on the results information from the pathway.
// If order is nil, this also creates an Order using details in pathway.Result.
// Returns an error of the retults cannot be created.
func (g Generator) SetResults(o *ir.Order, r *pathway.Results, eventTime time.Time) (*ir.Order, error) {
return g.orderGenerator.SetResults(o, r, eventTime)
}
// NewVisitID generates a new visit identifier.
func (g Generator) NewVisitID() uint64 {
return rand.Uint64()
}
// NewHeader returns a new header for the given step.
func (g *Generator) NewHeader(step *pathway.Step) *message.HeaderInfo {
return g.headerGenerator.NewHeader(step)
}
// NewDocument returns a NewDocument for MDM^T02 messages.
func (g Generator) NewDocument(eventTime time.Time, d *pathway.Document) *ir.Document {
return g.documentGenerator.Document(eventTime, d)
}
// UpdateDocumentContent updates the given document for MDM^T02 messages.
func (g Generator) UpdateDocumentContent(dm *ir.Document, dp *pathway.Document) error {
return g.documentGenerator.UpdateDocumentContent(dm, dp)
}
// Config contains the configuration for Generator.
type Config struct {
Clock clock.Clock
HL7Config *config.HL7Config
Header *config.Header
AddressGenerator person.AddressGenerator
MRNGenerator id.Generator
PlacerGenerator id.Generator
FillerGenerator id.Generator
textGenerator text.Generator
NotesGenerator order.NotesGenerator
DateGenerator codedelement.DateGenerator
Data *config.Data
Doctors *doctor.Doctors
MsgCtrlGenerator *header.MessageControlGenerator
OrderProfiles *orderprofile.OrderProfiles
}
// NewGenerator creates a new Generator.
func NewGenerator(cfg Config) *Generator {
ag := cfg.AddressGenerator
if ag == nil {
ag = &address.Generator{Nouns: cfg.Data.Nouns, Address: cfg.Data.Address}
}
mrnGenerator := cfg.MRNGenerator
if mrnGenerator == nil {
mrnGenerator = &randomIDGenerator{}
}
placerGenerator := cfg.PlacerGenerator
if placerGenerator == nil {
placerGenerator = &randomIDGenerator{}
}
fillerGenerator := cfg.FillerGenerator
if fillerGenerator == nil {
fillerGenerator = &randomIDGenerator{}
}
tg := cfg.textGenerator
if tg == nil {
tg = &text.NounGenerator{Nouns: cfg.Data.Nouns}
}
ng := cfg.NotesGenerator
if ng == nil {
ng = notes.NewGenerator(cfg.Data, tg)
}
dg := cfg.DateGenerator
if dg == nil {
dg = &codedelement.SimpleDateGenerator{}
}
personGenerator := &person.Generator{
Clock: cfg.Clock,
NameGenerator: &names.Generator{Data: cfg.Data},
GenderConvertor: gender.NewConvertor(cfg.HL7Config),
EthnicityGenerator: person.NewEthnicityGenerator(cfg.Data),
AddressGenerator: ag,
MRNGenerator: mrnGenerator,
Country: cfg.Data.Address.Country,
}
orderGenerator := &order.Generator{
MessageConfig: cfg.HL7Config,
OrderProfiles: cfg.OrderProfiles,
NoteGenerator: ng,
PlacerGenerator: placerGenerator,
FillerGenerator: fillerGenerator,
AbnormalFlagConvertor: order.NewAbnormalFlagConvertor(cfg.HL7Config),
Doctors: cfg.Doctors,
}
return &Generator{
personGenerator: personGenerator,
patientClassGenerator: newPatientClassAndTypeGenerator(cfg.Data),
messageConfig: cfg.HL7Config,
doctors: cfg.Doctors,
allergyGenerator: codedelement.NewAllergyGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
diagnosisGenerator: codedelement.NewDiagnosisGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
procedureGenerator: codedelement.NewProcedureGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dg),
headerGenerator: &header.Generator{Header: cfg.Header, MsgCtrlGen: cfg.MsgCtrlGenerator},
orderGenerator: orderGenerator,
documentGenerator: &document.Generator{DocumentConfig: &cfg.HL7Config.Document, TextGenerator: tg},
}
} | // PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
if g.messageConfig.PrimaryFacility != nil {
p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{ | random_line_split |
generator.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package generator implements functionality to generate various patient related information, including, but not limited to:
// - person information, ie: name, surname, ethnicity, address, etc.,
// - patient type and class,
// - orders and test results,
// - allergies,
// - diagnosis,
// - procedures.
//
// The data is generated based on information provided in the pathway.
package generator
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/google/simhospital/pkg/clock"
"github.com/google/simhospital/pkg/config"
"github.com/google/simhospital/pkg/doctor"
"github.com/google/simhospital/pkg/gender"
"github.com/google/simhospital/pkg/generator/address"
"github.com/google/simhospital/pkg/generator/codedelement"
"github.com/google/simhospital/pkg/generator/document"
"github.com/google/simhospital/pkg/generator/header"
"github.com/google/simhospital/pkg/generator/id"
"github.com/google/simhospital/pkg/generator/names"
"github.com/google/simhospital/pkg/generator/notes"
"github.com/google/simhospital/pkg/generator/order"
"github.com/google/simhospital/pkg/generator/person"
"github.com/google/simhospital/pkg/generator/text"
"github.com/google/simhospital/pkg/ir"
"github.com/google/simhospital/pkg/logging"
"github.com/google/simhospital/pkg/message"
"github.com/google/simhospital/pkg/orderprofile"
"github.com/google/simhospital/pkg/pathway"
"github.com/google/simhospital/pkg/state"
)
var log = logging.ForCallerPackage()
type randomIDGenerator struct{}
func (g *randomIDGenerator) NewID() string {
return fmt.Sprintf("%d", rand.Uint32())
}
// Generator implements functionality to generate various patient related information based on the information provided
// in the pathway.
type Generator struct {
personGenerator *person.Generator
patientClassGenerator patientClassGenerator
allergyGenerator *codedelement.AllergyGenerator
diagnosisGenerator diagnosisOrProcedureGenerator
procedureGenerator diagnosisOrProcedureGenerator
messageConfig *config.HL7Config
doctors *doctor.Doctors
headerGenerator *header.Generator
orderGenerator *order.Generator
documentGenerator *document.Generator
}
type diagnosisOrProcedureGenerator interface {
RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
if updatePerson.Person != nil {
g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
}
g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
g.setProcedures(patientInfo, updatePerson.Procedures)
g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on the given Person and doctor.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
	info := &ir.PatientInfo{
		Class:  g.messageConfig.PatientClass.Outpatient,
		Person: person,
		// The Hospital Service might be overridden below with the doctor's specialty.
		HospitalService: g.messageConfig.HospitalService,
		AttendingDoctor: doctor,
	}
	// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message
	// to have the entire PD1.3 Patient Primary Facility field empty. This is achieved by leaving
	// info.PrimaryFacility nil.
	if pf := g.messageConfig.PrimaryFacility; pf != nil {
		info.PrimaryFacility = &ir.PrimaryFacility{
			Organization: pf.OrganizationName,
			ID:           pf.IDNumber,
		}
	}
	if doctor != nil {
		if known := g.doctors.GetByID(doctor.ID); known != nil && known.Specialty != "" {
			info.HospitalService = known.Specialty
		}
	}
	return &state.Patient{
		PatientInfo: info,
		// The code downstream assumes that Orders exists.
		Orders:    make(map[string]*ir.Order),
		Documents: make(map[string]*ir.Document),
	}
}
// NewDoctor returns a doctor based on the Consultant information from the pathway.
// If the consultant is nil, a random doctor is returned.
// Otherwise, it attempts to look up an existing doctor by the consultant ID and
// returns it if found. Failing that, it creates a new doctor from the Consultant
// information, with the default specialty taken from messageConfig.HospitalService,
// and registers it so subsequent lookups find the same doctor.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
	if c == nil {
		return g.doctors.GetRandomDoctor()
	}
	if existing := g.doctors.GetByID(*c.ID); existing != nil {
		return existing
	}
	// A valid pathway.Consultant has all the fields set, so dereferencing is safe.
	d := &ir.Doctor{
		ID:        *c.ID,
		Surname:   *c.Surname,
		Prefix:    *c.Prefix,
		FirstName: *c.FirstName,
		Specialty: g.messageConfig.HospitalService,
	}
	g.doctors.Add(d)
	return d
}
// ResetPatient returns a Patient based on the given Patient.
// Medical history (Orders, Encounters, past visits) and general information are
// carried over, while everything else is reset as for a brand-new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
	reset := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
	reset.Orders = p.Orders
	reset.PastVisits = p.PastVisits
	reset.PatientInfo.HospitalService = p.PatientInfo.HospitalService
	reset.PatientInfo.Encounters = p.PatientInfo.Encounters
	reset.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
	reset.PatientInfo.Allergies = p.PatientInfo.Allergies
	return reset
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
//   - Allergies listed in the pathway are always appended to the existing allergies
//     on the patientInfo.
//   - If the pathway does not specify allergies (nil) and the allergies on the
//     patientInfo have never been initialised (also nil), they are initialised to an
//     empty slice (so that no further generation attempt happens on the next ADT-like
//     event, which would inflate the likelihood of the patient having allergies) and
//     random allergies are generated once.
//   - If the pathway explicitly sets allergies to an empty slice, the allergies on
//     the patientInfo are set to an empty slice as well.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
	if len(allergies) > 0 {
		// Pathway allergies are set: append them to whatever is already there.
		if patientInfo.Allergies == nil {
			patientInfo.Allergies = []*ir.Allergy{}
		}
		patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
	} else if allergies == nil && patientInfo.Allergies == nil {
		// Initialise to non-nil first, then generate random allergies once.
		patientInfo.Allergies = []*ir.Allergy{}
		patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
	} else if allergies != nil {
		// len(allergies) == 0 here: explicitly cleared in the pathway.
		patientInfo.Allergies = []*ir.Allergy{}
	}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after
// de-duplication: an allergy set twice in the pathway is added only once, and an
// allergy the patient already has is not added at all.
// Note: the same allergy specified with e.g. a different severity or reaction IS
// added, as there is no way of deleting / amending existing allergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
	seen := make(map[ir.Allergy]bool)
	for _, have := range patientInfo.Allergies {
		seen[*have] = true
	}
	var deduped []*ir.Allergy
	for _, pa := range pathwayAllergies {
		code, description := g.allergyGenerator.DeriveCodeAndDescription(pa.Code, pa.Description)
		identified := g.allergyGenerator.DeriveIdentificationDateTime(pa)
		codingSystem := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, pa)
		candidate := &ir.Allergy{
			Type: pa.Type,
			Description: ir.CodedElement{
				ID:           code,
				Text:         description,
				CodingSystem: codingSystem,
			},
			Severity:               pa.Severity,
			Reaction:               pa.Reaction,
			IdentificationDateTime: identified,
		}
		if seen[*candidate] {
			continue
		}
		seen[*candidate] = true
		deduped = append(deduped, candidate)
	}
	return deduped
}
// setDiagnoses replaces patientInfo.Diagnoses with entries derived from the
// pathway diagnoses (random or from-pathway, per the diagnosis generator).
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
	patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
	g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
func (g Generator) | (patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
// setDiagnosesOrProcedures fills diagnosisOrProcedure (pre-sized by the caller)
// with one entry per pathway item, each assigned a freshly chosen clinician.
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
	for i := range fromPathway {
		item := fromPathway[i]
		entry := dpg.RandomOrFromPathway(item.DateTime, item)
		// By design, diagnoses and procedures don't reuse the clinician from the pathway:
		// past diagnoses and procedures could have been done by other clinicians, not the
		// current one.
		entry.Clinician = g.NewDoctor(nil)
		diagnosisOrProcedure[i] = entry
	}
}
// NewRegistrationPatientClassAndType returns a random PatientClassAndType for a
// newly registered patient.
func (g Generator) NewRegistrationPatientClassAndType() *config.PatientClassAndType {
	return g.patientClassGenerator.Random()
}
// NewOrder returns a new order based on the order information from the pathway
// and the given event time.
func (g Generator) NewOrder(o *pathway.Order, eventTime time.Time) *ir.Order {
	return g.orderGenerator.NewOrder(o, eventTime)
}
// OrderWithClinicalNote creates an order with a Clinical Note based on the pathway.
// The error, if any, comes from the underlying order generator.
func (g Generator) OrderWithClinicalNote(ctx context.Context, o *ir.Order, n *pathway.ClinicalNote, eventTime time.Time) (*ir.Order, error) {
	return g.orderGenerator.OrderWithClinicalNote(ctx, o, n, eventTime)
}
// SetResults sets results on an existing Order based on the results information from the pathway.
// If order is nil, this also creates an Order using details in pathway.Results.
// Returns an error if the results cannot be created.
func (g Generator) SetResults(o *ir.Order, r *pathway.Results, eventTime time.Time) (*ir.Order, error) {
	return g.orderGenerator.SetResults(o, r, eventTime)
}
// NewVisitID generates a new visit identifier.
// IDs are random 64-bit values, so uniqueness is probabilistic rather than guaranteed.
func (g Generator) NewVisitID() uint64 {
	return rand.Uint64()
}
// NewHeader returns a new message header for the given pathway step.
// NOTE(review): this is the only pointer-receiver method on Generator — presumably
// because header generation advances message-control state; confirm before changing.
func (g *Generator) NewHeader(step *pathway.Step) *message.HeaderInfo {
	return g.headerGenerator.NewHeader(step)
}
// NewDocument returns a new Document for MDM^T02 messages, based on the pathway
// document definition and the event time.
func (g Generator) NewDocument(eventTime time.Time, d *pathway.Document) *ir.Document {
	return g.documentGenerator.Document(eventTime, d)
}
// UpdateDocumentContent updates the given document for MDM^T02 messages.
// The error, if any, comes from the underlying document generator.
func (g Generator) UpdateDocumentContent(dm *ir.Document, dp *pathway.Document) error {
	return g.documentGenerator.UpdateDocumentContent(dm, dp)
}
// Config contains the configuration for Generator.
// Optional generator fields left nil are given default implementations by NewGenerator.
type Config struct {
	Clock      clock.Clock
	HL7Config  *config.HL7Config
	Header     *config.Header
	// AddressGenerator is optional; defaults to an address.Generator.
	AddressGenerator person.AddressGenerator
	// MRNGenerator, PlacerGenerator and FillerGenerator are optional; each defaults
	// to a randomIDGenerator.
	MRNGenerator    id.Generator
	PlacerGenerator id.Generator
	FillerGenerator id.Generator
	// textGenerator is unexported and can therefore only be set from within this
	// package. NOTE(review): presumably intended for tests — confirm whether it
	// should be exported like the other generator fields.
	textGenerator text.Generator
	// NotesGenerator is optional; defaults to notes.NewGenerator.
	NotesGenerator order.NotesGenerator
	// DateGenerator is optional; defaults to a codedelement.SimpleDateGenerator.
	DateGenerator    codedelement.DateGenerator
	Data             *config.Data
	Doctors          *doctor.Doctors
	MsgCtrlGenerator *header.MessageControlGenerator
	OrderProfiles    *orderprofile.OrderProfiles
}
// NewGenerator creates a new Generator, substituting default implementations for
// any optional Config fields that were left unset.
func NewGenerator(cfg Config) *Generator {
	addressGen := cfg.AddressGenerator
	if addressGen == nil {
		addressGen = &address.Generator{Nouns: cfg.Data.Nouns, Address: cfg.Data.Address}
	}
	mrnGen := cfg.MRNGenerator
	if mrnGen == nil {
		mrnGen = &randomIDGenerator{}
	}
	placerGen := cfg.PlacerGenerator
	if placerGen == nil {
		placerGen = &randomIDGenerator{}
	}
	fillerGen := cfg.FillerGenerator
	if fillerGen == nil {
		fillerGen = &randomIDGenerator{}
	}
	textGen := cfg.textGenerator
	if textGen == nil {
		textGen = &text.NounGenerator{Nouns: cfg.Data.Nouns}
	}
	// The notes generator consumes the text generator, so default it afterwards.
	notesGen := cfg.NotesGenerator
	if notesGen == nil {
		notesGen = notes.NewGenerator(cfg.Data, textGen)
	}
	dateGen := cfg.DateGenerator
	if dateGen == nil {
		dateGen = &codedelement.SimpleDateGenerator{}
	}
	return &Generator{
		personGenerator: &person.Generator{
			Clock:              cfg.Clock,
			NameGenerator:      &names.Generator{Data: cfg.Data},
			GenderConvertor:    gender.NewConvertor(cfg.HL7Config),
			EthnicityGenerator: person.NewEthnicityGenerator(cfg.Data),
			AddressGenerator:   addressGen,
			MRNGenerator:       mrnGen,
			Country:            cfg.Data.Address.Country,
		},
		patientClassGenerator: newPatientClassAndTypeGenerator(cfg.Data),
		messageConfig:         cfg.HL7Config,
		doctors:               cfg.Doctors,
		allergyGenerator:      codedelement.NewAllergyGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dateGen),
		diagnosisGenerator:    codedelement.NewDiagnosisGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dateGen),
		procedureGenerator:    codedelement.NewProcedureGenerator(cfg.HL7Config, cfg.Data, cfg.Clock, dateGen),
		headerGenerator:       &header.Generator{Header: cfg.Header, MsgCtrlGen: cfg.MsgCtrlGenerator},
		orderGenerator: &order.Generator{
			MessageConfig:         cfg.HL7Config,
			OrderProfiles:         cfg.OrderProfiles,
			NoteGenerator:         notesGen,
			PlacerGenerator:       placerGen,
			FillerGenerator:       fillerGen,
			AbnormalFlagConvertor: order.NewAbnormalFlagConvertor(cfg.HL7Config),
			Doctors:               cfg.Doctors,
		},
		documentGenerator: &document.Generator{DocumentConfig: &cfg.HL7Config.Document, TextGenerator: textGen},
	}
}
| setProcedures | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.