file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.py | float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores()
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code)
continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def | (out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
| write_line | identifier_name |
main.py | float(args[10])
dGt3 = float(args[11])
out_file = args[12]
all_file = args[13]
all_kept_mutants = []
all_mutants_tried = []
output_dict = {}
count = 0
initialize_output_files(out_file, all_file)
if available_mutations == 'list':
remaining_mutations = [mut.strip() for mut in list(open('mutations.txt', 'r').readlines())]
else:
remaining_mutations = ['unused', 'unused']
pyros.runFoldxRepair(prefix)
score_ob = pyros.Scores()
score_ob.cleanUp([])
repair_file = glob.glob(prefix + '.clean.pdb')
if len(repair_file) == 1:
shutil.move(repair_file[0], prefix + '.pdb')
else:
raise Exception('No output from RepairPDB.')
pyros.makeInitRepair(prefix)
i = 0
while i < num_mutations and len(remaining_mutations) > 0:
#Make sure the pdb exists
prefix, count, all_kept_mutants, all_mutants_tried, exists = does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried)
if not exists:
continue
if available_mutations == 'random':
(mutation_code, site) = generate_mutation_code(prefix, which_chain)
elif available_mutations == 'list':
(mutation_code, site) = pick_mutation_code_from_list(remaining_mutations)
remaining_mutations.remove(mutation_code)
pyros.runFoldxSimpleMutator(mutation_code, prefix + '.pdb')
(new_mutant_name, old_mutant_name) = recode_mutant_pdb(mutation_code, site, prefix)
print("mut anme",new_mutant_name[0:-4])
print("mut name 2",[new_mutant_name])
pyros.runFoldxRepair(new_mutant_name[0:-4])
repair_file = glob.glob(new_mutant_name[0:-4] + '*.clean.pdb')
shutil.move(repair_file[0], new_mutant_name)
print("runing folx analyze complex")
pyros.runFoldxAnalyzeComplex(new_mutant_name[0:-4] + '_complex', [old_mutant_name, new_mutant_name])
proceed = pyros.checkOutputAnalyzeComplex(new_mutant_name[0:-4])
#See if we got the files we needed from Analyze Complex
if not proceed:
score_ob = pyros.Scores() | continue
#Declare the score parsing object
score_ob = pyros.Scores()
score_ob.parseAnalyzeComplex()
#Grab the scores to be used in the probability calculations
ids = score_ob.getIds()
stab1 = [score_ob.getStability1()[0], score_ob.getStability2()[0]]
stab2 = [score_ob.getStability1()[1], score_ob.getStability2()[1]]
binding = score_ob.getInteractionEnergies()
thresholds = [dGt1, dGt2, dGt3]
#To this function you need 6 variables: stab1, stab2, binding, N, beta, and threshold
probability = calc_prob(stab1, stab2, binding, population_size, beta, thresholds)
all_mutants_tried.append(new_mutant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def write_line(out_file, line):
output = open(out_file, 'a')
output.write(line)
output.close()
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
file_exists = True
if not os.path.isfile(prefix + '.pdb') and i > 0:
all_kept_mutants = all_kept_mutants[0:-1]
prefix = all_kept_mutants[-1]
all_mutants_tried = all_mutants_tried[0:-1]
count -= 1
file_exists = False
return(prefix, count, all_kept_mutants, all_mutants_tried, file_exists)
def initialize_output_files(out_file, all_file):
output = open(out_file, 'w')
to_file = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
output.write(to_file)
output.close()
output = open(all_file, 'w')
output.write(to_file)
output.close()
def get_pdb_sequence(prefix):
start_name = prefix + '.pdb'
total_length = 0
total_sequence = ''
count = 0
parser = PDB.PDBParser()
structure = parser.get_structure('working_pdb', start_name)
ppb = PDB.PPBuilder()
for pp in ppb.build_peptides(structure):
total_length += len(pp.get_sequence())
total_sequence += pp.get_sequence()
if count == 0:
first_chain_length = total_length
count += 1
return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
chain = 0
chain_letters = ''
residue_numbers = []
#run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
print(total_length - 1)
if which_chain == 'both':
site = random.randint(0, total_length - 1)
elif which_chain == '0':
site = random.randint(0, first_chain_length)
elif which_chain == '1':
site = random.randint(first_chain_length, total_length - 1)
if site > first_chain_length - 1:
chain = 1
for chains in structure.get_chains():
chain_letters += chains.get_id()
for chains in structure.get_residues():
residue_numbers.append(str(chains.get_id()[1]))
mutation = total_sequence[site]
while( mutation == total_sequence[site] ):
mutation = random.choice(pyros.rev_resdict.keys())
mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
#mutation_code = residue_numbers[site] + mutation
return(mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
mutation_code = random.choice(remaining_mutations)
return(mutation_code, mutation_code[2:-1])
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
'''In order to use this function, you need to provide a number of parameters.
The stab1, stab2, and binding data should be coming from the foldx values
and they need to be ABSOLUTE energy not differences. The N, beta and
threshold numbers need to specified for the theoretical population size,
the beta distribution constant, and the soft threshold for survival of
each protein in the complex.
At | score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*', '*energies*'])
remaining_mutations.append(mutation_code) | random_line_split |
main.py | ant_name[0:-4])
count += 1
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(all_file, to_file)
if random.random() < probability or selection == 'no_selection':
print('\n\nPassing to the next round...\n')
score_ob.cleanUp(['*energies*', 'WT_*'])
to_file = str(count) + '.pdb' + '\t' + str(ids[1][0:-4]) + '\t' + str(count) + '\t' + str(stab1[1]) + '\t' + str(stab2[1]) + '\t' + str(binding[1]) + '\t' + str(probability) + '\n'
write_line(out_file, to_file)
shutil.move(new_mutant_name, str(count) + '.pdb')
shutil.move(old_mutant_name, str(count) + '.wt.pdb')
prefix = str(count)
all_kept_mutants.append(new_mutant_name[0:-4])
i+=1
elif available_mutations == 'list':
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
remaining_mutations.append(mutation_code)
if tried_or_fixed == 'tried':
i+=1
else:
print('\n\nMutation is being reverted...\n')
score_ob.cleanUp(['*' + new_mutant_name[0:-4] + '*'])
if tried_or_fixed == 'tried':
i+=1
score_ob.cleanUp(['*energies*'])
def write_line(out_file, line):
    """Append *line* (already newline-terminated by callers) to *out_file*.

    Uses a context manager so the handle is closed even if the write
    raises; the original open/close pair leaked the descriptor on error.
    """
    with open(out_file, 'a') as output:
        output.write(line)
def does_file_exist(prefix, i, count, all_kept_mutants, all_mutants_tried):
    """Verify that <prefix>.pdb exists; if it vanished, rewind one mutant.

    On the first iteration (i == 0) the file is assumed present. When
    the pdb is missing later, the most recent kept/tried entries are
    dropped, the count is decremented, and the previous kept mutant
    becomes the new prefix. Returns the (possibly rewound) state plus a
    flag telling the caller whether the expected file was found.
    """
    if i <= 0 or os.path.isfile(prefix + '.pdb'):
        return (prefix, count, all_kept_mutants, all_mutants_tried, True)
    # Expected pdb disappeared: back up to the mutant before the last one.
    trimmed_kept = all_kept_mutants[:-1]
    trimmed_tried = all_mutants_tried[:-1]
    return (trimmed_kept[-1], count - 1, trimmed_kept, trimmed_tried, False)
def initialize_output_files(out_file, all_file):
    """Create/truncate both result files and write the TSV header row.

    *out_file* records only mutants that pass selection; *all_file*
    records every mutant tried. Context managers replace the original
    open/close pairs so handles are released even on write errors.
    """
    header = 'file\tmutant\tcount\tstability1\tstability2\tbinding\tprobability\n'
    for path in (out_file, all_file):
        with open(path, 'w') as output:
            output.write(header)
def get_pdb_sequence(prefix):
    """Parse <prefix>.pdb with Bio.PDB and collect its peptide sequence.

    Returns (total_sequence, total_length, first_chain_length,
    structure), where first_chain_length is the cumulative residue count
    after the first peptide — used by callers to attribute a mutation
    site to chain 0 vs chain 1.

    NOTE(review): if build_peptides yields nothing, first_chain_length
    is never assigned and the return raises UnboundLocalError — confirm
    inputs always contain at least one peptide chain.
    """
    start_name = prefix + '.pdb'
    total_length = 0
    total_sequence = ''
    count = 0
    parser = PDB.PDBParser()
    structure = parser.get_structure('working_pdb', start_name)
    ppb = PDB.PPBuilder()
    for pp in ppb.build_peptides(structure):
        total_length += len(pp.get_sequence())
        # pp.get_sequence() is a Biopython Seq; concatenation relies on
        # its str interop — presumably yields the one-letter sequence.
        total_sequence += pp.get_sequence()
        if count == 0:
            # Remember where the first chain ends.
            first_chain_length = total_length
        count += 1
    return(total_sequence, total_length, first_chain_length, structure)
def generate_mutation_code(prefix, which_chain):
    """Pick a random point mutation for the structure <prefix>.pdb.

    which_chain restricts where the site may fall: 'both', '0' (first
    chain only) or '1' (second chain only). Returns (mutation_code,
    residue_number) where mutation_code is
    wild-type AA + chain letter + residue number + mutant AA,
    the format consumed by the FoldX wrapper.
    """
    total_sequence, total_length, first_chain_length, structure = get_pdb_sequence(prefix)
    chain = 0
    chain_letters = ''
    residue_numbers = []
    #run with python /home/ateufel/Rosetta/rosetta_bin_linux_2015.39.58186_bundle/tools/renumber_pdb.py -p 4foe.pdb -b 900 -o 4foe_renumb.pdb
    print(total_length - 1)
    if which_chain == 'both':
        site = random.randint(0, total_length - 1)
    elif which_chain == '0':
        # NOTE(review): randint's upper bound is inclusive, so this can
        # return first_chain_length itself, which falls in chain 1 —
        # confirm whether `first_chain_length - 1` was intended.
        site = random.randint(0, first_chain_length)
    elif which_chain == '1':
        site = random.randint(first_chain_length, total_length - 1)
    if site > first_chain_length - 1:
        chain = 1
    for chains in structure.get_chains():
        chain_letters += chains.get_id()
    for chains in structure.get_residues():
        residue_numbers.append(str(chains.get_id()[1]))
    # Re-draw until the substitution differs from the wild-type residue.
    mutation = total_sequence[site]
    while mutation == total_sequence[site]:
        # BUG FIX: random.choice needs a sequence; under Python 3 a
        # dict_keys view raised TypeError here, so wrap it in list().
        mutation = random.choice(list(pyros.rev_resdict.keys()))
    mutation_code = total_sequence[site] + chain_letters[chain] + residue_numbers[site] + mutation
    return (mutation_code, residue_numbers[site])
def pick_mutation_code_from_list(remaining_mutations):
    """Draw one mutation code at random and derive its residue number.

    The residue number is the code stripped of its first two characters
    (wild-type AA + chain letter) and its final character (mutant AA).
    Returns (mutation_code, residue_number_string).
    """
    chosen = random.choice(remaining_mutations)
    residue_number = chosen[2:-1]
    return (chosen, residue_number)
def calc_prob(stab1, stab2, binding, N, beta, thresholds):
    """Acceptance probability of a mutant relative to its parent.

    stab1, stab2 and binding are two-element sequences of ABSOLUTE FoldX
    energies: index 0 is the parent, index 1 is the mutant. N is the
    theoretical population size, beta the distribution constant, and
    thresholds the soft survival threshold for each score. Note: as
    written this assumes binding applies to both chains.

    Returns 1.0 when the mutant fitness beats the parent fitness,
    otherwise exp(-2 * N * (xi - xj)) computed overflow-safely.
    """
    parent_scores = [stab1[0], stab2[0], binding[0]]
    mutant_scores = [stab1[1], stab2[1], binding[1]]
    parent_fitness = calc_x(parent_scores, beta, thresholds)
    mutant_fitness = calc_x(mutant_scores, beta, thresholds)
    print("mut:", mutant_scores)
    print("orgin:", parent_scores)
    print("xi:", parent_fitness)
    print("xj:", mutant_fitness)
    if mutant_fitness > parent_fitness:
        return 1.0
    exponent = -2 * float(N) * (parent_fitness - mutant_fitness)
    return safe_calc(exponent)
def calc_x(data, beta, thresholds):
    """Soft-threshold fitness: sum of -log(1 + exp(beta * (E - T))).

    data holds absolute energies and thresholds the matching soft
    threshold for each entry; beta controls how sharply the penalty
    turns on past each threshold. The exponential goes through
    safe_calc so huge exponents clamp instead of overflowing.
    """
    total = 0
    print("comparing:", len(data))
    for idx, value in enumerate(data):
        print("data is:", value)
        exponent = float(beta) * (float(value) - float(thresholds[idx]))
        print("beta is:", float(beta))
        print("threshold is:", float(thresholds[idx]))
        print(exponent)
        print("log exp,", -math.log(safe_calc(exponent) + 1))
        total += -math.log(safe_calc(exponent) + 1)
    print("score:", total)
    return total
def safe_calc(exponent):
    """exp(exponent), clamped so large arguments cannot overflow.

    math.exp raises OverflowError a little above 709; anything past 700
    is treated as effectively infinite and mapped to the largest finite
    float (with a console note).
    """
    if exponent <= 700:
        return math.exp(exponent)
    print("system maxed")
    return sys.float_info.max
def recode_mutant_pdb(mutation_code, site, prefix):
    """Rename FoldX mutator output to <WT-AA><site><mut-AA>.pdb / .wt.pdb.

    mutation_code is e.g. 'QA123W' (wild-type AA, chain, residue, mutant
    AA), site the residue number as a string, and prefix the current
    parent pdb name without extension. Returns (mutant_pdb_name,
    wildtype_pdb_name).

    Side effects: copies/moves pdb files in the working directory and
    removes the leftover '*_<prefix>.pdb' files from PositionScan.
    """
    recoded_mutant = mutation_code[0] + site + mutation_code[-1]
    print("what its looking for", recoded_mutant)
    new_test = recoded_mutant + '.pdb'
    old_test = recoded_mutant + '.wt.pdb'
    # If this mutant name was produced before, archive the previous pair
    # under a numeric suffix instead of overwriting it.
    # NOTE(review): glob(recoded_mutant) matches only an extensionless
    # file of that exact name — confirm whether recoded_mutant + '*'
    # was intended.
    existing = glob.glob(recoded_mutant)
    if len(existing) / 2 > 0:
        # BUG FIX: the original referenced the undefined name
        # `new_mutant_name` here (NameError on this path) and used true
        # division, which yields names like 'Q123W_0.5.pdb' on Python 3.
        suffix = '_' + str(len(existing) // 2)
        shutil.move(new_test, recoded_mutant + suffix + '.pdb')
        shutil.move(old_test, recoded_mutant + suffix + '.wt.pdb')
    print(prefix)
    print("moving to new_test", str(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb'))
    shutil.copy(prefix + '.pdb', recoded_mutant + '.wt.pdb')
    print(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb')
    # BUG FIX: the original called foldx.rev_resdict, but every other
    # reference in this file uses the pyros module and no `foldx` name
    # exists in scope.
    shutil.move(pyros.rev_resdict[mutation_code[-1]] + site + '_' + prefix + '.pdb', new_test)
    # Remove the unused files that are output from position scan.
    old_files = glob.glob('*_' + prefix + '.pdb')
    for a_file in old_files:
        os.remove(a_file)
    return (new_test, old_test)
def capture_mutant_pdb(out_name, mutant, chain_letter):
    """Write only the chain(s) named in *chain_letter* from pdb *mutant* to *out_name*.

    Round-trips through Bio.PDB: parses *mutant*, then saves with the
    SelectChains filter (defined in this file) so every chain whose id
    is not in *chain_letter* is dropped.
    """
    parser = PDB.PDBParser()
    structure = parser.get_structure('working_pdb', mutant)
    writer = PDB.PDBIO()
    writer.set_structure(structure)
    writer.save(out_name, select=SelectChains(chain_letter))
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
| return (chain.get_id() in self.chain_letters) | identifier_body | |
Tetris.go | }, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
// Block is the active falling tetromino: its position on the board,
// its cell matrix, and the bookkeeping needed for wall-kick rotation.
type Block struct {
	x          int     // board column of the shape matrix's top-left cell
	y          int     // board row of the top-left cell (spawns at -2, partly off-screen)
	shape      [][]int // cell matrix; non-zero cells are filled and carry the id set by reInit
	shapeType  int     //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
	rotateType int     //store 1 number in range [0,3]. For rotation supporting
}
//
// reInit recolors the block: every filled (non-zero) cell of the shape
// matrix is set to the same random value in [1,6]; empty cells stay 0.
// Position and rotation state are not touched.
func (block *Block) reInit() {
	randomNum := rand.Intn(7-1) + 1
	for i := range block.shape {
		for j := range block.shape[0] {
			if block.shape[i][j] != 0 {
				block.shape[i][j] = randomNum
			}
		}
	}
}
//
// transpose mirrors the shape matrix across its main diagonal in place.
// Only the upper triangle is walked (j starts at i) so each pair is
// swapped exactly once. Assumes a square matrix — a non-square shape
// would index out of range.
func (block *Block) transpose() {
	for i := range block.shape {
		for j := 0 + i; j < len(block.shape[0]); j++ {
			//swap
			block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
		}
	}
}
//
// reverseColumn mirrors the shape matrix horizontally in place, swapping
// column j with column width-1-j up to the middle column. Together with
// transpose (see rotate) this produces a 90-degree clockwise rotation.
func (block *Block) reverseColumn() {
	temp := 0
	pivot := int(len(block.shape[0]) / 2)
	//swap column i with column len(block.shape)-1-i
	for i := range block.shape {
		for j := 0; j < pivot; j++ {
			temp = block.shape[i][j]
			block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
			block.shape[i][len(block.shape[0])-1-j] = temp
		}
	}
}
//
// rotate turns the block 90 degrees clockwise in place, applying wall
// kicks when the naive rotation collides with the landed grid or the
// board edges. Kick offsets come from the caseN tables (caseN_i for the
// I piece, shapeType 5, which has its own kick data). If no kick
// resolves the collision, the previous orientation is restored and the
// rotation state is left unchanged.
func (block *Block) rotate(land [][]int) {
	previousShape := makeCopy(block.shape) // keep a copy so a failed rotation can be undone
	block.transpose()
	block.reverseColumn()
	ok := true
	if checkCollision(land, *block) {
		// Pick the wall-kick table for this piece and rotation state.
		// Following my code, index 5 is the I Tetromino, which has its
		// own wall kick cases.
		var kicks [][]int
		if block.shapeType != 5 {
			switch block.rotateType {
			case 0:
				kicks = case1 // 0->R
			case 1:
				kicks = case2 // R->2
			case 2:
				kicks = case3 // 2->L
			default:
				kicks = case4 // L->0
			}
		} else {
			switch block.rotateType {
			case 0:
				kicks = case1_i // 0->R
			case 1:
				kicks = case2_i // R->2
			case 2:
				kicks = case3_i // 2->L
			default:
				kicks = case4_i // L->0
			}
		}
		ok = block.tryWallKicks(land, kicks)
	}
	// If rotation failed, undo the shape change (position was already
	// restored by tryWallKicks).
	if !ok {
		block.shape = previousShape
		return
	}
	if block.rotateType == 3 {
		block.rotateType = 0
	} else {
		block.rotateType += 1
	}
}

// tryWallKicks offsets the block by each (dx, dy) candidate in turn and
// keeps the first offset that does not collide. On failure the block's
// position is restored and false is returned. This replaces eight
// copy-pasted loops in the original rotate.
func (block *Block) tryWallKicks(land [][]int, kicks [][]int) bool {
	startX := block.x
	startY := block.y
	for _, kick := range kicks {
		block.x += kick[0]
		block.y += kick[1]
		if !checkCollision(land, *block) {
			return true
		}
		block.x = startX
		block.y = startY
	}
	return false
}
//
// moveDown shifts the block one row down, undoing the move if it would
// collide with the landed grid or leave the board.
func (block *Block) moveDown(land [][]int) {
	block.y++
	if checkCollision(land, *block) {
		block.y--
	}
}
//
// moveLeft shifts the block one column left, undoing the move if it
// would collide with the landed grid or leave the board.
func (block *Block) moveLeft(land [][]int) {
	block.x--
	if checkCollision(land, *block) {
		block.x++
	}
}
//
// moveRight shifts the block one column right, undoing the move if it
// would collide with the landed grid or leave the board.
func (block *Block) moveRight(land [][]int) {
	block.x++
	if checkCollision(land, *block) {
		block.x--
	}
}
//
// hardDrop slides the block straight down to the lowest non-colliding
// row: it drops until a collision occurs, then backs up one step.
func (block *Block) hardDrop(land [][]int) {
	for !checkCollision(land, *block) {
		block.y++
	}
	block.y--
}
//
// makeCopy returns a deep copy of a rectangular 2-D int slice: mutating
// the returned slice never affects the original. Row width is taken
// from the first row, matching the original's assumption of a
// rectangular input.
func makeCopy(arr [][]int) [][]int {
	newArray := make([][]int, len(arr))
	for i := range arr {
		newArray[i] = make([]int, len(arr[0]))
		// The builtin copy duplicates the row in one call instead of an
		// element-by-element loop (and cannot index past a short row).
		copy(newArray[i], arr[i])
	}
	return newArray
}
//
// checkCollision reports whether the block, at its current position,
// overlaps a filled cell of land or crosses the board's side or bottom
// edges. rowSize/colSize are package-level board dimensions defined
// elsewhere in this file. Cells above the top edge (y+i < 0) are not
// checked against land, which lets a freshly spawned block (y = -2)
// sit partly off-screen without counting as a collision.
func checkCollision(land [][]int, block Block) bool {
	for i := range block.shape {
		for j := range block.shape[0] {
			if block.shape[i][j] != 0 {
				if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
					return true
				} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
					if land[block.y+i][block.x+j] != 0 {
						return true
					}
				}
			}
		}
	}
	return false
}
//
func | (board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape | randomBlock | identifier_name |
Tetris.go | 1}, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0] | if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
}
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
// randomBlock spawns a new tetromino horizontally centered near the top
// of the board (y = -2, partly above the visible field), with its cells
// recolored by reInit. The shape matrix is deep-copied so the template
// table is never mutated.
//
// NOTE(review): rand.Intn(len(Shape) - 1) can never return the last
// index, so the final shape in the table is never spawned — confirm
// whether rand.Intn(len(Shape)) was intended.
func randomBlock(board [][]int, Shape [][][]int) Block {
	randomNum := rand.Intn(len(Shape) - 1)
	randomShape := makeCopy(Shape[randomNum])
	coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
	coordinateY := -2
	block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
	block.reInit()
	return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape | block.y += case4[i][1] | random_line_split |
Tetris.go | }, {0, -2}, {1, -2}} //2->L
case4 = [][]int{{-1, 0}, {-1, -1}, {0, 2}, {-1, 2}} //L->0
//for I Tetromino's case (clockwise direction)
case1_i = [][]int{{-2, 0}, {1, 0}, {-2, -1}, {1, 2}} //0->R
case2_i = [][]int{{-1, 0}, {2, 0}, {-1, 2}, {2, -1}} //R->2
case3_i = [][]int{{2, 0}, {-1, 0}, {2, 1}, {-1, -2}} //2->L
case4_i = [][]int{{1, 0}, {-2, 0}, {1, -2}, {-2, 1}} //L->0
)
//
type Block struct {
x int
y int
shape [][]int
shapeType int //store following index of blockShape (range [0,6] means shapeO->shapeT above). For rotation supporting
rotateType int //store 1 number in range [0,3]. For rotation supporting
}
//
func (block *Block) reInit() {
randomNum := rand.Intn(7-1) + 1
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
block.shape[i][j] = randomNum
}
}
}
}
//
func (block *Block) transpose() {
for i := range block.shape {
for j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0]
block.y += case4[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int |
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func randomBlock(board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block | {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
} | identifier_body |
Tetris.go | j := 0 + i; j < len(block.shape[0]); j++ {
//swap
block.shape[j][i], block.shape[i][j] = block.shape[i][j], block.shape[j][i]
}
}
}
//
func (block *Block) reverseColumn() {
temp := 0
pivot := int(len(block.shape[0]) / 2)
//swap column i with column len(block.shape)-1-i
for i := range block.shape {
for j := 0; j < pivot; j++ {
temp = block.shape[i][j]
block.shape[i][j] = block.shape[i][len(block.shape[0])-1-j]
block.shape[i][len(block.shape[0])-1-j] = temp
}
}
}
//
func (block *Block) rotate(land [][]int) {
previousShape := makeCopy(block.shape) //store previouse shape after rotated shape
block.transpose()
block.reverseColumn()
tempX := block.x
tempY := block.y
flag := false
//
if checkCollision(land, *block) {
//following my code, index 5 is I Tetromino, which has its own wall kick case
if block.shapeType != 5 {
if block.rotateType == 0 { //0->R
for i := range case1 {
block.x += case1[i][0]
block.y += case1[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2 {
block.x += case2[i][0]
block.y += case2[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 { //2->L
for i := range case3 {
block.x += case3[i][0]
block.y += case3[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4 {
block.x += case4[i][0]
block.y += case4[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
} else {
if block.rotateType == 0 { //0->R
for i := range case1_i {
block.x += case1_i[i][0]
block.y += case1_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 1 { //R->2
for i := range case2_i {
block.x += case2_i[i][0]
block.y += case2_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else if block.rotateType == 2 {
for i := range case3_i {
block.x += case3_i[i][0]
block.y += case3_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
} else { //case == 3 //L->0
for i := range case4_i {
block.x += case4_i[i][0]
block.y += case4_i[i][1]
if !checkCollision(land, *block) {
flag = true
break
} else {
block.x = tempX
block.y = tempY
}
}
}
}
} else {
flag = true
}
//if rotation was failed
if flag == false {
block.shape = previousShape
return
}
//
if block.rotateType == 3 {
block.rotateType = 0
} else {
block.rotateType += 1
}
}
//
func (block *Block) moveDown(land [][]int) {
block.y++
if checkCollision(land, *block) {
block.y--
}
}
//
func (block *Block) moveLeft(land [][]int) {
block.x--
if checkCollision(land, *block) {
block.x++
}
}
//
func (block *Block) moveRight(land [][]int) {
block.x++
if checkCollision(land, *block) {
block.x--
}
}
//
func (block *Block) hardDrop(land [][]int) {
for !checkCollision(land, *block) {
block.y++
}
block.y--
}
//
func makeCopy(arr [][]int) [][]int {
newArray := make([][]int, len(arr))
for i := range arr {
newArray[i] = make([]int, len(arr[0]))
for j := range arr[0] {
newArray[i][j] = arr[i][j]
}
}
return newArray
}
//
func checkCollision(land [][]int, block Block) bool {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.x+j < 0 || block.x+j > colSize-1 || block.y+i > rowSize-1 {
return true
} else if block.x+j >= 0 && block.x+j <= colSize-1 && block.y+i <= rowSize-1 && block.y+i >= 0 {
if land[block.y+i][block.x+j] != 0 {
return true
}
}
}
}
}
return false
}
//
func randomBlock(board [][]int, Shape [][][]int) Block {
randomNum := rand.Intn(len(Shape) - 1)
randomShape := makeCopy(Shape[randomNum])
coordinateX := int(len(board[0])/2) - len(randomShape[0]) + 1
coordinateY := -2
block := Block{coordinateX, coordinateY, randomShape, randomNum, 0}
block.reInit()
return block
}
//
func initBoard() [][]int {
board := make([][]int, rowSize)
for i := range board {
board[i] = make([]int, colSize)
}
return board
}
//
func mergeBlock(board [][]int, block Block) [][]int {
for i := range block.shape {
for j := range block.shape[0] {
if block.shape[i][j] != 0 {
if block.y+i >= 0 && block.y+i <= rowSize-1 && block.x+j >= 0 && block.x+j <= colSize-1 {
board[block.y+i][block.x+j] = block.shape[i][j]
}
}
}
}
return board
}
//
func checkReachTop(land [][]int) bool {
for i := range land[0] {
if land[0][i] != 0 {
return true
}
}
return false
}
//
func handleFullLine(land [][]int, score int) ([][]int, int) {
var line []int
var flag bool
for i := range land {
flag = true
for j := range land[0] {
if land[i][j] == 0 {
flag = false
break
}
}
if flag == true {
line = append(line, i)
}
}
//index is line (row), j is column
for _, index := range line {
for j := range land[index] {
land[index][j] = 0
}
for j := index; j > 0; j-- {
//swap
land[j-1], land[j] = land[j], land[j-1]
}
score += 100
}
return land, score
}
//
func ClearScreen() {
cmd := exec.Command("clear")
cmd.Stdout = os.Stdout
cmd.Run()
}
//
func Display(arr [][]int, nextBlock Block, score int, Color []string) {
ClearScreen()
tempIndexRow := 0
tempIndexCol := 0
for i := range arr {
tempIndexCol = 0
if i == 0 {
fmt.Print("\r ")
for k := 0; k < len(arr[0]); k++ | {
fmt.Print("\033[1m_ ")
} | conditional_block | |
ui.rs | game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn | (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone | main_loop | identifier_name |
ui.rs | }
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64 | display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
} | random_line_split | |
ui.rs | game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type != ObjectType::Beast |
if varying.awareness_range >0 && varying.object_type != ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game | {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}} | conditional_block |
ui.rs | {}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() | {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
} | identifier_body | |
cached.go | KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
} | item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.Tag |
c.getAllTagsCacheMutex.Unlock()
}
| random_line_split |
cached.go | KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) | else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.Tag | {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} | conditional_block |
cached.go | KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) AddMetrics(metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) |
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item | {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
} | identifier_body |
cached.go | KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cached
import (
"errors"
"sync"
"time"
"github.com/square/metrics/api"
"github.com/square/metrics/log"
"github.com/square/metrics/metric_metadata"
"github.com/square/metrics/util"
)
// BackgroundAPI is a MetadataAPI that also supports background cache updates.
type BackgroundAPI interface {
metadata.MetricAPI
// GetBackgroundAction returns a function to be called to execute a background cache update.
GetBackgroundAction() func(metadata.Context) error
// CurrentLiveRequests returns the number of requests currently in the queue
CurrentLiveRequests() int
// MaximumLiveRequests returns the maximum number of requests that can be in the queue
MaximumLiveRequests() int
}
// metricMetadataAPI caches some of the metadata associated with the API to reduce latency.
// However, it does not reduce total QPS: whenever it reads from the cache, it performs an update
// in the background by launching a new goroutine.
type metricMetadataAPI struct {
metricMetadataAPI metadata.MetricAPI // The internal MetricAPI that performs the actual queries.
clock util.Clock // Here so we can mock out in tests
// Cached items
getAllTagsCache map[api.MetricKey]*TagSetList // The cache of metric -> tags
getAllTagsCacheMutex sync.RWMutex // Mutex for getAllTagsCache
// Cache Config
freshness time.Duration // How long until cache entries become stale
timeToLive time.Duration // How long until cache entries become expired
// Queue
backgroundQueue chan func(metadata.Context) error // A channel that holds background requests.
queueMutex sync.Mutex // Synchronizing mutex for the queue
}
// metricUpdateAPI is a wrapper for when the underlying metadata.MetricAPI is also a metadata.MetricUpdateAPI.
type metricUpdateAPI struct {
metricMetadataAPI
}
func (c *metricMetadataAPI) AddMetric(metric api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetric(metric, context)
}
func (c *metricMetadataAPI) | (metrics []api.TaggedMetric, context metadata.Context) error {
return c.metricMetadataAPI.(metadata.MetricUpdateAPI).AddMetrics(metrics, context)
}
// Config stores data needed to instantiate a CachedMetricMetadataAPI.
type Config struct {
Freshness time.Duration
RequestLimit int
TimeToLive time.Duration
}
// TagSetList is an item in the cache.
type TagSetList struct {
TagSets []api.TagSet // The tagsets for this metric
Expiry time.Time // The time at which the cache entry expires
Stale time.Time // The time at which the cache entry becomes stale
sync.Mutex // Synchronizing mutex
inflight bool // Indicates a request is already in flight
enqueued bool // Indicates a request has been enqueued
wg sync.WaitGroup // Synchronizing wait group
fetchError error // Fetch error from the last attempt
}
// NewMetricMetadataAPI creates a cached API given configuration and an underlying API object.
func NewMetricMetadataAPI(apiInstance metadata.MetricAPI, config Config) BackgroundAPI {
requests := make(chan func(metadata.Context) error, config.RequestLimit)
if config.Freshness == 0 {
config.Freshness = config.TimeToLive
}
result := metricMetadataAPI{
metricMetadataAPI: apiInstance,
clock: util.RealClock{},
getAllTagsCache: map[api.MetricKey]*TagSetList{},
freshness: config.Freshness,
timeToLive: config.TimeToLive,
backgroundQueue: requests,
}
if _, ok := apiInstance.(metadata.MetricUpdateAPI); ok {
return &metricUpdateAPI{result}
}
return &result
}
// addBackgroundGetAllTagsRequest adds a job to update the lag list for the given
// metric. Requires the caller hold the lock for the item in the cache.
func (c *metricMetadataAPI) addBackgroundGetAllTagsRequest(item *TagSetList, metricKey api.MetricKey) {
if item == nil {
log.Errorf("Asked to perform a background GetAllTags lookup for %s but missing entry", metricKey)
return
}
c.queueMutex.Lock()
defer c.queueMutex.Unlock()
if cap(c.backgroundQueue) <= len(c.backgroundQueue) {
log.Warningf("Unable to enqueue a background GetAllTags lookup for %s due to a full queue", metricKey)
return
}
if item.enqueued {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already enqueued", metricKey)
return
}
if item.inflight {
log.Infof("Unable to perform a background GetAllTags lookup for %s as one is already in flight", metricKey)
return
}
log.Infof("Enqueuing a background GetAllTags lookup for %s", metricKey)
item.enqueued = true
c.backgroundQueue <- func(context metadata.Context) error {
log.Infof("Executing the background GetAllTags lookup for %s", metricKey)
defer log.Infof("Finished the background GetAllTags lookup for %s", metricKey)
item.Lock()
defer item.Unlock()
item.enqueued = false
defer context.Profiler.Record("CachedMetricMetadataAPI_BackgroundAction_GetAllTags")()
_, err := c.fetchAndUpdateCachedTagSet(item, metricKey, context)
return err
}
}
// GetBackgroundAction is a blocking method that runs one queued cache update.
// It will block until an update is available.
func (c *metricMetadataAPI) GetBackgroundAction() func(metadata.Context) error {
return <-c.backgroundQueue
}
// GetAllMetrics waits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetAllMetrics(context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetAllMetrics(context)
}
// GetMetricsForTag wwaits for a slot to be open, then queries the underlying API.
func (c *metricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context metadata.Context) ([]api.MetricKey, error) {
return c.metricMetadataAPI.GetMetricsForTag(tagKey, tagValue, context)
}
// CheckHealthy checks if the underlying MetricAPI is healthy
func (c *metricMetadataAPI) CheckHealthy() error {
return c.metricMetadataAPI.CheckHealthy()
}
// fetchAndUpdateCachedTagSet updates the in-memory cache (asusming the update
// is newer than what is in the cache). Requires the caller hold the lock for the
// item in the cache.
func (c *metricMetadataAPI) fetchAndUpdateCachedTagSet(item *TagSetList, metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
if item == nil {
return nil, errors.New("missing cache list entry")
}
item.wg.Add(1)
item.fetchError = nil
item.inflight = true
item.Unlock()
startTime := c.clock.Now()
tagsets, err := c.metricMetadataAPI.GetAllTags(metricKey, context)
item.Lock()
if err != nil {
item.fetchError = err
item.wg.Done()
item.inflight = false
return nil, err
}
// Only update the cache if the update expires later than the current
// entry in the cache
newExpiry := startTime.Add(c.timeToLive)
if item.Expiry.Before(newExpiry) {
item.TagSets = tagsets
item.Expiry = newExpiry
item.Stale = startTime.Add(c.freshness)
} else {
log.Warningf("Asked to update the tag set for %s but new expiry is earlier than current (%s vs %s)",
metricKey, newExpiry.String(), item.Expiry.String())
}
item.wg.Done()
item.inflight = false
return tagsets, nil
}
// GetAllTags uses the cache to serve tag data for the given metric.
// If the cache entry is missing or out of date, it uses the results of a query
// to the underlying API to return to the caller. Even if the cache entry is
// up-to-date, this method may enqueue a background request to the underlying API
// to keep the cache fresh.
func (c *metricMetadataAPI) GetAllTags(metricKey api.MetricKey, context metadata.Context) ([]api.TagSet, error) {
defer context.Profiler.Record("CachedMetricMetadataAPI_GetAllTags")()
// Get the cached result for this metric.
c.getAllTagsCacheMutex.RLock()
item, ok := c.getAllTagsCache[metricKey]
c.getAllTagsCacheMutex.RUnlock()
if !ok {
c.getAllTagsCacheMutex.Lock()
// Now that we have the mutex for getAllTagsCache, make sure another goroutine
// hasn't already updated the cache
item, ok = c.getAllTagsCache[metricKey]
if !ok {
item = &TagSetList{}
c.getAllTagsCache[metricKey] = item
}
c.getAllTagsCacheMutex.Unlock()
}
item.Lock()
if item.Expiry.IsZero() || item.Expiry.Before(c.clock.Now()) {
if item.inflight {
item.Unlock()
item.wg.Wait()
// Make sure we have the lock to re-read
item.Lock()
defer item.Unlock()
// If the request we were waiting on errored, we also errored
return item.Tag | AddMetrics | identifier_name |
util.rs | ` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None, | }
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// | (Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir); | random_line_split |
util.rs | ` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> |
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
| {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
} | identifier_body |
util.rs | ` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root() != base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn | <'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n != 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
| bool_from_str_or_int | identifier_name |
main.rs | add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> | {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
} | identifier_body | |
main.rs |
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize .. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some(( | {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
} | conditional_block | |
main.rs | . That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0); | }
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::From | l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len()); | random_line_split |
main.rs | That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p != SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn | (&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::From | link | identifier_name |
math.rs | .clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n))); | }
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
| }
fn increasing(&self) -> bool {
return false; | random_line_split |
math.rs | () == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals |
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
| {
return Rationals { ps : PrimeSeq::new() };
} | identifier_body |
math.rs | () == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y != Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn | (&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
| increasing | identifier_name |
rtorrent.py | FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
|
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config | if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path)) | conditional_block |
rtorrent.py | FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def | (self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config | serialize_configuration | identifier_name |
rtorrent.py | import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi": | return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config | if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}") | random_line_split |
rtorrent.py | FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
|
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = ".".join(torrent[10][0][0].split("/")[2].rsplit(".", 2)[1:])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config | parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url) | identifier_body |
server.rs | on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
    /// Hook invoked when the connection is torn down (see `checked_tick`,
    /// which calls it on a failed tick). The default implementation does
    /// nothing and reports success.
    fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server: a pool of recyclable read buffers plus
/// whatever server-wide custom state the user supplies.
pub struct Global<St> {
    /// This should really be a lock-free stack. Unfortunately, only a bounded
    /// queue is implemented in the standard library. A vec will do for now. =(
    ///
    /// Each entry is the backing storage of a recycled iobuf; `return_iobuf`
    /// only admits vectors of exactly `READBUF_SIZE` bytes.
    readbuf_pool: RefCell<Vec<Vec<u8>>>,
    /// User-supplied server-wide state, exposed read-only via `state()`.
    custom_state: St,
}
impl<St> Global<St> {
    /// Builds the shared server-wide state around the user-supplied value,
    /// starting with an empty buffer pool.
    fn new(custom_state: St) -> Global<St> {
        Global {
            custom_state: custom_state,
            readbuf_pool: RefCell::new(Vec::new()),
        }
    }

    /// Hands out an iobuf with the given capacity. Requests of at most
    /// `READBUF_SIZE` bytes are served from the recycling pool (received
    /// data automatically draws from this pool, and buffers `sent` return
    /// to it when drained); larger requests get a fresh allocation.
    fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
        // Oversized requests bypass the pool entirely.
        if capacity > READBUF_SIZE {
            return RWIobuf::new(capacity);
        }
        let mut pool = self.readbuf_pool.borrow_mut();
        let mut buf = match pool.pop() {
            Some(storage) => RWIobuf::from_vec(storage),
            None => RWIobuf::new(READBUF_SIZE),
        };
        debug_assert!(buf.cap() == READBUF_SIZE);
        // Clamp the full-size buffer down to the capacity the caller asked for.
        buf.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
        buf
    }

    /// Recycles an iobuf. Any iobuf may be passed in safely, but only those
    /// whose backing vector is exactly `READBUF_SIZE` bytes (i.e. ones minted
    /// by `make_iobuf`, or lucky look-alikes) actually re-enter the pool;
    /// everything else is dropped.
    fn return_iobuf(&self, buf: RWIobuf<'static>) {
        let mut pool = self.readbuf_pool.borrow_mut();
        match buf.into_vec() {
            Some(storage) if storage.len() == READBUF_SIZE => pool.push(storage),
            _ => {}
        }
    }

    /// Read-only access to the user-supplied server state.
    #[inline(always)]
    pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor, | global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
| random_line_split | |
server.rs | match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> | {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
} | identifier_body | |
server.rs | _read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE |
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
| {
return RWIobuf::new(capacity);
} | conditional_block |
server.rs | _read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn | (&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len != 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if !self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if !in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if !self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
| checked_tick | identifier_name |
main.go | env("SQLX_URL"))
if err != nil {
log.Fatalf("failed to connect to the db: %s", err)
}
}
// InitializeRedis 初始化Redis
func InitializeRedis() {
opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
if err != nil {
log.Fatalf("failed to connect to redis db: %s", err)
}
// Create client as usually.
redisClient = redis.NewClient(opt)
}
// Article 就是文章
type Article struct {
Title string `json:"title"`
Date string `json:"date_str"`
Filename string `json:"file_name"`
DirName string `json:"dir_name"`
PubDate time.Time `json:"-"`
Description string `json:"description"`
}
// Articles 文章列表
type Articles []Article
func (a Articles) Len() int { return len(a) }
func (a Articles) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context | sql", os.Get | identifier_name | |
main.go | a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title, | DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
c | Date: dateString,
Filename: filename, | random_line_split |
main.go | [i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
// ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: titl | sited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(2 | e}
b, err := json.Marshal(vi | conditional_block |
main.go | a[i] }
func (a Articles) Less(i, j int) bool {
v := strings.Compare(a[i].Date, a[j].Date)
if v <= 0 {
return true
}
return false
}
// RandomN return n articles by random
func (a Articles) RandomN(n int) Articles {
if n <= 0 {
return nil
}
length := len(a)
pos := rand.Intn(length - n)
return a[pos : pos+n]
}
func isBlogApp(c *gin.Context) bool {
ua := c.GetHeader("User-Agent")
if strings.HasPrefix(ua, "BlogApp/") {
return true
}
return false
}
func getFilePath(path string) string {
suffix := ".html"
if strings.HasSuffix(path, suffix) {
path = path[:len(path)-len(suffix)]
}
return "./" + path
}
// ReadDesc 把简介读出来
func ReadDesc(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil {
log.Printf("failed | // ReadTitle 把标题读出来
func ReadTitle(path string) string {
path = getFilePath(path)
file, err := os.Open(path)
if err != nil
{
log.Printf("failed to read file(%s): %s", path, err)
return ""
}
line, _, err := bufio.NewReader(file).ReadLine()
if err != nil {
log.Printf("failed to read title of file(%s): %s", path, err)
return ""
}
title := strings.Replace(string(line), "# ", "", -1)
return title
}
// VisitedArticle is for remember which article had been visited
type VisitedArticle struct {
URLPath string `json:"url_path"`
Title string `json:"title"`
}
func genVisited(urlPath, subTitle string) (string, error) {
title := ReadTitle(urlPath)
if title == "" {
return "", ErrNotFound
}
if subTitle != "" {
title += " - " + subTitle
}
visited := VisitedArticle{URLPath: urlPath, Title: title}
b, err := json.Marshal(visited)
if err != nil {
return "", ErrFailedToLoad
}
return string(b), nil
}
func getTopVisited(n int) []VisitedArticle {
visitedArticles := []VisitedArticle{}
articles, err := redisClient.ZRevRangeByScore(zsetKey, &redis.ZRangeBy{
Min: "-inf", Max: "+inf", Offset: 0, Count: int64(n),
}).Result()
if err != nil {
log.Printf("failed to get top %d visited articles: %s", n, err)
return nil
}
for _, article := range articles {
var va VisitedArticle
if err := json.Unmarshal([]byte(article), &va); err != nil {
log.Printf("failed to unmarshal article: %s", err)
continue
}
visitedArticles = append(visitedArticles, va)
}
return visitedArticles
}
// LoadArticle 把文章的元信息读出来
func LoadArticle(dirname, filename string) *Article {
match := filenameRegex.FindStringSubmatch(filename)
if len(match) != 2 {
return nil
}
dateString := strings.Replace(match[1], "_", "-", -1)
filepath := fmt.Sprintf("./%s/%s", dirname, filename)
title := ReadTitle(filepath)
pubDate, err := time.Parse("2006-01-02", dateString)
if err != nil {
log.Panicf("failed to parse date: %s", err)
}
desc := ReadDesc(filepath)
return &Article{
Title: title,
Date: dateString,
Filename: filename,
DirName: dirname,
PubDate: pubDate,
Description: desc,
}
}
// LoadMDs 读取给定目录中的所有markdown文章
func LoadMDs(dirname string) Articles {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Fatalf("failed to read dir(%s): %s", dirname, err)
return nil
}
var articles Articles
for _, file := range files {
filename := file.Name()
if article := LoadArticle(dirname, filename); article != nil {
articles = append(articles, *article)
}
}
sort.Sort(sort.Reverse(articles))
return articles
}
// IndexHandler 首页
func IndexHandler(c *gin.Context) {
topArticles := getTopVisited(15)
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles[:100],
"totalCount": len(articles),
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
"topArticles": topArticles,
},
)
}
// ArchiveHandler 全部文章
func ArchiveHandler(c *gin.Context) {
c.HTML(
http.StatusOK, "index.html", gin.H{
"isBlogApp": isBlogApp(c),
"articles": articles,
"keywords": "Golang,Python,Go语言,Dart,Android,安卓,Kotlin,分布式,高并发,Haskell,C,微服务,软件工程,源码阅读,源码分析",
"description": "享受技术带来的快乐~分布式系统/高并发处理/Golang/Python/Haskell/C/微服务/Android/安卓/Kotlin/软件工程/源码阅读与分析",
},
)
}
func renderArticle(c *gin.Context, status int, path string, subtitle string, randomN int) {
path = getFilePath(path)
content, err := ioutil.ReadFile(path)
if err != nil {
log.Printf("failed to read file %s: %s", path, err)
c.Redirect(http.StatusFound, "/404")
return
}
content = blackfriday.MarkdownCommon(content)
recommends := articles.RandomN(randomN)
topArticles := getTopVisited(15)
c.HTML(
status, "article.html", gin.H{
"isBlogApp": isBlogApp(c),
"content": template.HTML(content),
"title": ReadTitle(path),
"subtitle": subtitle,
"recommends": recommends,
"topArticles": topArticles,
},
)
}
func incrVisited(urlPath, subTitle string) {
if visited, err := genVisited(urlPath, subTitle); err != nil {
log.Printf("failed to gen visited: %s", err)
} else {
if _, err := redisClient.ZIncrBy(zsetKey, 1, visited).Result(); err != nil {
log.Printf("failed to incr score of %s: %s", urlPath, err)
}
}
}
// PingPongHandler ping pong
func PingPongHandler(c *gin.Context) {
c.JSON(http.StatusOK, nil)
}
// ArticleHandler 具体文章
func ArticleHandler(c *gin.Context) {
urlPath := c.Request.URL.Path
incrVisited(urlPath, "")
renderArticle(c, http.StatusOK, urlPath, "", 15)
}
// TutorialPageHandler 教程index
func TutorialPageHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/tutorial.md", "", 0)
}
// AboutMeHandler 关于我
func AboutMeHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/aboutme.md", "", 0)
}
// FriendsHandler 友链
func FriendsHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/friends.md", "", 0)
}
// AppHandler App页面
func AppHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/app.md", "", 0)
}
// NotFoundHandler 404
func NotFoundHandler(c *gin.Context) {
renderArticle(c, http.StatusOK, "articles/404.md", "", 20)
}
// AllSharingHandler 所有分享
func AllSharingHandler(c *gin.Context) {
sharing := dao.GetAllSharing()
c.HTML(
http.StatusOK, "list.html", gin.H{
"isBlogApp": isBlogApp(c),
"sharing": sharing,
},
)
}
// SharingHandler 分享
func SharingHandler(c *gin.Context) {
sharing := dao.GetSharingWithLimit(20)
| to read file(%s): %s", path, err)
return ""
}
reader := bufio.NewReader(file)
reader.ReadLine() // 忽略第一行(标题)
reader.ReadLine() // 忽略第二行(空行)
desc := ""
for i := 0; i < 3; i++ {
line, _, err := reader.ReadLine()
if err != nil && err != io.EOF {
log.Printf("failed to read desc of file(%s): %s", path, err)
continue
}
desc += string(line)
}
trimChars := "\n,。:,.:"
return strings.TrimRight(strings.TrimLeft(desc, trimChars), trimChars) + "..."
}
| identifier_body |
sequences.py | batch {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
| current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
| current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0 | identifier_body |
sequences.py | {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
|
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences | return_value = ((raw_features, raw_responses), (trans_features, trans_responses)) | conditional_block |
sequences.py | {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)]
if return_raw_sample is True:
# This is for BGFN reporting and other functionality
return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def | (self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
| _get_features_responses_weights | identifier_name |
sequences.py | {} with {} items via sequence".format(index, self.batch_size))
raw_features, raw_responses, raw_weights = self._get_features_responses_weights(index)
trans_features = [raw_feature.copy() for raw_feature in raw_features]
trans_responses = [raw_response.copy() for raw_response in raw_responses]
trans_weights = [raw_weight.copy() for raw_weight in raw_weights]
if self.custom_augmentations is not None:
trans_features, trans_responses, trans_weights = self._apply_augmentations(
trans_features, trans_responses, trans_weights
)
trans_features = self._scale_features(trans_features)
trans_responses = self._scale_responses(trans_responses)
if self.nan_replacement_value is not None:
trans_features = self._replace_nan_data_values(trans_features, self.nan_replacement_value)
trans_responses = self._replace_nan_data_values(trans_responses, self.nan_replacement_value)
else:
assert np.all(np.isfinite(trans_features)), (
"Some feature values are nan but nan_replacement_value not provided in data config. Please provide "
+ "a nan_replacement_value to transform features correctly."
)
# Append weights to responses for loss function calculations
raw_responses = [np.append(response, weight, axis=-1) for response, weight in zip(raw_responses, raw_weights)]
trans_responses = [np.append(resp, weight, axis=-1) for resp, weight in zip(trans_responses, trans_weights)] | return_value = ((raw_features, raw_responses), (trans_features, trans_responses))
else:
# This is for Keras sequence generator behavior
return_value = (trans_features, trans_responses)
return return_value
def get_raw_and_transformed_sample(
self, index: int
) -> Tuple[Tuple[List[np.array], List[np.array]], Tuple[List[np.array], List[np.array]]]:
return self.__getitem__(index, return_raw_sample=True)
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
raise NotImplementedError(
"Custom Sequences must implement _get_features_responses_weights for training and reporting to work. "
+ "See method header for expected arguments and returned objects."
)
def _replace_nan_data_values(self, data: List[np.array], replacement_value):
for idx_array in range(len(data)):
data[idx_array][np.isnan(data[idx_array])] = replacement_value
return data
def _apply_augmentations(
self, features: List[np.array], responses: List[np.array], weights: List[np.array]
) -> Tuple[List[np.array], List[np.array], List[np.array]]:
assert len(responses) == 1, (
"Custom augmentations have not been tested on multiple responses. Please feel free to handle this "
+ "case, test your code, and submit a pull request."
)
# Loop through samples, augmenting each
num_samples = features[0].shape[0]
for idx_sample in range(num_samples):
# Get sample data
sample_features = [feature[idx_sample] for feature in features]
sample_responses = responses[0][idx_sample] # Assume single response
sample_weights = weights[0][idx_sample]
mask_loss_window = (sample_weights > 0)[..., 0]
# Format for albumentations.Compose
data_to_augment = {"image": sample_features.pop(0), "mask": np.dstack([sample_responses, sample_weights])}
target_keys = ["image"]
for idx, feature in enumerate(sample_features):
key_feature = ADDITIONAL_TARGETS_KEY.format(idx + 1)
data_to_augment[key_feature] = feature
target_keys.append(key_feature)
# Augment data and parse results
augmented = self.custom_augmentations(**data_to_augment)
sample_features = list() # For creating a weights mask
for idx_feature, key_feature in enumerate(target_keys):
features[idx_feature][idx_sample] = augmented[key_feature]
sample_features.append(augmented[key_feature])
responses[0][idx_sample] = augmented["mask"][..., :-1]
mask_features = np.isfinite(np.dstack(sample_features)).all(axis=-1)
mask = np.logical_and(mask_features, mask_loss_window)
weights[0][idx_sample] = np.expand_dims(mask * augmented["mask"][..., -1], axis=-1)
return features, responses, weights
def _scale_features(self, features: List[np.array]) -> List[np.array]:
return [self.feature_scaler.transform(feature) for feature in features]
def _scale_responses(self, responses: List[np.array]) -> List[np.array]:
return [self.response_scaler.transform(response) for response in responses]
class MemmappedSequence(BaseSequence):
def __init__(
self,
features,
responses,
weights,
feature_scaler: BaseGlobalScaler,
response_scaler: BaseGlobalScaler,
batch_size: int,
feature_mean_centering: False,
nan_replacement_value: None,
custom_augmentations: albumentations.Compose = None,
) -> None:
self.features = features # a list of numpy arrays, each of which is (n,y,x,f)
self.responses = responses # a list of numpy arrays, each of which is (n,y,x,r)
self.weights = weights # a list of numpy arrays, each of which is (n,y,x,1)
super().__init__(
feature_scaler=feature_scaler,
response_scaler=response_scaler,
batch_size=batch_size,
custom_augmentations=custom_augmentations,
nan_replacement_value=nan_replacement_value,
)
# Determine the cumulative number of total samples across arrays - we're going to use
# it to roll between files when extracting samples
self.cum_samples_per_array = np.zeros(len(features) + 1).astype(int)
for _array in range(1, len(features) + 1):
self.cum_samples_per_array[_array] = features[_array - 1].shape[0] + self.cum_samples_per_array[_array - 1]
self.feature_mean_centering = feature_mean_centering
def __len__(self):
# Method is required for Keras functionality, a.k.a. steps_per_epoch in fit_generator
return int(np.ceil(self.cum_samples_per_array[-1] / self.batch_size))
def _mean_center(self, data: np.array) -> np.array:
return data - np.mean(data, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
def _get_features_responses_weights(self, index: int) -> Tuple[List[np.array], List[np.array], List[np.array]]:
# start by finding which array we're starting in, based on the input index, batch size,
# and the number of samples per array
current_array = 0
while current_array < len(self.cum_samples_per_array) - 1:
if (
index * self.batch_size >= self.cum_samples_per_array[current_array]
and index * self.batch_size < self.cum_samples_per_array[current_array + 1]
):
break
current_array += 1
# grab the the appropriate number of samples from the current array
sample_index = int(index * self.batch_size - self.cum_samples_per_array[current_array])
batch_features = (self.features[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_responses = (self.responses[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
batch_weights = (self.weights[current_array])[sample_index : sample_index + self.batch_size, ...].copy()
# if the current array didn't have enough samples in it, roll forward to the next one (and keep
# doing so until we have enough samples)
while batch_features.shape[0] < self.batch_size:
sample_index = 0
current_array += 1
if current_array == len(self.features):
break
stop_ind = self.batch_size - batch_features.shape[0]
batch_features = np.append(
batch_features, (self.features[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_responses = np.append(
batch_responses, (self.responses[current_array])[sample_index:stop_ind, ...], axis=0
)
batch_weights = np.append(batch_weights, (self.weights[current_array])[sample_index:stop_ind, ...], axis=0)
if self.feature_mean_centering is True:
batch_features = self._mean_center(batch_features)
return [batch_features], [batch_responses], [batch_weights]
def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:
"""
This function returns a custom augmentations object for use with sequences via the load_sequences function in
data_core.py. Please note that these augmentations have only been tested with RGB data between 0 and 1 and that
order of operations is critical. e.g., blurs don't like missing data so shouldn't be applied before dropout, noise
probably shouldn't be applied before color changes or blurs... of course, this is all dependent on your specific
problem.
Args:
num_features: number of features used in the model
window_size: window_size from the data configs
Returns:
custom augmentations function for use with sequences
|
if return_raw_sample is True:
# This is for BGFN reporting and other functionality | random_line_split |
data.ts | ';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function | (masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training | getAcceleratorTypes | identifier_name |
data.ts | ';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined |
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/tr | {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
} | identifier_body |
data.ts | ';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96',
]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array
* if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) |
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/tr | {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
} | conditional_block |
data.ts | ';
/** Indicates a hourly frequency type */
export const HOUR = 'hour';
/** Indicates a daily frequency type */
export const DAY = 'day';
/** Indicates a weekly frequency type */
export const WEEK = 'week';
/** Indicates a monthly frequency type */
export const MONTH = 'month';
/** Interface for an <option> inside a <select> */
export interface Option {
text: string;
value: string | number;
disabled?: boolean;
}
/** Returns an option whose value matches the given value. */
export function findOptionByValue<T extends Option>(
options: T[],
value: string | number | undefined
): T | undefined {
if (value === undefined) return undefined;
return options.find(option => option.value === value);
}
export const DAYS_OF_WEEK: Option[] = [
{ value: 'sundayRun', text: 'Sun' },
{ value: 'mondayRun', text: 'Mon' },
{ value: 'tuesdayRun', text: 'Tue' },
{ value: 'wednesdayRun', text: 'Wed' },
{ value: 'thursdayRun', text: 'Thur' },
{ value: 'fridayRun', text: 'Fri' },
{ value: 'saturdayRun', text: 'Sat' },
];
export const MONTH_FREQUENCIES: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '6', text: '6' },
{ value: '12', text: '12' },
];
/** Removes the item from the list if found */
export function removeFromList<T>(list: T[], value: T) {
const index = list.indexOf(value);
if (index >= 0) {
list.splice(index, 1);
}
}
/**
* Container images that can be used to schedule jobs on AI Platform.
* https://cloud.google.com/ai-platform/training/docs/containers-overview
*/
export const CONTAINER_IMAGES: Option[] = [
{ value: `${GCR_PREFIX}/base-cpu:latest`, text: 'Python' },
{
value: `${GCR_PREFIX}/tf-cpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf-gpu.1-15:latest`,
text: 'TensorFlow Enterprise 1.15 (GPU)',
},
{
value: `${GCR_PREFIX}/tf2-cpu.2-1:latest`,
text: 'TensorFlow 2.1 (CPU only)',
},
{
value: `${GCR_PREFIX}/tf2-gpu.2-1:latest`,
text: 'TensorFlow 2.1 (GPU)',
},
{
value: `${GCR_PREFIX}/pytorch-cpu.1-4:latest`,
text: 'PyTorch 1.4 (CPU only)',
},
{
value: `${GCR_PREFIX}/pytorch-gpu.1-4:latest`,
text: 'PyTorch 1.4 (GPU)',
},
{
value: `${GCR_PREFIX}/r-cpu.3-6:latest`,
text: 'R 3.6 (with r-essentials)',
},
{ value: `${GCR_PREFIX}/beam-notebooks:latest`, text: 'Apache Beam' },
];
/**
* Scale tier values for AI Platform Jobs
* https://cloud.google.com/ai-platform/training/docs/machine-types#scale_tiers
*/
export const SCALE_TIERS: Option[] = [
{ value: 'BASIC', text: 'Single worker instance' },
{
value: 'BASIC_GPU',
text: 'A single worker instance with an NVIDIA Tesla K80 GPU',
},
{
value: 'STANDARD_1',
text: '1 master instance, 4 workers, 3 parameter servers',
},
{
value: 'PREMIUM_1',
text: '1 master instance, 19 workers, 11 parameter servers',
},
{ value: CUSTOM, text: 'Custom machine type configuration' },
];
/**
* AI Platform Machine types.
* https://cloud.google.com/ai-platform/training/docs/machine-types#compare-machine-types
*/
export const MASTER_TYPES: Option[] = [
{ value: 'n1-standard-4', text: '4 CPUs, 15 GB RAM' },
{ value: 'n1-standard-8', text: '8 CPUs, 30 GB RAM' },
{ value: 'n1-standard-16', text: '16 CPUs, 60 GB RAM' },
{ value: 'n1-standard-32', text: '32 CPUs, 120 GB RAM' },
{ value: 'n1-standard-64', text: '64 CPUs, 240 GB RAM' },
{ value: 'n1-standard-96', text: '96 CPUs, 360 GB RAM' },
{ value: 'n1-highmem-2', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-4', text: '4 CPUs, 26 GB RAM' },
{ value: 'n1-highmem-8', text: '8 CPUs, 52 GB RAM' },
{ value: 'n1-highmem-16', text: '16 CPUs, 104 GB RAM' },
{ value: 'n1-highmem-32', text: '32 CPUs, 208 GB RAM' },
{ value: 'n1-highmem-64', text: '64 CPUs, 416 GB RAM' },
{ value: 'n1-highmem-96', text: '96 CPUs, 624 GB RAM' },
{ value: 'n1-highcpu-16', text: '16 CPUs, 14.4 GB RAM' },
{ value: 'n1-highcpu-32', text: '32 CPUs, 28.8 GB RAM' },
{ value: 'n1-highcpu-64', text: '64 CPUs, 57.6 GB RAM' },
{ value: 'n1-highcpu-96', text: '96 CPUs, 86.4 GB RAM' },
];
/**
* AI Platform Accelerator types.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_K80', text: 'NVIDIA Tesla K80' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_P100', text: 'NVIDIA Tesla P100' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
/**
* AI Platform Accelerator types for particular machine types that only
* provide a limited amount.
* https://cloud.google.com/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu
*/
export const ACCELERATOR_TYPES_REDUCED: Option[] = [
{ value: '', text: 'None' },
{ value: 'NVIDIA_TESLA_P4', text: 'NVIDIA Tesla P4' },
{ value: 'NVIDIA_TESLA_T4', text: 'NVIDIA Tesla T4' },
{ value: 'NVIDIA_TESLA_V100', text: 'NVIDIA Tesla V100' },
];
const MASTER_TYPES_REDUCED: Set<string> = new Set([
'n1-standard-64',
'n1-standard-96',
'n1-highmem-64',
'n1-highmem-96',
'n1-highcpu-96', | * if masterType is falsy.
*/
export function getAcceleratorTypes(masterType: string): Option[] {
if (masterType) {
if (MASTER_TYPES_REDUCED.has(masterType)) {
return ACCELERATOR_TYPES_REDUCED;
}
return ACCELERATOR_TYPES;
}
return [];
}
/**
* AI Platform Accelerator counts.
* https://cloud.google.com/ai-platform/training/docs/using-gpus
*/
export const ACCELERATOR_COUNTS_1_2_4_8: Option[] = [
{ value: '1', text: '1' },
{ value: '2', text: '2' },
{ value: '4', text: '4' },
{ value: '8', text: '8' },
];
/**
* Supported AI Platform regions.
* https://cloud.google.com/ai-platform/training/docs/regions
* TODO: It may be more sensible to invoke the projects.locations.list API
* and filter for locations with TRAINING capability
* https://cloud.google.com/ai-platform/training/docs/reference | ]);
/**
* Returns the valid accelerator types given a masterType. Returns empty array | random_line_split |
classic.py | (self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True | severity | identifier_name | |
classic.py | catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
| F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.") | conditional_block | |
classic.py | # map from key to name
self.__rarchs = dict([(x['key'],x['name']) for x in archs])
def severity(self, sevr):
if sevr==0:
return ''
try:
return self.severityInfo[sevr]['sevr']
except KeyError:
return str(sevr)
def status(self, stat):
if stat==0:
return ''
try:
return self.statusInfo[stat]
except IndexError:
return str(stat)
def archives(self, pattern):
if not isinstance(pattern, (str,unicode)):
return list(set(reduce(list.__add__, map(self.archives, pattern), [])))
else:
return [a for a in iter(self.__archs.keys()) if fnmatch(a, pattern)]
def lookupArchive(self, arch):
return self.__rarchs[arch]
def _archname2key(self, archs):
if archs is None:
archs = list(self.__archs.values())
else:
for i,a in enumerate(archs):
try:
k = int(a)
if k not in iter(self.__archs.values()):
raise KeyError("Invalid Archive key '%d'"%k)
# do nothing
continue
except ValueError:
pass
try:
k = self.__archs[a]
archs[i] = k
except KeyError:
raise KeyError("Invalid Archive key '%s'"%a)
return archs
@defer.inlineCallbacks
def search(self, exact=None, pattern=None,
archs=None, breakDown=False,
rawTime=False):
"""Search for PV names matching the given pattern.
If archs is None then all archives are searched.
Otherwise archs must be a list of strings or integers
specifing archive names or keys.
Returns a Deferred which fires with a dictionary.
If breakDown is False (the default) then the result is
{'pvname':(firstTime, lastTime)}
If breakDown is True then the result is
{'pvname':[(firstTime, lastTime, archKey)]}
In the second form the ranges for each pv will be sorted
by firstTime.
For either return format, if rawTime is False then a datatime
is given, otherwise a tuple (sec,nsec).
"""
if exact is None and pattern is None:
raise TypeError("Must provide 'exact' or 'pattern'")
if exact is not None:
pattern = '^%s$'%re.escape(exact)
else:
# Test compile to catch basic syntax errors
re.compile(pattern)
archs = self._archname2key(archs)
_log.debug('Searching for %s in %s', pattern, archs)
Ds = [None]*len(archs)
for i,a in enumerate(archs):
Ds[i] = self._proxy.callRemote('archiver.names', a, pattern).addErrback(_connerror)
Ds = yield defer.DeferredList(Ds, fireOnOneErrback=True).addErrback(_connerror)
if breakDown:
results = defaultdict(list)
for i, (junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata | """
"""
def __init__(self, proxy, conf, info, archs):
self._proxy = proxy
self.conf = conf
if PVER < info['ver']:
_log.warn('Archive server protocol version %d is newer then ours (%d).\n'+
'Attempting to proceed.', info['ver'], PVER)
self.description = info['desc']
self.statusInfo = dict(enumerate(info['stat']))
self.severityInfo = {}
for S in info['sevr']:
self.severityInfo[int(S['num'])] = S
self.hows = enumerate(info['how'])
self.hows = dict([(a,b) for b,a in self.hows])
# map from name to key
self.__archs = dict([(x['name'],x['key']) for x in archs]) | identifier_body | |
classic.py | junk, A) in enumerate(Ds):
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
results[pv].append( (F, L, archs[i]) )
for R in results.values():
R.sort()
else:
results = defaultdict(lambda:[None]*2)
for junk, A in Ds:
for R in A:
# Note: Order based on sorting by key name
ens, es, ss, sns, pv = R.values()
F = (ss, sns)
L = (es, ens)
if not rawTime:
F, L = makeTime(F), makeTime(L)
C = results[pv]
if C[0] is None or F < C[0]:
C[0] = F
if C[1] is None or L > C[1]:
C[1] = L
results = dict([(K,tuple(V)) for K,V in results.items()])
defer.returnValue(results)
@defer.inlineCallbacks
def _fetchdata(self, arch, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
how=0, enumAsInt=False, displayMeta=False):
if count is None and chunkSize is None:
raise TypeError("If count is None then chunkSize must be given")
if chunkSize is None:
chunkSize = count
if T0 is None and Tend is None:
raise TypeError("Must specify T0 or Tend")
if T0 is None:
T0 = datetime.now()
else:
T0 = makeTime(T0)
if Tend is None:
Tend = datetime.now()
else:
Tend = makeTime(Tend)
if T0 > Tend:
raise ValueError("T0 must be <= Tend")
if count is None:
C = chunkSize
else:
C = min(count, chunkSize)
Tcur = timeTuple(T0)
Tlast =timeTuple(Tend)
N = 0
first = True
last = False
while not last and Tcur < Tlast:
_log.debug('archiver.values(%s,%s,%s,%s,%d,%d)',
self.__rarchs[arch],pv,Tcur,Tlast,C,how)
D = self._proxy.callRemote('archiver.values',
arch, [pv],
Tcur[0], Tcur[1],
Tlast[0], Tlast[1],
C, how).addErrback(_connerror)
D.addCallback(_optime, time.time())
try:
data = yield D
except:
_log.fatal('Query fails')
raise
assert len(data)==1, "Server returned more than one PVs? (%s)"%len(data)
assert data[0]['name']==pv, "Server gives us %s != %s"%(data[0]['name'], pv)
vals = data[0]['values']
maxcount = data[0]['count']
_log.debug("Query yields %u points"%len(vals))
N += len(vals)
last = len(vals)<C
if count and N>=count:
last = True
the_meta = data[0]['meta']
if data[0]['meta']['type']==0:
states = data[0]['meta']['states']
else:
states = []
orig_type = data[0]['type']
vtype = orig_type
if vtype==1 and enumAsInt:
vtype = 2
try:
dtype = _dtypes[vtype]
except KeyError:
raise ValueError("Server gives unknown value type %d"%vtype)
XML = data[0]['values']
if len(XML)==0:
break
if vtype == 1:
for V in XML:
for i,pnt in enumerate(V['value']):
try:
V['value'][i] = states[pnt]
except IndexError:
V['value'][i] = str(pnt)
maxelem=0
metadata = np.ndarray(len(XML), dtype=dbr_time)
for i,E in enumerate(XML):
maxelem = max(maxelem, len(E['value']))
metadata[i] = (E['sevr'], E['stat'], E['secs'], E['nano'])
if not displayMeta:
assert maxcount==maxelem, "Value shape inconsistent. %d %d"%(maxcount,maxelem)
values = np.ndarray((len(XML), maxelem), dtype=dtype)
for i,E in enumerate(XML):
V = E['value']
values[i,:len(V)] = V
values[i,len(V):] = 0
del XML
del data
if first:
first = False
else:
# remove duplicate sample
values = values[1:]
metadata = metadata[1:]
# no non-duplicate samples
if len(metadata)==0:
break
Tcur = (int(metadata[-1]['sec']), int(metadata[-1]['ns']+1))
if displayMeta:
extraMeta = {'orig_type':orig_type, 'the_meta':the_meta, 'reported_arr_size':maxcount}
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, extraMeta=extraMeta, **cbKWs)
else:
yield defer.maybeDeferred(callback, values, metadata, *cbArgs, **cbKWs)
defer.returnValue(N)
@defer.inlineCallbacks
def fetchraw(self, pv, callback,
cbArgs=(), cbKWs={},
T0=None, Tend=None,
count=None, chunkSize=None,
archs=None, breakDown=None,
enumAsInt=False, displayMeta=False, rawTimes=False):
"""Fetch raw data for the given PV.
Results are passed to the given callback as they arrive.
"""
if breakDown is None:
breakDown = yield self.search(exact=pv, archs=archs,
breakDown=True, rawTime=True)
breakDown = breakDown[pv]
if len(breakDown)==0:
_log.error("PV not archived")
defer.returnValue(0)
if rawTimes:
Tcur, Tend = T0, Tend
else:
Tcur, Tend = timeTuple(T0), timeTuple(Tend)
_log.debug("Time range: %s -> %s", Tcur, Tend)
_log.debug("Planning with: %s", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))
plan = []
# Plan queries
# Find a set of non-overlapping regions
for F, L, K in breakDown:
# some mis-match of definitions
# the search results give the times
# of the first and last samples
# inclusive.
# time range [F, L]
# However, values() query end time
# is exclusive
# time range [F, L)
# We step the end time forward by 1 micro-second
# to ensure that the last sample can be returned.
# Note: it seems that Channel Archiver uses
# micro-sec resolution times for comparisons...
_log.debug("Before: %s", L)
LS, LN = L
LN += 1000
if LN>1000000000:
LS += 1
LN = 0
L = LS, LN
_log.debug("After: %s", L)
if L <= Tcur:
continue # Too early, keep going
elif F >= Tend:
break # No more data in range
# range to request from this archive
Rstart = max(Tcur, F)
Rend = min(Tend, L)
plan.append((Rstart, Rend, K))
Tcur = Rend
if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:
# requested range is later than last recorded sample,
# which is all we can return
F, L, K = breakDown[-1]
LS, LN = L
plan.append(((LS+1,0),(LS+2,0),K))
count=1
_log.debug("Returning last sample. No data in or after requested time range.")
elif len(plan)==0:
# requested range is earlier than first recorded sample.
_log.warn("Query plan empty. No data in or before request time range.")
defer.returnValue(0)
_log.debug("Using plan of %d queries %s", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))
N = yield self._nextraw(0, pv=pv, plan=plan,
Ctot=0, Climit=count, | callback=callback, cbArgs=cbArgs,
cbKWs=cbKWs, chunkSize=chunkSize, | random_line_split | |
app.js | ");
//APP.USE for middleware elements
app.use(bodyParser.urlencoded({extended: true})); //***
app.use(methodOverride("_method")); //***
app.use(express.static('public')); //the 'static' directory holds CSS files, images, etc.
app.use(session({ //***
secret: 'only for Worldstats',
resave: false,
saveUninitialized: true
}));
app.use('/', function(req,res,next){
req.login = function(user){
req.session.userId = user.id;
req.session.screen_name = user.screen_name;
};
req.currentUser = function() { ///***** WHY DOES THIS HAVE TWO RETURN STATEMENTS
return sql.Player.find({
where: { id: req.session.userId }
}).then(function(user) {
req.user = user;
return user;
})
};
req.logout = function() {
req.session.userId = null;
req.user = null;
};
next(); //move on to next middleware
},
gameStuff); //can only have one '/' level app to use, so put in gameStuff here
var loggedIn = function(req,res,next) {
if (req.session.userId) {
next();
} else {
res.redirect('/');
}
};
//For every page that should be limited when a Player is logged in, apply loggedIn middleware below:
app.use('/profile',loggedIn);
//app.use('/pregame',loggedIn); //FOR SOME REASON THIS MAY BE CAUSING CONFLICT WITH pregame's own authorization
app.use('/question', loggedIn);
app.use('/answer',loggedIn);
app.use('/nextquestion',loggedIn);
//Define the various ROUTES
//Public routes available without login are: 1) welcome page, 2) high scores page, 3) signup page
//ROOT route for welcome page
app.get('/', function(req,res){
console.log("Hello world!");
res.render('index.ejs');
});
//High scores page
app.get('/highscores', function(req,res){
console.log("Hello from highscores route")
sql.Score.findAll({limit: 10, order: '"game_score" DESC', include:[{model:sql.Player}]}).then(function(scoreData){
//console.log(scoreData);
var highScoreArray = [];
for (var i = 0; i < scoreData.length; i++) {
var highScore = scoreData[i].dataValues.game_score;
var highDate = scoreData[i].dataValues.date_played;
var highPlayer = scoreData[i].dataValues.Player.dataValues.screen_name;
highScoreArray.push([highScore,highDate,highPlayer]);
};
console.log("This is highScoreArray")
console.log(highScoreArray);
res.render('highscores.ejs',{ejsScoreArray:highScoreArray});
})
});
//Create new player page
app.get('/players/new', function(req,res){
res.render('signup.ejs');
// res.send("Be a player!");
});
//Once user submits form on create new player page, process info with route below:
app.post('/players', function(req,res){
var newScreenName = req.body.signupScreenName;
var newEmail = req.body.signupEmail;
var newPassword = req.body.signupPassword;
console.log(newScreenName,newEmail,newPassword);
console.log(typeof sql.Player.createSecure);
sql.Player.createSecure(newScreenName,newEmail,newPassword).then(
function(newUser){
res.redirect('/login');
});
});
//Login page
app.get('/login', function(req,res){
res.render('login.ejs');
});
app.post('/login', function(req,res){
var uScreenName = req.body.loginScreenName;
var uPassword = req.body.loginPassword;
console.log(uScreenName,uPassword);
sql.Player.authenticate(uScreenName,uPassword).then(
function(user){
if (user) {
//this means a user was returned by authenticate function, so password valid
req.login(user);
res.redirect('/pregame');
} else {
//this means no user wa`s returned (false was returned), so login credentials invalid
res.render('login.ejs'); //*** Add "login failed" error message to user
}
})
});
//Logout page
app.get('/logout', function(req,res){
req.logout();
res.redirect('/');
});
//Startgame with different levels
app.get('/startgame/:level', function(req, res){
console.log("Hello from startgame");
var level = req.params.level;
console.log("Level");
console.log(level);
req.setupGame(level);
res.redirect('/question');
});
//Private routes that are only available to players after log-in
//Authorization is handled via LoggedIn middleware function
//Profile page -- for edit
// app.get('/players/:id', function(req,res){ //*** STILL TO CONFIRM THIS WORKS AS URL PARAM
// console.log("Hello from profile page")
// console.log(req.currentUser);
// res.send(currentUser);
// })
//Profile page
app.get('/profile', function(req,res){
console.log("Hello from profile route");
var scoreObject = {};
var profileObject = {};
req.currentUser().then(function(foundPlayer){
//console.log("This is found player",foundPlayer);
if (foundPlayer) {
sql.Score.findAll({where: {PlayerId:req.session.userId}, limit: 3, order: '"game_score" DESC'}).then(function(myScores){
scoreObject = myScores;
profileObject = {"score":scoreObject,"player":foundPlayer};
res.render('profile',{ejsProfile:profileObject});
}) //end of myScores function
} else {
res.redirect('/login');
}
})
});
/** Testing the shift of game code to separate file
// and shift of game-consistent variables to req.session.varName
app.get('/teststeven',function(req,res){
req.setupGame();
//console.log()
res.redirect('/isworking');
});
app.get('/isworking', function(req,res){
console.log(req.session.gameScore);
});
**/
//Pregame page
app.get('/pregame', function(req,res){
req.currentUser().then(function(foundPlayer){
if (foundPlayer) { //This if is redundant -- but will leave it to have Player name on pregame page
//req.setupGame(); //initialize what is needed for the game MOVE TO startgame
res.render('pregame',{ejsFoundPlayer:foundPlayer});
} else {
res.redirect('/login');
}
})
});
//Question page
app.get('/question', function(req,res){
var renderIt = function(data){
res.render('question.ejs',{ejsQuestionData:data});
};
req.playBall(renderIt);
});
//Answer page
app.get('/answer', function(req,res){
console.log("Hello from answer page");
var playerAnswer = [];
var correctAnswer = [];
var answerCountryandValue = req.session.countryAndValueData;
for (var id in req.query) {
playerAnswer.push(req.query[id]);
// console.log("This is the answer value",id, req.query[id]);
};
for (var i = 0; i < answerCountryandValue.length; i++) {
correctAnswer.push(answerCountryandValue[i][0]);
};
console.log("playerAnswer",playerAnswer);
console.log("correctAnswer",correctAnswer);
//Send player answer and real answer to get scored
playerResults = compareAnswers(playerAnswer,answerCountryandValue);
//Use the score results to update score
req.session.gameScore += playerResults.numCorrect;
//Add info to gameSummary which is in req.session
req.session.gameSummary.push([req.session.currentRound,req.session.metricShortName,req.session.gameScore]);
console.log("this is gameSummary")
console.log(req.session.gameSummary);
console.log("playerResults")
console.log(playerResults);
res.render('answer.ejs',{ejsAnswer:playerResults});
});
var compareAnswers = function(playerAnswer,fullAnswer){
//this takes two arrays and compares how many items are the same and provides an object back with:
// {"numCorrect":#,
// "whichWrong":[index #s of wrong answer, wrong answer ];
console.log("Hello from compareAnswers");
var correctScore = 0;
var answerMatrix = [];
for (var i = 0; i < fullAnswer.length; i++) | ;
var scoreKey = {
"numCorrect":correctScore,
"answerKey":answerMatrix
};
return scoreKey;
};
app.get('/nextquestion', function(req,res){
if (req.session.nextRound >= req.session.maxRounds) {
//nextRound was already incremented in game.js -- so if nextRound is already beyond maxRounds, then game over!
console.log("Start gameover process");
console.log(req.session.gameSummary);
//Assemble object with data to present on gameover page
//FOR NOW JUST PASS FINAL SCORE AS MVP APPROACH ***
var gameFinalStats = {"finalScore":req.session.gameScore}; //NEED TO ADD ADDITIONAL DATA TO OBJECT
console.log("req.session.gameScore is",req.session.gameScore);
console.log("req.session.maxRounds is ",req.session.maxRounds);
console.log | {
if (playerAnswer[i] === fullAnswer[i][0]) {
correctScore++;
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],"Correct"]);
} else {
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],playerAnswer[i]]);
}
} | conditional_block |
app.js | ejs");
//APP.USE for middleware elements
app.use(bodyParser.urlencoded({extended: true})); //***
app.use(methodOverride("_method")); //***
app.use(express.static('public')); //the 'static' directory holds CSS files, images, etc.
app.use(session({ //***
secret: 'only for Worldstats',
resave: false,
saveUninitialized: true
}));
app.use('/', function(req,res,next){
req.login = function(user){
req.session.userId = user.id;
req.session.screen_name = user.screen_name;
};
req.currentUser = function() { ///***** WHY DOES THIS HAVE TWO RETURN STATEMENTS
return sql.Player.find({
where: { id: req.session.userId }
}).then(function(user) {
req.user = user;
return user;
})
};
req.logout = function() {
req.session.userId = null;
req.user = null;
};
next(); //move on to next middleware
},
gameStuff); //can only have one '/' level app to use, so put in gameStuff here
var loggedIn = function(req,res,next) {
if (req.session.userId) {
next();
} else {
res.redirect('/');
}
};
//For every page that should be limited when a Player is logged in, apply loggedIn middleware below:
app.use('/profile',loggedIn);
//app.use('/pregame',loggedIn); //FOR SOME REASON THIS MAY BE CAUSING CONFLICT WITH pregame's own authorization
app.use('/question', loggedIn);
app.use('/answer',loggedIn);
app.use('/nextquestion',loggedIn);
//Define the various ROUTES
//Public routes available without login are: 1) welcome page, 2) high scores page, 3) signup page
//ROOT route for welcome page
app.get('/', function(req,res){
console.log("Hello world!");
res.render('index.ejs');
});
//High scores page
app.get('/highscores', function(req,res){
console.log("Hello from highscores route")
sql.Score.findAll({limit: 10, order: '"game_score" DESC', include:[{model:sql.Player}]}).then(function(scoreData){
//console.log(scoreData);
var highScoreArray = [];
for (var i = 0; i < scoreData.length; i++) {
var highScore = scoreData[i].dataValues.game_score;
var highDate = scoreData[i].dataValues.date_played;
var highPlayer = scoreData[i].dataValues.Player.dataValues.screen_name;
highScoreArray.push([highScore,highDate,highPlayer]);
};
console.log("This is highScoreArray")
console.log(highScoreArray);
res.render('highscores.ejs',{ejsScoreArray:highScoreArray});
})
});
//Create new player page
app.get('/players/new', function(req,res){
res.render('signup.ejs');
// res.send("Be a player!");
});
//Once user submits form on create new player page, process info with route below:
app.post('/players', function(req,res){
var newScreenName = req.body.signupScreenName;
var newEmail = req.body.signupEmail;
var newPassword = req.body.signupPassword;
console.log(newScreenName,newEmail,newPassword);
console.log(typeof sql.Player.createSecure);
sql.Player.createSecure(newScreenName,newEmail,newPassword).then(
function(newUser){
res.redirect('/login');
});
});
//Login page
app.get('/login', function(req,res){
res.render('login.ejs');
});
app.post('/login', function(req,res){
var uScreenName = req.body.loginScreenName;
var uPassword = req.body.loginPassword;
console.log(uScreenName,uPassword);
sql.Player.authenticate(uScreenName,uPassword).then(
function(user){
if (user) {
//this means a user was returned by authenticate function, so password valid
req.login(user);
res.redirect('/pregame');
} else {
//this means no user wa`s returned (false was returned), so login credentials invalid
res.render('login.ejs'); //*** Add "login failed" error message to user
}
})
});
//Logout page
app.get('/logout', function(req,res){
req.logout();
res.redirect('/');
});
//Startgame with different levels
app.get('/startgame/:level', function(req, res){
console.log("Hello from startgame");
var level = req.params.level;
console.log("Level");
console.log(level);
req.setupGame(level);
res.redirect('/question');
});
//Private routes that are only available to players after log-in
//Authorization is handled via LoggedIn middleware function
//Profile page -- for edit
// app.get('/players/:id', function(req,res){ //*** STILL TO CONFIRM THIS WORKS AS URL PARAM
// console.log("Hello from profile page")
// console.log(req.currentUser);
// res.send(currentUser);
// })
//Profile page
app.get('/profile', function(req,res){
console.log("Hello from profile route");
var scoreObject = {};
var profileObject = {};
req.currentUser().then(function(foundPlayer){
//console.log("This is found player",foundPlayer);
if (foundPlayer) {
sql.Score.findAll({where: {PlayerId:req.session.userId}, limit: 3, order: '"game_score" DESC'}).then(function(myScores){
scoreObject = myScores;
profileObject = {"score":scoreObject,"player":foundPlayer};
res.render('profile',{ejsProfile:profileObject});
}) //end of myScores function
} else {
res.redirect('/login');
}
})
});
/** Testing the shift of game code to separate file
// and shift of game-consistent variables to req.session.varName
app.get('/teststeven',function(req,res){
req.setupGame();
//console.log()
res.redirect('/isworking');
});
app.get('/isworking', function(req,res){
console.log(req.session.gameScore);
});
**/
//Pregame page
app.get('/pregame', function(req,res){
req.currentUser().then(function(foundPlayer){
if (foundPlayer) { //This if is redundant -- but will leave it to have Player name on pregame page
//req.setupGame(); //initialize what is needed for the game MOVE TO startgame
res.render('pregame',{ejsFoundPlayer:foundPlayer});
} else {
res.redirect('/login');
}
})
});
//Question page
app.get('/question', function(req,res){
var renderIt = function(data){
res.render('question.ejs',{ejsQuestionData:data});
};
req.playBall(renderIt);
});
//Answer page | var answerCountryandValue = req.session.countryAndValueData;
for (var id in req.query) {
playerAnswer.push(req.query[id]);
// console.log("This is the answer value",id, req.query[id]);
};
for (var i = 0; i < answerCountryandValue.length; i++) {
correctAnswer.push(answerCountryandValue[i][0]);
};
console.log("playerAnswer",playerAnswer);
console.log("correctAnswer",correctAnswer);
//Send player answer and real answer to get scored
playerResults = compareAnswers(playerAnswer,answerCountryandValue);
//Use the score results to update score
req.session.gameScore += playerResults.numCorrect;
//Add info to gameSummary which is in req.session
req.session.gameSummary.push([req.session.currentRound,req.session.metricShortName,req.session.gameScore]);
console.log("this is gameSummary")
console.log(req.session.gameSummary);
console.log("playerResults")
console.log(playerResults);
res.render('answer.ejs',{ejsAnswer:playerResults});
});
var compareAnswers = function(playerAnswer,fullAnswer){
//this takes two arrays and compares how many items are the same and provides an object back with:
// {"numCorrect":#,
// "whichWrong":[index #s of wrong answer, wrong answer ];
console.log("Hello from compareAnswers");
var correctScore = 0;
var answerMatrix = [];
for (var i = 0; i < fullAnswer.length; i++) {
if (playerAnswer[i] === fullAnswer[i][0]) {
correctScore++;
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],"Correct"]);
} else {
answerMatrix.push([fullAnswer[i][0],fullAnswer[i][1],playerAnswer[i]]);
}
};
var scoreKey = {
"numCorrect":correctScore,
"answerKey":answerMatrix
};
return scoreKey;
};
app.get('/nextquestion', function(req,res){
if (req.session.nextRound >= req.session.maxRounds) {
//nextRound was already incremented in game.js -- so if nextRound is already beyond maxRounds, then game over!
console.log("Start gameover process");
console.log(req.session.gameSummary);
//Assemble object with data to present on gameover page
//FOR NOW JUST PASS FINAL SCORE AS MVP APPROACH ***
var gameFinalStats = {"finalScore":req.session.gameScore}; //NEED TO ADD ADDITIONAL DATA TO OBJECT
console.log("req.session.gameScore is",req.session.gameScore);
console.log("req.session.maxRounds is ",req.session.maxRounds);
console.log("req | app.get('/answer', function(req,res){
console.log("Hello from answer page");
var playerAnswer = [];
var correctAnswer = []; | random_line_split |
instruments.rs | }
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn | () -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output | parse_xctrace_template_list | identifier_name |
instruments.rs | /// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if !app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if !output.status.success() | {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
} | conditional_block | |
instruments.rs | (["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string
/// like `11.2.3`) and returns the corresponding semver struct
/// `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
    // `sw_vers` is a macOS system binary; a non-zero exit means we cannot
    // trust (or may not even have) the stdout payload.
    let Output { status, stdout, .. } =
        Command::new("sw_vers").args(["-productVersion"]).output()?;
    if !status.success() {
        return Err(anyhow!("macOS version cannot be determined"));
    }
    // Delegate parsing (including padding "11.1" to "11.1.0") to the helper.
    semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
    let Output { status, stdout, stderr } =
        Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
    if !status.success() {
        return Err(anyhow!(
            "Could not list templates. Please check your Xcode Instruments installation."
        ));
    }
    // Some older versions of xctrace print results on stderr,
    // newer version print results on stdout.
    let output = if stdout.is_empty() { stderr } else { stdout };
    let templates_str = std::str::from_utf8(&output)?;
    let mut templates_iter = templates_str.lines();
    // `by_ref` lets the same iterator be resumed below for the custom
    // section. `take_while` also consumes its first failing line, so the
    // separator after the standard section is eaten here, not left behind.
    let standard_templates = templates_iter
        .by_ref()
        .skip(1)
        .map(|line| line.trim())
        .take_while(|line| !line.starts_with('=') && !line.is_empty())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    if standard_templates.is_empty() {
        return Err(anyhow!(
            "No available templates. Please check your Xcode Instruments installation."
        ));
    }
    // The rest of the lines hold the custom templates; drop the leading
    // "== Custom Templates ==" header / blank lines first.
    // NOTE(review): `skip_while` only skips a leading run — a trailing blank
    // line would be collected as an empty name; confirm xctrace never emits one.
    let custom_templates = templates_iter
        .map(|line| line.trim())
        .skip_while(|line| line.starts_with('=') || line.is_empty())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
    let Output { status, stdout, .. } =
        Command::new("instruments").args(["-s", "templates"]).output()?;
    if !status.success() {
        return Err(anyhow!(
            "Could not list templates. Please check your Xcode Instruments installation."
        ));
    }
    let templates_str = std::str::from_utf8(&stdout)?;
    // Standard templates: every quoted name after the "Known Templates:"
    // header (skipped by `skip(1)`) up to the first entry that is a path
    // into the user's template directory.
    let standard_templates = templates_str
        .lines()
        .skip(1)
        .map(|line| line.trim().trim_matches('"'))
        .take_while(|line| !line.starts_with("~/Library/"))
        .map(|line| line.into())
        .collect::<Vec<_>>();
    if standard_templates.is_empty() {
        return Err(anyhow!(
            "No available templates. Please check your Xcode Instruments installation."
        ));
    }
    // Custom templates are listed as full `~/Library/...` paths; reduce each
    // to its file stem, which is the template's display name.
    // NOTE(review): `file_stem().unwrap()` assumes every such line ends in a
    // file component — confirm `instruments` never prints a bare directory.
    let custom_templates = templates_str
        .lines()
        .map(|line| line.trim().trim_matches('"'))
        .skip_while(|line| !line.starts_with("~/Library/"))
        .take_while(|line| !line.is_empty())
        .map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
/// Render the template catalog as a two-section table string.
///
/// Built-in templates are listed first with their CLI abbreviation (if any)
/// in a right-hand column, followed by the custom templates. Column width is
/// driven by the longest template name in either section.
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
    let mut output: String = "Xcode Instruments templates:\n".into();
    // FIX: `.max()` returns `None` for an empty catalog; the previous
    // `.unwrap()` would panic there. Default to 0 so an empty catalog still
    // renders the (empty) table instead of crashing.
    let max_width = catalog
        .standard_templates
        .iter()
        .chain(catalog.custom_templates.iter())
        .map(|name| name.len())
        .max()
        .unwrap_or(0);
    // column headers for the built-in section
    write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
    write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
    for name in &catalog.standard_templates {
        output.push('\n');
        // Pad the name so abbreviations line up in a column; names without
        // an abbreviation are printed bare.
        if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
            write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
                .unwrap();
        } else {
            output.push_str(name);
        }
    }
    output.push('\n');
    // column headers for the custom section
    write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
    write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
    for name in &catalog.custom_templates {
        output.push('\n');
        output.push_str(name);
    }
    output.push('\n');
    output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str | {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
} | identifier_body | |
instruments.rs | command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout, .. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if !status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line| !line.starts_with('=') && !line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, .. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if !status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line| !line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line| !line.starts_with("~/Library/"))
.take_while(|line| !line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if !trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
| /// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name { | random_line_split | |
a_fullscreen_wm.rs | , Debug, Clone)]
/// The state of the fullscreen window manager.
pub struct FullscreenWM {
    /// A vector of windows, the first one is on the bottom, the last one is
    /// on top, and also the only visible window.
    pub windows: VecDeque<Window>,
    /// We need to know which size the fullscreen window must be.
    pub screen: Screen,
    /// The currently focused window, if any.
    ///
    /// When this is `Some(w)`, `focus_window` has also moved `w` to the back
    /// of `windows` (the visible, on-top position); `None` means no focus.
    pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
    /// This window is not known by the window manager.
    UnknownWindow(Window),
    /// The window is already managed and cannot be added a second time.
    WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
    /// Render a human-readable message including the offending window id.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
            FullscreenWMError::WindowAlreadyManaged(ref window) => {
                write!(f, "Window already managed: {}", window)
            }
        }
    }
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
    /// Static category string for each variant (no per-window detail here;
    /// see the `Display` impl for the full message).
    fn description(&self) -> &'static str {
        match *self {
            FullscreenWMError::UnknownWindow(_) => "Unknown window",
            FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
        }
    }
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
    /// The constructor is straightforward.
    ///
    /// Track the given screen, start with an empty window deque, and with
    /// no window focused.
    fn new(screen: Screen) -> FullscreenWM {
        FullscreenWM {
            windows: VecDeque::new(),
            screen: screen,
            focused_window: None,
        }
    }
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
    /// Return the focused window, if any.
    ///
    /// This reads the dedicated `focused_window` field rather than assuming
    /// the last window in the deque is focused, so `None` is possible even
    /// when windows are managed.
    fn get_focused_window(&self) -> Option<Window> {
        self.focused_window
    }
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
    /// Cycle the focus to the previous or next window in stacking order.
    ///
    /// The deque is rotated one step (`Prev` moves the top window to the
    /// bottom, `Next` moves the bottom window to the top) and whatever ends
    /// up on top is then focused. Does nothing when no windows are managed.
    fn cycle_focus(&mut self, dir: PrevOrNext) {
        // Do nothing when there are no windows.
        if self.windows.is_empty() {
            return ();
        }
        match dir {
            PrevOrNext::Prev => {
                // `unwrap` is safe: emptiness was checked above.
                let last_window = self.windows.pop_back().unwrap();
                self.windows.push_front(last_window);
            }
            PrevOrNext::Next => {
                let first_window = self.windows.pop_front().unwrap();
                self.windows.push_back(first_window);
            }
        }
        // Focus the new top window. Because it was just taken from the deque,
        // `focus_window` should never return `UnknownWindow` here; the `Err`
        // arm is defensive. NOTE(review): errors are only printed to stdout —
        // confirm swallowing them is acceptable for this trait.
        let window = self.windows.back().map(|w| *w);
        match self.focus_window(window) {
            Ok(_) => {}
            Err(e) => println!("Error focus_window {}", e),
        }
        return ();
    }
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else | {
Err(FullscreenWMError::UnknownWindow(window))
} | conditional_block | |
a_fullscreen_wm.rs | of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused ...
focused_window: self.get_focused_window(),
// ... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> | {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
} | identifier_body | |
a_fullscreen_wm.rs | this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>, | /// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
| }
/// The errors that this window manager can return.
/// | random_line_split |
a_fullscreen_wm.rs | annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]: ../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn | (&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if !self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
| description | identifier_name |
lib.rs | , ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> |
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json | {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
} | identifier_body |
lib.rs | (ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn | load_cosmogony | identifier_name | |
lib.rs | , ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => |
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json | {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
} | conditional_block |
lib.rs | , ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if !is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if !is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
} | }
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the | random_line_split | |
main.rs | ...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn | (state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
| convert_state_str_to_vec | identifier_name |
main.rs | s_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a != 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... => .",
]
.iter()
.map(|x| x.to_string()) | .collect()
}
#[test]
fn test_convert_state_str_to_vec() { | random_line_split | |
main.rs | ...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 | {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum; | identifier_body | |
main.rs | ...#....#.....#..#..#..#...........
2: ...##..##...##....#..#..#..##..........
3: ..#.#...#..#.#....#..#..#...#..........
4: ...#.#..#...#.#...#..#..##..##.........
5: ....#...##...#.#..#..#...#...#.........
6: ....##.#.#....#...#..##..##..##........
7: ...#..###.#...##..#...#...#...#........
8: ...#....##.#.#.#..##..##..##..##.......
9: ...##..#..#####....#...#...#...#.......
10: ..#.#..#...#.##....##..##..##..##......
11: ...#...##...#.#...#.#...#...#...#......
12: ...##.#.#....#.#...#.#..##..##..##.....
13: ..#..###.#....#.#...#....#...#...#.....
14: ..#....##.#....#.#..##...##..##..##....
15: ..##..#..#.#....#....#..#.#...#...#....
16: .#.#..#...#.#...##...#...#.#..##..##...
17: ..#...##...#.#.#.#...##...#....#...#...
18: ..##.#.#....#####.#.#.#...##...##..##..
19: .#..###.#..#.#.#######.#.#.#..#.#...#..
20: .#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx != 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' | else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
| {
PotState::HasPlant
} | conditional_block |
fetch.rs | }
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need | Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0 | repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
| random_line_split |
fetch.rs | .instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> |
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer | {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?; | identifier_body |
fetch.rs | (client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev, .. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send + 'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all .cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create .last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes .cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every .cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find 'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for 'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent .cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write .cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = | from_registry | identifier_name | |
repooler.py | 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
for p in pros:
#For all artifacts in process
for o in p.all_outputs():
#If artifact is analyte type and has project name in sample
if o.type=="Analyte" and project in o.name:
location[o.name.split()[0]] = list()
location[o.name.split()[0]].append(o.location[0].id)
location[o.name.split()[0]].append(o.location[1])
#PRINT section
#Print stats including duplicates
timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
sumName = projName, "_summary_", timestamp,".txt"
sumName = ''.join(sumName)
with open(sumName, "w") as summary:
if sum(req_lanes.values()) != 0:
OPT = sum(total_lanes)/sum(req_lanes.values())
else:
OPT = 0
output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
output = ''.join(output)
summary.write( output )
output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
output = ''.join(output)
summary.write( output )
bin = 0
for index in xrange(1, len(lane_maps)+1):
bin += 1
summary.write('\n')
output = "Wells ", str(bin) , '-' , str(bin+int(total_lanes[index-1])-1),':','\n'
output = ''.join(output)
summary.write( output )
bin += int(total_lanes[index-1]-1)
for counter in xrange(1, len(lane_maps[index])):
output = str(lane_maps[index][counter]),' ', str(acc_ratios[index][counter]), "%",'\n'
output = ''.join(output)
summary.write( output )
#Creates csv
name = projName,"_repool_",timestamp,".csv"
name = ''.join(name)
wells = ['Empty','A','B','C','D','E','F','G','H']
#Index 0 is number, index 1 is Letter
wellIndex = [1, 1]
destNo = 0
with open(name, 'w') as csvfile:
writer = csv.writer(csvfile)
for index in xrange(1, len(lane_maps)+1):
for dupes in xrange(1, int(total_lanes[index-1])+1):
if lane_maps[index] == 0:
raise Exception('Error: Project not logged in x_flowcells database!')
for counter in xrange(1, len(lane_maps[index])):
#<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
#Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
sample = lane_maps[index][counter]
position = wells[wellIndex[1]],':',str(wellIndex[0])
position = ''.join(position)
try:
output = location[sample][0],location[sample][1],str(int(acc_ratios[index][counter]*2)),str(destid[destNo]),position
except KeyError:
print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
if not acc_ratios[index][counter] == 0:
writer.writerow(output)
#Increment wellsindex
if not acc_ratios[index][counter] == 0:
if not wellIndex[1] >= 8:
wellIndex[1] += 1
else:
wellIndex[1] = 1
if not wellIndex[0] >= 12:
wellIndex[0] += 1
else:
wellIndex[0] = 1
destNo += 1
try:
destid[destNo]
except IndexError:
print "Critical error; not enough destination plates provided"
@click.command()
@click.option('--project_id', required=True,help='REQUIRED: ID of project to repool. Examples:P2652, P1312 etc.')
@click.option('--dest_plate_list', default=['dp_1','dp_2','dp_3','dp_4','dp_5'],
help='List of destination plates for the robot\'s csv file. Include too many rather than too few; excess will be unused Default:[dp_1,dp_2,dp_3,dp_4,dp_5]')
@click.option('--target_clusters', default=320*1000000, help='Threshold of clusters per sample. \nDefault:320*1000000')
@click.option('--clusters_per_lane', default=380*1000000, help='Expected clusters generated by a single lane/well. \nDefault:380*1000000')
@click.option('--allow_non_dupl_struct', is_flag=True, help='Allow for samples to be present in different types of flowcells')
def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct):
"""Application that calculates samples under threshold for a project, then calculate the optimal composition for reaching the threshold
without altering concentrations nor the structure of the pools. Outputs both a summary as well as a functional csv file."""
couch = connection()
structure = proj_struct(couch, project_id, target_clusters)
[lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)
if allow_non_dupl_struct:
aggregator(lane_maps,clusters_rem,clusters_per_lane)
else:
simple_unique_set(lane_maps)
[ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)
acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)
generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)
if __name__ == '__main__':
| main() | conditional_block | |
repooler.py | ():
user = ''
pw = ''
couch = couchdb.Server('http://' + user + ':' + pw + '@tools.scilifelab.se:5984')
return couch
#Fetches the structure of a project
def proj_struct(couch, project, target_clusters):
db = couch['x_flowcells']
view = db.view('names/project_ids_list')
fc_track = defaultdict(set)
#Adds flowcells to ALL projects. Due to intractions its easier to just get FCs for ALL projects
for rec in view.rows:
fc = ''.join(rec.key)
fc = unicodedata.normalize('NFKD', fc).encode('ascii','ignore')
id = ''.join(rec.id)
id = unicodedata.normalize('NFKD', id).encode('ascii','ignore')
for projs in rec.value:
projs = ''.join(projs)
projs = unicodedata.normalize('NFKD', projs).encode('ascii','ignore')
if fc_track[projs] == set([]):
fc_track[projs] = dict()
fc_track[projs][fc] = id
#Adds lanes and samples to flowcells, includes samples from other projects if they share lane
if fc_track[project] == set([]):
raise Exception('Error: Project not logged in x_flowcells database!')
for fc, id in fc_track[project].items():
try:
entry = db[id]['illumina']
except KeyError:
print "Error: Illumina table for db entry" , id, "doesn't exist!"
entry = db[id]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']
for index in xrange(0, len(entry)):
lane = entry[index]['Lane']
sample = entry[index]['Sample']
if 'Clusters' in entry[index]:
clusters = entry[index]['Clusters']
else:
clusters = entry[index]['PF Clusters']
clusters = int(re.sub(r",", "", clusters))
if not isinstance(fc_track[project][fc], dict):
fc_track[project][fc] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)):
if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList)
for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!')
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach | connection | identifier_name | |
repooler.py | ] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
|
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
| tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)):
if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList)
for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!') | identifier_body |
repooler.py | fc] = dict()
if not lane in fc_track[project][fc]:
fc_track[project][fc][lane] = dict()
#Only counts samples for the given project, other samples are "auto-filled"
if project in sample:
fc_track[project][fc][lane][sample] = clusters
else:
fc_track[project][fc][lane][sample] = target_clusters
#Removes any lanes that don't have any part project samples
for fc, lanes in fc_track[project].items():
for lane,sample in lanes.items():
if not any(project in s for s in sample.keys()):
del fc_track[project][fc][lane]
return fc_track[project]
def parse_indata(struct, target_clusters):
clusters_rem = dict()
clusters_expr = dict()
lane_maps = dict()
counter = 1
#Takes in data and finds unique lane structure, clusters per sample and lane division
#Output could probably be sent as a nested hash.
#Clusters_rem AND clusters_expr may seem redundant, but it saves some calculatin
for fc, lanes in struct.items():
for lane, samples in lanes.items():
#Concatinate structure into a set of unique structures
mapping = sorted(samples.keys(), reverse=True)
if not mapping in lane_maps.values():
lane_maps[counter] = mapping
counter +=1
#Calculate clusters read per sample
for sample, value in samples.items():
if not sample in clusters_rem:
clusters_rem[sample] = target_clusters
clusters_expr[sample] = 0
clusters_rem[sample] -= value
clusters_expr[sample] += value
return [lane_maps, clusters_rem, clusters_expr]
#Creates a set where every sample uniquely appears once and only once
def simple_unique_set(lane_maps):
unique_lane_maps = dict()
for keyz, valz in lane_maps.items():
#Fetch what lanes inclusion of given lane excludes
excluded = list()
for sample in valz:
for k, v in lane_maps.items():
for dupe in v:
if dupe == sample and keyz != k and sample != 'Undetermined':
excluded.append(k)
break
#Check that none of the excluded lanes have uniquely present samples
acceptable = True
for key in excluded:
total_duplicates = list()
for values in lane_maps[key]:
duplicate = 0
for k, v in lane_maps.items():
for dupe in v:
if dupe == values and k != key and sample != 'Undetermined':
duplicate +=1
break
total_duplicates.append(duplicate)
if 0 in total_duplicates:
acceptable = False
break
if acceptable:
#Check that the lane doesn't have sample dupes in the accepted set already
for entries in valz:
for kuyz, vulz in unique_lane_maps.items():
for things in vulz:
if things == entries and entries != 'Undetermined':
acceptable = False
break
if acceptable:
unique_lane_maps[keyz] = valz
lane_maps = unique_lane_maps
#ALL SAMPLES PRESENT CHECK
# summap = []
# for k in lane_maps.keys():
# summap += lane_maps[k]
# print len(set(summap))
validate_template_struct(lane_maps)
def aggregator(lane_maps,clusters_rem,clusters_per_lane):
#Iterate
#Find all samples that are also expressed in another struct
#Sort those structs by duplication
#Fill them to floor(dups); unless mod % 1 > some_number; then ceil(dups)
#Note the remaining necessary
#End
#Use the remaining structs
#Ceil(dups) those babies
raise Exception('Error: Not yet implemented!')
#Gives how many percent of the lane should be allocated to a specific sample
def sample_distributor(lane_maps, clusters_rem, clusters_per_lane):
ideal_ratios = dict()
req_lanes = dict()
for index in lane_maps:
summ = 0
for entry in lane_maps[index]:
if clusters_rem[entry] > 0:
summ += clusters_rem[entry]
for entry in lane_maps[index]:
if not index in ideal_ratios:
ideal_ratios[index] = list()
if clusters_rem[entry] > 0:
ideal_ratios[index].append(clusters_rem[entry]/float(summ))
else:
ideal_ratios[index].append(0.0)
#Minimal number of required lanes per pool
req_lanes[index] = summ/float(clusters_per_lane)
#Have to be rounded up, rounding down when only using duplicates makes no sense
total_lanes = map(math.ceil, req_lanes.values())
return [ideal_ratios, req_lanes, total_lanes]
#Crude way to check that no samples are in different TYPES of lanes
def validate_template_struct(lane_maps):
tempList = list()
for k, v in lane_maps.items():
for index in xrange(1,len(v)): | for values in counter.itervalues():
if values > 1:
raise Exception('Error: This app does NOT handle situations where a sample'
'is present in lanes/well with differing structure!')
#Corrects volumes since conc is non-constant
#Also normalizes the numbers
#Finally translates float -> int without underexpressing anything
def correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes):
# Since some samples are strong and some weaksauce
# 10% in ideal_ratios does not mean 10% of lane volume
# As such, ideal_ratios need to be divided by actual_reads/expected_reads
# Ignores undetermined clusters in calculation
# Assumes sample conc cant be altered; aka only volume is modified
for ind in xrange(1, len(lane_maps.keys())+1):
#Bases w/o sample are not expected
if len(lane_maps[ind]) != 1:
exp = 1/float(len(lane_maps[ind])-1)
else:
exp = 1
laneTypeExpr = 0
counter = 0
for sample in lane_maps[ind]:
if not sample == 'Undetermined':
laneTypeExpr += clusters_expr[sample]
for sample in lane_maps[ind]:
act = clusters_expr[sample]/float(laneTypeExpr)
ideal_ratios[ind][counter] = ideal_ratios[ind][counter]*(exp/act)
counter += 1
#Normalizes numbers
for index in xrange(1, len(ideal_ratios.keys())+1):
curSum = sum(ideal_ratios[index])
for sample in xrange(0, len(ideal_ratios[index])):
if curSum == 0:
ideal_ratios[index][sample] = 0
else:
ideal_ratios[index][sample] = (ideal_ratios[index][sample]/curSum)*100
# Iteratively rounds to whole percent (min pipette for volume) to reach 100%
# ideal_ratio * req_lanes.values() = needed
# acc_ratio * total_lanes = current
# means a sample can take any whole number between the two
acc_ratios = copy.deepcopy(ideal_ratios)
for index in xrange(1, len(ideal_ratios.keys())+1):
for sample in xrange(0, len(ideal_ratios[index])):
acc_ratios[index][sample] = math.ceil(ideal_ratios[index][sample])
if sum(acc_ratios[index]) == 100:
break
else:
while sum(acc_ratios[index]) > 100:
stuck = True
for sample in xrange(1, len(ideal_ratios[index])):
need = ideal_ratios[index][sample]*req_lanes.values()[index-1]
cur = (acc_ratios[index][sample] - 1)*total_lanes[index-1]
if sum(acc_ratios[index]) > 100 and cur >= need:
acc_ratios[index][sample] -= 1
stuck = False
if sum(acc_ratios[index])== 100:
break
if(stuck):
total_lanes[index-1] += 1
return acc_ratios
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
#Gathers the container id and well name for all samples in project
#Cred to Denis for providing a base epp
location = dict()
lims = Lims(BASEURI, USERNAME, PASSWORD)
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == project:
projName = proj.name
break
#All normalization processes for project
norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
pros=lims.get_processes(type=norms, projectname=projName)
#For all processes
for | if not v[index] == 'Undetermined':
tempList.append(v[index])
counter = Counter(tempList) | random_line_split |
coltest4.py | )
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
|
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define | print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass | identifier_body |
coltest4.py | CM)
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else: | return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# |
if gamma: | random_line_split |
coltest4.py | )
GPIO.setwarnings(False)
GPIO.setup(red_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # red
GPIO.setup(black_button, GPIO.IN, pull_up_down=GPIO.PUD_UP) # black
GPIO.setup (led_rot, GPIO.OUT) # rote Led
GPIO.setup (led_green, GPIO.OUT) # rote Led
GPIO.output(led_rot, False)
GPIO.output(led_green, False)
# Calculate gamma correction table based on variable gamma
# Gamma correction ist used for all pixels in the image
gamma_a = bytearray(256)
for i in range(256):
gamma_a[i] = int(pow(i / g_maxin, gamma) * 255.0 + 0.5)
# if debug:
# for z in range(256):
# print "gamma_a %d %d" % (z,gamma_a[z])
# ----------
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
#------------
#------Wait for button Press (Black or Red) --------------------------------
def button_pressed():
print "Waiting for Tastendruck..."
while True:
inpblack=1
inpred=1
inpblack=GPIO.input(black_button) # high if NOT pressed !
inpred=GPIO.input(red_button)
# print "Button %d %d" % (inpblack, inpred)
sleep(0.2)
if not inpblack: return(BLACK) # black button went to low
if not inpred: # red button went to low
sleep(1) # check if red is pressed long or short
inpred=GPIO.input(red_button)
sleep(0.1)
if inpred: return(REDSHORT)
else: return(REDLONG)
pass
#-------------------------------------------
# ***** Function blink-led **************************
def blink_led(pin,anzahl): # blink led 3 mal bei start und bei shutdown
for i in range(anzahl):
GPIO.output(pin, True)
sleep(0.1)
GPIO.output(pin, False)
sleep(0.1)
# -------------------------
def draw() :
print"Zeichnen................"
sleep (1)
return(0)
#-------------------------
# ----- set all pixel to dark
def clearled(color):
for i in range (striplen):
strip.setPixelColor(i, (Color(color[0],color[1],color[2])))
strip.show()
return(0)
#--------------------------
#---------------------------------------------
def wheel2(pos,how=0):
# Generate rainbow colors within 0-255.
if pos < 85:
if how:
return (pos * 3, 255 - pos * 3, 0)
else:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
|
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# | return Color(start * 3, 255 - start * 3, 0) | conditional_block |
coltest4.py | return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
if how:
return (255 - pos * 3, 0, pos * 3)
else:
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
if how:
return (0, pos * 3, 255 - pos * 3)
else:
return Color (0, pos * 3, 255 - pos * 3)
#--------------------------------------------
#---------------------------------------------
def wheel (start , how=0, gamma=0):
# Generate rainbow colors within 0-255.
global gamma_a
if start < 85:
# print "%d: %d %d %d" % (start, start * 3, 255 - start * 3, 0)
if how:
if gamma:
return (gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else: # forward
return (start * 3, 255 - start * 3, 0)
else:
if gamma:
return Color(gamma_a[start * 3],gamma_a [ 255 - start * 3], 0)
else:
return Color(start * 3, 255 - start * 3, 0)
elif start < 170:
# print "%d: %d %d %d" % (start, 255- (start-85) * 3, 0, (start-85)*3 )
start -= 85
if how:
if gamma:
return (gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else: #forward
return (255 - start * 3, 0, start * 3)
else:
if gamma:
return Color(gamma_a[255 - start * 3], 0, gamma_a[start * 3])
else:
return Color(255 - start * 3, 0, start * 3)
else:
# print "%d: %d %d %d" % (start, 0, (start-170) * 3, 255 - (start-170) * 3)
start -= 170
if how:
if gamma:
return (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return (0, start * 3, 255 - start * 3)
else:
if gamma:
return Color (0, gamma_a[start * 3], gamma_a[255 - start * 3])
else:
return Color (0, start * 3, 255 - start * 3)
#--------------------------------------------
def rainbow_alt(strip, wait_ms=20, iterations=1):
if debug: print "Draw rainbow that fades across all pixels at once."
for j in range(256*iterations):
for i in range(striplen):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.setBrightness(80)
strip.show()
time.sleep(wait_ms/1000.0)
# -- Function write_rainbow ----------------------------------
# generate a rainbow pattern with lenght len, starting at pos
def write_rainbow(strip, start, len, gamma=0,bright=60):
if debug: print "write_rainbow start %d len %d gamma %d" % (start,len,gamma)
# use pixel start to start+len
stripl=strip.numPixels()
for i in range( 0,len):
if i < stripl:
strip.setPixelColor(start+i, wheel((i+i*256/len) & 255,0,gamma))
strip.setBrightness(bright)
strip.show()
# -- Function make_rainbow ----------------------------------
# return a rainbow pattern with lenght len
def make_rainbow(len, gamma=0):
if debug: print "return a rainbow pattern with lenght %d" % len
pat=[]
for i in range( 0,len):
pat.append(wheel((i+i*256/len) & 255,1,gamma))
return(pat)
#-----------------------------------------------
def rainbow(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbow start %d end %d iter: %d bright: %d" % (start, end, iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel((i+j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
#--------------------------------------------------
def rainbowCycle(strip, start=FULL, end=-1, bright=60, iterations=1, wait_ms=20 ):
if start==FULL:
start=0
end=strip.numPixels()
elif end == -1:
print "rainbow: end not defined"
return()
ctr=end-start
if debug: print "Draw rainbowCycle anzahl: %d bright: %d" % (iterations, bright)
for j in range(256*iterations):
for i in range(ctr):
strip.setPixelColor(i+start, wheel(((i * 256 / 150) + j) & 255))
strip.setBrightness(bright)
strip.show()
time.sleep(wait_ms/1000.0)
# sleep(0.4)
#----------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color):
global gamma_a
if debug: print "now in colorwipe"
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
#------------------------------------------------------
# Define functions which animate LEDs in various ways.
def colorWipe2(strip):
global gamma_a
if debug: print "Draw colorwipe2"
step=int(255/(striplen/3)) # Helligkeits step
if debug: print "step: %d " % step
if debug: print "now red"
max=255
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(i*step)
# print "i: %d max: %d" % (i,max)
max=255
y=0
if debug: print "now blue"
for i in range(striplen/3,2*striplen/3):
color=Color(0,0,gamma_a[max])
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print i,max
max=255
y=0
if debug: print "now green"
for i in range(2*striplen/3,striplen):
color=Color(0,gamma_a[max],0)
strip.setPixelColor(i, color)
strip.show()
time.sleep(delay/1000.0)
max=255-(y*step)
y=y+1
# print "i: %d max: %d" % (i,max)
#
clearled(BLACK)
if debug: print "now red umgekehrt"
i=0
max=0
for i in range(striplen/3):
color=Color(gamma_a[max],0,0)
# print max, gamma_a[max]
strip.setPixelColor(i, color)
strip.show()
i=i+1
time.sleep(delay/1000.0)
max=0+(i*step)
time.sleep(delay/1000.0)
# print "i: %d max: %d" % (i,max)
if debug: print "Anzahl show %d: " % i
return(0)
# ---------------------
def | colorWipe3 | identifier_name | |
python_module.py | .now() + datetime.timedelta(minutes= 30)) # 返回时间在当前时间上 +30 分钟
c_time = datetime.datetime.now()
print(c_time) # 当前时间为 2017-05-07 22:52:44.016732
print(c_time.replace(minute=3,hour=2)) # 时间替换 替换时间为‘2017-05-07 02:03:18.181732’
print(datetime.timedelta) # 表示时间间隔,即两个时间点之间的长度
print (datetime.datetime.now() - datetime.timedelta(days=5)) # 返回时间在当前时间上 -5 天
print('\n---------------------module:calendar--------------------')
# python 日历模块
import calendar
print(calendar.calendar(theyear= 2017)) # 返回2017年整年日历
print(calendar.month(2017,5)) # 返回某年某月的日历,返回类型为字符串类型
calendar.setfirstweekday(calendar.WEDNESDAY) # 设置日历的第一天(第一天以星期三开始)
cal = calendar.month(2017, 4)
print (cal)
print(calendar.monthrange(2017,5)) # 返回某个月的第一天和这个月的所有天数
print(calendar.monthcalendar(2017,5)) # 返回某个月以每一周为元素的序列
cal = calendar.HTMLCalendar(calendar.MONDAY)
print(cal.formatmonth(2017, 5)) # 在html中打印某年某月的日历
print(calendar.isleap(2017)) # 判断是否为闰年
print(calendar.leapdays(2000,2017)) # 判断两个年份间闰年的个数
print('\n---------------------module:random--------------------')
import random
# 随机数
print(random.random()) # 返回一个随机小数'0.4800545746046827'
print(random.randint(1,5)) # 返回(1-5)随机整型数据
print(random.randrange(1,10)) # 返回(1-10)随机数据
# 生成随机验证码
code = ''
for i in range(4):
current = random.randrange(0,4)
if current != i:
temp = chr(random.randint(65,90))
else:
temp = random.randint(0,9)
code += str(temp)
print(code)
print('\n---------------------module:os--------------------')
import os
print(os.getcwd()) # 获得当前工作目录
print(os.chdir(os.getcwd())) # 改变当前脚本的工作路径,相当于shell下的cd
print(os.curdir) # 返回当前目录‘.'
print(os.pardir) # 获取当前目录的父目录字符串名‘..'
print(os.makedirs('dirname1/dirname2')) # 可生成多层递归目录
print(os.removedirs('dirname1/dirname2')) # 若目录为空,则删除,并递归到上一级目录,如若也为空,则删除,依此类推
print(os.mkdir('test4')) # 生成单级目录;相当于shell中mkdir dirname
print(os.rmdir('test4')) # 删除单级空目录,若目录不为空则无法删除,报错;相当于shell中rmdir dirname
print(os.listdir(os.getcwd())) # 列出指定目录下的所有文件和子目录,包括隐藏文件,并以列表方式打印
# print(os.remove('log.log')) # 删除一个指定的文件
os.mkdir('oldname')
print(os.rename("oldname","newname")) # 重命名文件/目录)
print(os.stat(os.getcwd())) # 获取文件/目录信息
print(os.pathsep) # 输出用于分割文件路径的字符串';'
print(os.name) # 输出字符串指示当前使用平台。win->'nt'; Linux->'posix'
# print(os.system(command='ls')) # 运行shell命令,直接显示
print(os.environ) # 获得系统的环境变量
print(os.path.abspath(os.getcwd())) # 返回path规范化的绝对路径
print(os.path.split(os.getcwd())) # 将path分割成目录和文件名二元组返回
print(os.path.dirname(os.getcwd())) # 返回path的目录。其实就是os.path.split(path)的第一个元素
print(os.path.basename(os.getcwd())) # 返回path最后的文件名。如果path以/或\结尾,那么就会返回空值。即os.path.split(path)的第二个元素
print(os.path.exists('test')) # 判断path是否存在
print(os.path.isabs(os.getcwd())) # 如果path是绝对路径,返回True
print(os.path.isfile('test')) # 如果path是一个存在的文件,返回True。否则返回False
print(os.path.isdir(os.getcwd())) # 如果path是一个存在的目录,则返回True。否则返回False
print(os.path.getatime(os.getcwd())) # 返回path所指向的文件或者目录的最后存取时间
print(os.path.getmtime(os.getcwd())) # 返回path所指向的文件或者目录的最后修改时间
print('\n---------------------module:sys--------------------')
import sys
print(sys.argv) # 命令行参数List,第一个元素是程序本身路径
# print(sys.exit(0)) # 退出程序,正常退出时exit(0)
print(sys.version) # 获取python的版本信息
print(sys.path) # 返回模块的搜索路径,初始化时使用PYTHONPATH环境变量的值
print(sys.platform) # 返回操作平台的名称
print('\n---------------------module:shutil--------------------')
import shutil
fsrc = open("fsrc.txt", 'w+') #直接打开一个文件,如果文件不存在则创建文件
fdst = open("fdst.txt", 'w') #直接打开一个文件,如果文件不存在则创建文件
fsrc.write('fsrc')
src = "fsrc.txt"
dst = "fdst.txt"
shutil.copyfileobj(fsrc, fdst, length=16*1024) # 将文件内容拷贝到另一个文件中,可以是部分内容
shutil.copyfile(src, dst) # 拷贝文件
shutil.copymode(src, dst) # 仅拷贝权限。内容、组、用户均不变
shutil.copystat(src, dst) # 拷贝状态的信息,包括:mode bits, atime, mtime, flags
shutil.copy(src, dst) # 拷贝文件和权限
shutil.copy2(src, dst) # 拷贝文件和状态信息
shutil.move(src, dst) # 递归的去移动文件
# base_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径
# format: 压缩包种类,“zip”, “tar”, “bztar”,“gztar”
# root_dir: 要压缩的文件夹路径(默认当前目录)
# owner: 用户,默认当前用户
# group: 组,默认当前组
# logger: 用于记录日志,通常是logging.Logger对象
shutil.make_archive('archive_base_name', 'zip',os.getcwd()) # 创建压缩包并返回文件路径,例如:zip、tar
print('\n---------------------module:zipfile--------------------')
#shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的:
# zipfile 压缩解压
import zipfile
# 压缩
z = zipfile.ZipFile('archive_base_name.zip', 'w')
z.write('fdst.txt')
z.close()
# 解压
z = zipfile.ZipFile('archive_base_name.zip', 'r')
z.extractall()
z.close()
print('\n---------------------module:tarfile--------------------')
# tarfile 压缩解压
import tarfile
# 压缩
tar = tarfile.open('your.tar','w')
tar.add('archive_base_name.zip')
# tar.add('/Users/wupeiqi/PycharmProjects/cmdb.zip', arcname='cmdb.zip')
tar.close()
# 解压
tar = tarfile.open('your.tar','r')
tar.extractall() # 可设置解压地址
tar.close()
print('\n---------------------module:xml--------------------')
# xml的格式如下,就是通过<>节点来区别数据结构的:
xmlstr = r'''<?xml version="1.0"?>
<data>
<country name="Liechtenstein">
<rank updated="yes">2</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
<country name="Singapore">
<rank updated="yes">5</rank>
<year>2011</year>
<gdppc>59900</gdppc>
<neighbor name="Malaysia" direction="N"/>
</country>
<country name="Panama">
| <rank updated="yes">69</rank>
<year>2011</year>
| random_line_split | |
python_module.py | 名。如果path以/或\结尾,那么就会返回空值。即os.path.split(path)的第二个元素
print(os.path.exists('test')) # 判断path是否存在
print(os.path.isabs(os.getcwd())) # 如果path是绝对路径,返回True
print(os.path.isfile('test')) # 如果path是一个存在的文件,返回True。否则返回False
print(os.path.isdir(os.getcwd())) # 如果path是一个存在的目录,则返回True。否则返回False
print(os.path.getatime(os.getcwd())) # 返回path所指向的文件或者目录的最后存取时间
print(os.path.getmtime(os.getcwd())) # 返回path所指向的文件或者目录的最后修改时间
print('\n---------------------module:sys--------------------')
import sys
print(sys.argv) # 命令行参数List,第一个元素是程序本身路径
# print(sys.exit(0)) # 退出程序,正常退出时exit(0)
print(sys.version) # 获取python的版本信息
print(sys.path) # 返回模块的搜索路径,初始化时使用PYTHONPATH环境变量的值
print(sys.platform) # 返回操作平台的名称
print('\n---------------------module:shutil--------------------')
import shutil
fsrc = open("fsrc.txt", 'w+') #直接打开一个文件,如果文件不存在则创建文件
fdst = open("fdst.txt", 'w') #直接打开一个文件,如果文件不存在则创建文件
fsrc.write('fsrc')
src = "fsrc.txt"
dst = "fdst.txt"
shutil.copyfileobj(fsrc, fdst, length=16*1024) # 将文件内容拷贝到另一个文件中,可以是部分内容
shutil.copyfile(src, dst) # 拷贝文件
shutil.copymode(src, dst) # 仅拷贝权限。内容、组、用户均不变
shutil.copystat(src, dst) # 拷贝状态的信息,包括:mode bits, atime, mtime, flags
shutil.copy(src, dst) # 拷贝文件和权限
shutil.copy2(src, dst) # 拷贝文件和状态信息
shutil.move(src, dst) # 递归的去移动文件
# base_name: 压缩包的文件名,也可以是压缩包的路径。只是文件名时,则保存至当前目录,否则保存至指定路径
# format: 压缩包种类,“zip”, “tar”, “bztar”,“gztar”
# root_dir: 要压缩的文件夹路径(默认当前目录)
# owner: 用户,默认当前用户
# group: 组,默认当前组
# logger: 用于记录日志,通常是logging.Logger对象
shutil.make_archive('archive_base_name', 'zip',os.getcwd()) # 创建压缩包并返回文件路径,例如:zip、tar
print('\n---------------------module:zipfile--------------------')
#shutil 对压缩包的处理是调用 ZipFile 和 TarFile 两个模块来进行的:
# zipfile 压缩解压
import zipfile
# 压缩
z = zipfile.ZipFile('archive_base_name.zip', 'w')
z.write('fdst.txt')
z.close()
# 解压
z = zipfile.ZipFile('archive_base_name.zip', 'r')
z.extractall()
z.close()
print('\n---------------------module:tarfile--------------------')
# tarfile 压缩解压
import tarfile
# 压缩
tar = tarfile.open('your.tar','w')
tar.add('archive_base_name.zip')
# tar.add('/Users/wupeiqi/PycharmProjects/cmdb.zip', arcname='cmdb.zip')
tar.close()
# 解压
tar = tarfile.open('your.tar','r')
tar.extractall() # 可设置解压地址
tar.close()
print('\n---------------------module:xml--------------------')
# xml的格式如下,就是通过<>节点来区别数据结构的:
xmlstr = r'''<?xml version="1.0"?>
<data>
<country name="Liechtenstein">
<rank updated="yes">2</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
<country name="Singapore">
<rank updated="yes">5</rank>
<year>2011</year>
<gdppc>59900</gdppc>
<neighbor name="Malaysia" direction="N"/>
</country>
<country name="Panama">
<rank updated="yes">69</rank>
<year>2011</year>
<gdppc>13600</gdppc>
<neighbor name="Costa Rica" direction="W"/>
<neighbor name="Colombia" direction="E"/>
</country>
</data>
'''
# xml协议在各个语言里的都 是支持的,在python中可以用以下模块操作xml
import xml.etree.ElementTree as ET
fpxml = open('xmltest.xml', 'w+')
fpxml.write(xmlstr)
fpxml.close()
tree = ET.parse("xmltest.xml")
root = tree.getroot()
print(root.tag)
#遍历xml文档
for child in root:
print(child.tag, child.attrib)
for i in child:
print(i.tag,i.text)
#只遍历year 节点
for node in root.iter('year'):
print(node.tag,node.text)
# 修改和删除xml文档内容
import xml.etree.ElementTree as ET
tree = ET.parse("xmltest.xml")
root = tree.getroot()
#修改
for node in root.iter('year'):
new_year = int(node.text) + 1
node.text = str(new_year)
node.set("updated","yes")
tree.write("xmltest.xml")
#删除node
for country in root.findall('country'):
rank = int(country.find('rank').text)
if rank > 50:
root.remove(country)
tree.write('output.xml')
# 自己创建xml文档
import xml.etree.ElementTree as ET
new_xml = ET.Element("namelist")
name = ET.SubElement(new_xml, "name", attrib={"enrolled": "yes"})
age = ET.SubElement(name, "age", attrib={"checked": "no"})
age = ET.SubElement(name, "age")
age.text = '33'
name2 = ET.SubElement(new_xml, "name", attrib={"enrolled": "no"})
age = ET.SubElement(name2, "age")
age.text = '19'
et = ET.ElementTree(new_xml) # 生成文档对象
et.write("test.xml", encoding="utf-8", xml_declaration=True)
ET.dump(new_xml) # 打印生成的格式
print('\n---------------------module:configparser--------------------')
# 好多软件的常见文档格式如下
'''
[DEFAULT]
compressionlevel = 9
serveraliveinterval = 45
compression = yes
forwardx11 = yes
[bitbucket.org]
user = hg
[topsecret.server.com]
host port = 50022
forwardx11 = no
'''
# python 生成一个这样的文档
try:
import configparser
except ImportError as e:
pass
else:
config = configparser.ConfigParser()
config["DEFAULT"] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022'
topsecret['ForwardX11'] = 'no'
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
# 写完了还可以再读出来
config = configparser.ConfigParser()
config.sections()
file = config.read('example.ini')
print(file) # ['example.ini']
title = config.sections()
print(title) # ['bitbucket.org', 'topsecret.server.com']
print('bitbucket.org' in config) # True
print('bytebong.com' in config) # False
print(config['bitbucket.org']['User']) # hg
print(config['DEFAULT']['Compression']) # yes
topsecret = config['topsecret.server.com']
print(topsecret['ForwardX11']) # no
print(topsecret['Host Port']) # 50022
for key in config['topsecret.server.com']:
print(key)
'''
输出结果:
host port
forwardx11
compressionlevel
serveraliveinterval
compression
'''
print(config['topsecret.server.com']['Compression']) # yes
# configparser增删改查语法
config = configparser.ConfigParser()
config.read('i.cfg')
secs = config.sections() # 返回配置文件中的主节点
print (secs)
options = config.options('bitbucket.org')
pr | int(options) | conditional_block | |
retransmission.rs | _water_mark: TSN,
}
impl State {
pub fn new(tx_high_water_mark: TSN) -> State {
State {
timer: None,
measurements: Measurements::new(),
tx_high_water_mark,
}
}
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
fn on_outgoing_data(&mut self, chunk_tsn: TSN);
fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
fn on_timeout(&mut self);
}
impl Retransmission for Association {
fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
// On fresh transmissions, perform RTT measurements.
if chunk_tsn > self.rtx.tx_high_water_mark {
// This is a newly sent chunk (not a retransmission), so take a measurement if needed.
self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
// Raise the high water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk. | if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl | let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone()); | random_line_split |
retransmission.rs | _mark: TSN,
}
impl State {
pub fn new(tx_high_water_mark: TSN) -> State {
State {
timer: None,
measurements: Measurements::new(),
tx_high_water_mark,
}
}
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
fn on_outgoing_data(&mut self, chunk_tsn: TSN);
fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
fn on_timeout(&mut self);
}
impl Retransmission for Association {
fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
// On fresh transmissions, perform RTT measurements.
if chunk_tsn > self.rtx.tx_high_water_mark {
// This is a newly sent chunk (not a retransmission), so take a measurement if needed.
self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
// Raise the high water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk.
let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone());
if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) |
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
| {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
} | identifier_body |
retransmission.rs | water mark.
self.rtx.tx_high_water_mark = chunk_tsn;
}
// R1) On any transmission, start the rtx timer if it is not already running.
if self.rtx.timer.is_none() {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
}
}
fn on_cumulative_ack(
&mut self,
cumulative_tsn_ack: TSN,
earliest_outstanding_tsn: Option<TSN>,
) {
// Perform RTT measurements
self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
// R2) If all outstanding data has been acknowledged, then cancel the timer.
self.rtx.timer = None;
} else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
// R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
if cumulative_tsn_ack >= earliest_outstanding_tsn {
self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
}
}
}
// TODO remove
fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// Perform RTT measurements
self.rtx.measurements.on_gap_ack_block(start, end);
}
fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
let mut tsn = cumulative_tsn_ack;
for block in gap_ack_blocks {
let ack_start = cumulative_tsn_ack + block.start as u32;
let ack_end = cumulative_tsn_ack + block.end as u32;
// Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
// have been received. However, the receiver has the option of discarding them and
// having us retransmit them, so they must stay in the sent queue until acknowledged
// via the cumulative TSN.
// Perform RTT measurements, if needed
self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
// This should always be true if the peer is constructing SACKs properly.
if ack_start > tsn + 1 {
let gap_start = tsn + 1;
let gap_end = ack_start - 1;
// This could just be a for loop, whenever std::iter::Step becomes stable.
let mut gap_tsn = gap_start;
loop {
// TODO: Mark this gap chunk for retransmission.
gap_tsn += 1;
if gap_tsn > gap_end {
break;
}
}
}
// TODO: Store received ranges, so we can know if the peer decides to drop them?
// (So we can implement R4.)
tsn = ack_end;
}
// R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
// (e.g. it was dropped by the peer), then start the timer.
//
// TODO
}
fn on_timeout(&mut self) {
// E1) Update congestion control as needed.
// - adjust ssthresh according to Section 7.2.3
// - set cwnd to the MTU
// TODO
// E2) Double RTO up to RTO.Max to provide back-off.
self.rtx.measurements.rto *= 2;
let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
// E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
// based on the MTU.
retransmit_immediate(self);
// Any remaining DATA chunks should be "marked for retransmission" and sent as soon
// as cwnd allows.
retransmit_all_except_first(self);
}
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk.
let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone());
if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
// Re-queue unacknowledged chunks
let bytes = association
.data
.sent_queue
.transfer_all(&mut association.data.send_queue);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
if let Some(first) = association.data.sent_queue.front().map(|c| c.tsn) {
if let Some(last) = association.data.sent_queue.back().map(|c| c.tsn) {
if last > first {
retransmit_range(association, first + 1, last);
}
}
}
}
#[derive(Clone, Copy)]
struct SmoothingState {
srtt: Duration, // Smoothed round-trip time
rttvar: Duration, // Round-trip time variation
}
pub struct Measurements {
rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
rtt_smoothing: Option<SmoothingState>,
rto: Duration,
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn | complete_rtt_measurement | identifier_name | |
file.rs | ={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate | : UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc | (&self, path | identifier_name |
file.rs | ={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64; | let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc | random_line_split | |
file.rs | :?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
le | use the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = | t path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// ca | identifier_body |
file.rs | :?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if !in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if !out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if !in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if !out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1) != 0 {
| flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = | flags |= OpenFlags::CLOEXEC;
} else {
| conditional_block |
cluster.go | Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey, | RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, err | random_line_split | |
cluster.go | Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) | (ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, | createInner | identifier_name |
cluster.go | Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error | }
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil {
return err
}
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, | {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx) | identifier_body |
cluster.go | Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
// Username for http basic authentication
Username string `json:"username,omitempty" yaml:"username,omitempty"`
// Password for http basic authentication
Password string `json:"password,omitempty" yaml:"password,omitempty"`
// Root CaCertificate for API server(base64 encoded)
RootCACert string `json:"rootCACert,omitempty" yaml:"root_ca_cert,omitempty"`
// Client Certificate(base64 encoded)
ClientCertificate string `json:"clientCertificate,omitempty" yaml:"client_certificate,omitempty"`
// Client private key(base64 encoded)
ClientKey string `json:"clientKey,omitempty" yaml:"client_key,omitempty"`
// Node count in the cluster
NodeCount int64 `json:"nodeCount,omitempty" yaml:"node_count,omitempty"`
// Metadata store specific driver options per cloud provider
Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"`
PersistStore PersistentStore `json:"-" yaml:"-"`
ConfigGetter ConfigGetter `json:"-" yaml:"-"`
Logger logstream.Logger `json:"-" yaml:"-"`
}
// PersistentStore defines the interface for persist options like check and store
type PersistentStore interface {
GetStatus(name string) (string, error)
Get(name string) (Cluster, error)
Remove(name string) error
Store(cluster Cluster) error
PersistStatus(cluster Cluster, status string) error
}
// ConfigGetter defines the interface for getting the driver options.
type ConfigGetter interface {
GetConfig() (types.DriverOptions, error)
}
// Create creates a cluster
func (c *Cluster) Create(ctx context.Context) error {
if c.RootCACert != "" && c.Status == "" {
c.PersistStore.PersistStatus(*c, Init)
}
err := c.createInner(ctx)
if err != nil {
if err == ErrClusterExists {
c.PersistStore.PersistStatus(*c, Running)
} else {
c.PersistStore.PersistStatus(*c, Error)
}
return err
}
return c.PersistStore.PersistStatus(*c, Running)
}
func (c *Cluster) create(ctx context.Context, clusterInfo *types.ClusterInfo) error {
if c.Status == PostCheck {
return nil
}
if err := c.PersistStore.PersistStatus(*c, PreCreating); err != nil {
return err
}
// get cluster config from cli flags or json config
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
// also set metadata value to retrieve the cluster info
for k, v := range c.Metadata {
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Creating); err != nil {
return err
}
// create cluster
info, err := c.Driver.Create(ctx, &driverOpts, clusterInfo)
if info != nil {
transformClusterInfo(c, info)
}
return err
}
func (c *Cluster) PostCheck(ctx context.Context) error {
if err := c.PersistStore.PersistStatus(*c, PostCheck); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) GenerateServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
// receive cluster info back
info, err := c.Driver.PostCheck(ctx, toInfo(c))
if err != nil {
return err
}
transformClusterInfo(c, info)
// persist cluster info
return c.Store()
}
func (c *Cluster) RemoveLegacyServiceAccount(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
return c.Driver.RemoveLegacyServiceAccount(ctx, toInfo(c))
}
func (c *Cluster) createInner(ctx context.Context) error {
// check if it is already created
c.restore()
var info *types.ClusterInfo
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
info = toInfo(c)
}
if c.Status == Updating || c.Status == Running || c.Status == PostCheck || c.Status == Init {
logrus.Infof("Cluster %s already exists.", c.Name)
return ErrClusterExists
}
if err := c.create(ctx, info); err != nil {
return err
}
return c.PostCheck(ctx)
}
// Update updates a cluster
func (c *Cluster) Update(ctx context.Context) error {
if err := c.restore(); err != nil {
return err
}
if c.Status == Error {
logrus.Errorf("Cluster %s previously failed to create", c.Name)
return c.Create(ctx)
}
if c.Status == PreCreating || c.Status == Creating {
logrus.Errorf("Cluster %s has not been created.", c.Name)
return fmt.Errorf("cluster %s has not been created", c.Name)
}
driverOpts, err := c.ConfigGetter.GetConfig()
if err != nil {
return err
}
driverOpts.StringOptions["name"] = c.Name
for k, v := range c.Metadata {
if k == "state" {
state := make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &state); err == nil {
flattenIfNotExist(state, &driverOpts)
}
continue
}
driverOpts.StringOptions[k] = v
}
if err := c.PersistStore.PersistStatus(*c, Updating); err != nil {
return err
}
info := toInfo(c)
info, err = c.Driver.Update(ctx, info, &driverOpts)
if err != nil |
transformClusterInfo(c, info)
return c.PostCheck(ctx)
}
func (c *Cluster) GetVersion(ctx context.Context) (*types.KubernetesVersion, error) {
return c.Driver.GetVersion(ctx, toInfo(c))
}
func (c *Cluster) SetVersion(ctx context.Context, version *types.KubernetesVersion) error {
return c.Driver.SetVersion(ctx, toInfo(c), version)
}
func (c *Cluster) GetClusterSize(ctx context.Context) (*types.NodeCount, error) {
return c.Driver.GetClusterSize(ctx, toInfo(c))
}
func (c *Cluster) SetClusterSize(ctx context.Context, count *types.NodeCount) error {
return c.Driver.SetClusterSize(ctx, toInfo(c), count)
}
func transformClusterInfo(c *Cluster, clusterInfo *types.ClusterInfo) {
c.ClientCertificate = clusterInfo.ClientCertificate
c.ClientKey = clusterInfo.ClientKey
c.RootCACert = clusterInfo.RootCaCertificate
c.Username = clusterInfo.Username
c.Password = clusterInfo.Password
c.Version = clusterInfo.Version
c.Endpoint = clusterInfo.Endpoint
c.NodeCount = clusterInfo.NodeCount
c.Metadata = clusterInfo.Metadata
c.ServiceAccountToken = clusterInfo.ServiceAccountToken
c.Status = clusterInfo.Status
}
func toInfo(c *Cluster) *types.ClusterInfo {
return &types.ClusterInfo{
ClientCertificate: c.ClientCertificate,
ClientKey: c.ClientKey,
RootCaCertificate: c.RootCACert,
Username: c.Username,
Password: c.Password,
Version: c.Version,
Endpoint: c.Endpoint,
NodeCount: c.NodeCount,
Metadata: c.Metadata,
ServiceAccountToken: c.ServiceAccountToken,
Status: c.Status,
}
}
// Remove removes a cluster
func (c *Cluster) Remove(ctx context.Context, forceRemove bool) error {
if err := c.restore(); errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
if err := c.Driver.Remove(ctx, toInfo(c)); err != nil {
// Persist store removal must take place despite error to prevent cluster from being stuck in remove state
// TODO: We should add a "forceRemove" action to cluster and then revert this to return an error, so that
// the user can see the problem and take appropriate action
if !forceRemove {
return fmt.Errorf("Error removing cluster [%s] with driver [%s]: %v", c.Name, c.DriverName, err)
}
logrus.Errorf("Error removing cluster [%s] with driver [%s]. Check for stray resources on cloud provider: %v", c.Name, c.DriverName, err)
}
return c.PersistStore.Remove(c.Name)
}
func (c *Cluster) GetCapabilities(ctx context.Context) (*types.Capabilities, error) {
return c.Driver.GetCapabilities(ctx)
}
func (c *Cluster) GetK8SCapabilities(ctx context.Context) (*types.K8SCapabilities, error) {
options, err := c.ConfigGetter.GetConfig()
if err != nil {
return nil, err
}
return c.Driver.GetK8SCapabilities(ctx, &options)
}
func (c *Cluster) GetDriverCreateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverCreateOptions(ctx)
}
func (c *Cluster) GetDriverUpdateOptions(ctx context.Context) (*types.DriverFlags, error) {
return c.Driver.GetDriverUpdateOptions(ctx)
}
// Store persists cluster information
func (c *Cluster) Store() error {
return c.PersistStore.Store(*c)
}
func (c *Cluster) restore() error {
cluster, | {
return err
} | conditional_block |
http.class.js | _OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() |
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
| {
return this.request.method
} | identifier_body |
http.class.js | HTTP_OK: 200,
HTTP_CREATED: 201,
HTTP_ACCEPTED: 202,
HTTP_NON_AUTHORITATIVE_INFORMATION: 203,
HTTP_NO_CONTENT: 204,
HTTP_RESET_CONTENT: 205,
HTTP_PARTIAL_CONTENT: 206,
HTTP_MULTI_STATUS: 207, // RFC4918
HTTP_ALREADY_REPORTED: 208, // RFC5842
HTTP_IM_USED: 226, // RFC3229
HTTP_MULTIPLE_CHOICES: 300,
HTTP_MOVED_PERMANENTLY: 301,
HTTP_FOUND: 302,
HTTP_SEE_OTHER: 303,
HTTP_NOT_MODIFIED: 304,
HTTP_USE_PROXY: 305,
HTTP_RESERVED: 306,
HTTP_TEMPORARY_REDIRECT: 307,
HTTP_PERMANENTLY_REDIRECT: 308, // RFC7238
HTTP_BAD_REQUEST: 400,
HTTP_UNAUTHORIZED: 401,
HTTP_PAYMENT_REQUIRED: 402,
HTTP_FORBIDDEN: 403,
HTTP_NOT_FOUND: 404,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() { | return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
is | return (this.request.method === 'POST')
}
isPut() { | random_line_split |
http.class.js | HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) {
postData = JSON.parse(body);
} else {
postData = qs.parse(body);
}
resolve(postData)
})
})
}
/**
* Get UserAgent info for server
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
| getUserAgent | identifier_name | |
http.class.js | ,
HTTP_METHOD_NOT_ALLOWED: 405,
HTTP_NOT_ACCEPTABLE: 406,
HTTP_PROXY_AUTHENTICATION_REQUIRED: 407,
HTTP_REQUEST_TIMEOUT: 408,
HTTP_CONFLICT: 409,
HTTP_GONE: 410,
HTTP_LENGTH_REQUIRED: 411,
HTTP_PRECONDITION_FAILED: 412,
HTTP_REQUEST_ENTITY_TOO_LARGE: 413,
HTTP_REQUEST_URI_TOO_LONG: 414,
HTTP_UNSUPPORTED_MEDIA_TYPE: 415,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: 416,
HTTP_EXPECTATION_FAILED: 417,
HTTP_I_AM_A_TEAPOT: 418, // RFC2324
HTTP_UNPROCESSABLE_ENTITY: 422, // RFC4918
HTTP_LOCKED: 423, // RFC4918
HTTP_FAILED_DEPENDENCY: 424, // RFC4918
HTTP_RESERVED_FOR_WEBDAV_ADVANCED_COLLECTIONS_EXPIRED_PROPOSAL: 425, // RFC2817
HTTP_UPGRADE_REQUIRED: 426, // RFC2817
HTTP_PRECONDITION_REQUIRED: 428, // RFC6585
HTTP_TOO_MANY_REQUESTS: 429, // RFC6585
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE: 431, // RFC6585
HTTP_INTERNAL_SERVER_ERROR: 500,
HTTP_NOT_IMPLEMENTED: 501,
HTTP_BAD_GATEWAY: 502,
HTTP_SERVICE_UNAVAILABLE: 503,
HTTP_GATEWAY_TIMEOUT: 504,
HTTP_VERSION_NOT_SUPPORTED: 505,
HTTP_VARIANT_ALSO_NEGOTIATES_EXPERIMENTAL: 506, // RFC2295
HTTP_INSUFFICIENT_STORAGE: 507, // RFC4918
HTTP_LOOP_DETECTED: 508, // RFC5842
HTTP_NOT_EXTENDED: 510, // RFC2774
HTTP_NETWORK_AUTHENTICATION_REQUIRED: 511
};
}
static httpCodes() {
return {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
}
static mimeTypes() {
return {
txt: 'text/plain',
htm: 'text/html',
html: 'text/html',
php: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
swf: 'application/x-shockwave-flash',
flv: 'video/x-flv',
// images
png: 'image/png',
jpe: 'image/jpeg',
jpeg: 'image/jpeg',
jpg: 'image/jpeg',
gif: 'image/gif',
bmp: 'image/bmp',
ico: 'image/vnd.microsoft.icon',
tiff: 'image/tiff',
tif: 'image/tiff',
svg: 'image/svg+xml',
svgz: 'image/svg+xml',
// archives
zip: 'application/zip',
rar: 'application/x-rar-compressed',
exe: 'application/x-msdownload',
msi: 'application/x-msdownload',
cab: 'application/vnd.ms-cab-compressed',
// audio/video
mp3: 'audio/mpeg',
qt: 'video/quicktime',
mov: 'video/quicktime',
// adobe
pdf: 'application/pdf',
psd: 'image/vnd.adobe.photoshop',
ai: 'application/postscript',
eps: 'application/postscript',
ps: 'application/postscript',
// ms office
doc: 'application/msword',
rtf: 'application/rtf',
xls: 'application/vnd.ms-excel',
ppt: 'application/vnd.ms-powerpoint',
// open office
odt: 'application/vnd.oasis.opendocument.text',
ods: 'application/vnd.oasis.opendocument.spreadsheet'
}
}
static getHttpCode(code) {
return HttpBox.httpCodes[code]
}
static getMimeType(type) {
return HttpBox.mimeTypes[type]
}
/**
* Get UserAgent info for client
* {
"isMobile":false,
"isDesktop":true,
"isBot":false,
"isIE":false,
"isChrome":true,
.....
"browser":"Chrome",
"version":"17.0.963.79",
"os":"Windows 7",
"platform":"Microsoft Windows",
"source":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.79..."
}
* @return Object
*/
static getUserAgentForClient() {
return useragent.parse(navigator.userAgent);
}
getMethod() {
return this.request.method
}
getHeaders() {
return this.request.headers
}
isGet() {
return (this.request.method === 'GET')
}
isPost() {
return (this.request.method === 'POST')
}
isPut() {
return (this.request.method === 'PUT')
}
isDelete() {
return (this.request.method === 'DELETE')
}
isJson() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/json');
}
isXml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/xml');
}
isHtml() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'text/html');
}
isForm() {
const contentType = this.request.headers['content-type'];
return _.startsWith(_.trim(contentType), 'application/x-www-form-urlencoded');
}
isAjax() {
const requestType = this.request.headers['HTTP_X_REQUESTED_WITH'];
return _.startsWith(_.trim(requestType), 'XMLHttpRequest');
}
isClient() {
return this.context.isClient;
}
isServerHost() {
return this.context.isServer && this.request;
}
isServerStatic() {
return this.context.isServer && !this.request;
}
/**
* Get request data
*
* @return Promise
*/
getRequestData() {
const self = this;
//----------------------------
return new Promise((resolve, reject) => {
let body = '';
let postData = {};
// -------------------------------------
self.request.on('data', function (data) {
body += data;
// Too much POST data, kill the connection!
// 1e6 === 1 * Math.pow(10, 6) === 1 * 1000000 ~~~ 1MB
if (body.length > 1e6) {
// FLOOD ATTACK OR FAULTY CLIENT, NUKE REQUEST
self.request.connection.destroy()
reject('Http Error: Too much GET data.');
}
});
self.request.on('end', function () {
if (self.isJson()) | {
postData = JSON.parse(body);
} | conditional_block | |
generator.go | information based on the information provided
// in the pathway.
type Generator struct {
personGenerator *person.Generator
patientClassGenerator patientClassGenerator
allergyGenerator *codedelement.AllergyGenerator
diagnosisGenerator diagnosisOrProcedureGenerator
procedureGenerator diagnosisOrProcedureGenerator
messageConfig *config.HL7Config
doctors *doctor.Doctors
headerGenerator *header.Generator
orderGenerator *order.Generator
documentGenerator *document.Generator
}
type diagnosisOrProcedureGenerator interface {
RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
if updatePerson.Person != nil {
g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
}
g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
g.setProcedures(patientInfo, updatePerson.Procedures)
g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
p := &state.Patient{
PatientInfo: &ir.PatientInfo{
Class: g.messageConfig.PatientClass.Outpatient,
Person: person,
// The Hospital Service might be overridden later with the doctor's specialty.
HospitalService: g.messageConfig.HospitalService,
AttendingDoctor: doctor,
},
// The code downstream assumes that Orders exists.
Orders: make(map[string]*ir.Order),
Documents: make(map[string]*ir.Document),
}
// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire
// PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
if g.messageConfig.PrimaryFacility != nil |
if doctor != nil {
docWithSpecialty := g.doctors.GetByID(doctor.ID)
if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
p.PatientInfo.HospitalService = docWithSpecialty.Specialty
}
}
return p
}
// NewDoctor returns a new doctor based on the Consultant information from the pathway.
// If consultant is not specified, it returns a random doctor.
// Otherwise, it attempts to lookup an existic doctor basd on consultant ID. If any doctor is found, it returns it.
// Othwerise creates a new doctor from Consultant information, with the default speciality defined in
// messageConfig.HospitalService.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
if c == nil {
return g.doctors.GetRandomDoctor()
}
if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
return doctor
}
newDoctor := &ir.Doctor{
// A valid pathway.Consultant has all the fields set, so we can just dereference.
ID: *c.ID,
Surname: *c.Surname,
Prefix: *c.Prefix,
FirstName: *c.FirstName,
Specialty: g.messageConfig.HospitalService,
}
g.doctors.Add(newDoctor)
return newDoctor
}
// ResetPatient builds a fresh Patient from the given one.
// Medical history (Orders, Encounters) and general information carry over;
// everything else is reinitialised as for a brand-new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
	old := p.PatientInfo
	reset := g.NewPatient(old.Person, old.AttendingDoctor)
	reset.Orders = p.Orders
	reset.PastVisits = p.PastVisits
	info := reset.PatientInfo
	info.HospitalService = old.HospitalService
	info.Encounters = old.Encounters
	info.PrimaryFacility = old.PrimaryFacility
	info.Allergies = old.Allergies
	return reset
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
	switch {
	case len(allergies) > 0:
		// If the pathway allergies are set, add them to the existing ones.
		if patientInfo.Allergies == nil {
			patientInfo.Allergies = []*ir.Allergy{}
		}
		patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
	case allergies == nil && patientInfo.Allergies == nil:
		// Initialise the allergies to an empty slice so that they're not nil anymore.
		patientInfo.Allergies = []*ir.Allergy{}
		patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
	case allergies != nil && len(allergies) == 0:
		// Allergies were set explicitly as an empty slice in the pathway.
		patientInfo.Allergies = []*ir.Allergy{}
	}
}
// getDedupedAllergiesFromPathway converts the pathway allergies into ir.Allergy
// entries, dropping duplicates: an allergy listed twice in the pathway is kept
// once, and allergies the patient already has are skipped entirely.
// Note: the same allergy with, e.g., a different severity or reaction is still
// added, since existing entries cannot be amended or deleted.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
	seen := map[ir.Allergy]bool{}
	for _, have := range patientInfo.Allergies {
		seen[*have] = true
	}
	var result []*ir.Allergy
	for _, pa := range pathwayAllergies {
		// Preserve the original derivation order: code/description, then date, then coding system.
		id, text := g.allergyGenerator.DeriveCodeAndDescription(pa.Code, pa.Description)
		when := g.allergyGenerator.DeriveIdentificationDateTime(pa)
		system := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, pa)
		candidate := &ir.Allergy{
			Type: pa.Type,
			Description: ir.CodedElement{
				ID: id,
				Text: text,
				CodingSystem: system,
			},
			Severity: pa.Severity,
			Reaction: pa.Reaction,
			IdentificationDateTime: when,
		}
		if seen[*candidate] {
			continue
		}
		seen[*candidate] = true
		result = append(result, candidate)
	}
	return result
}
// setDiagnoses replaces patientInfo.Diagnoses with entries generated from the pathway
// diagnoses (one per item) using the diagnosis generator.
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
	patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
	g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
// setProcedures replaces patientInfo.Procedures with entries generated from the pathway
// procedures (one per item) using the procedure generator.
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
	patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
	g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not | {
p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{
Organization: g.messageConfig.PrimaryFacility.OrganizationName,
ID: g.messageConfig.PrimaryFacility.IDNumber,
}
} | conditional_block |
generator.go | ir.Doctor {
if c == nil {
return g.doctors.GetRandomDoctor()
}
if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
return doctor
}
newDoctor := &ir.Doctor{
// A valid pathway.Consultant has all the fields set, so we can just dereference.
ID: *c.ID,
Surname: *c.Surname,
Prefix: *c.Prefix,
FirstName: *c.FirstName,
Specialty: g.messageConfig.HospitalService,
}
g.doctors.Add(newDoctor)
return newDoctor
}
// ResetPatient returns a Patient based on the given Patient.
// Medical History (Orders, Encounters) and general information is kept, but other
// information is cleared as if the patient was a new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
	// Start from a brand-new patient with the same person and attending doctor...
	newP := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
	// ...then copy over the fields that must survive the reset.
	newP.Orders = p.Orders
	newP.PatientInfo.HospitalService = p.PatientInfo.HospitalService
	newP.PatientInfo.Encounters = p.PatientInfo.Encounters
	newP.PastVisits = p.PastVisits
	newP.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
	newP.PatientInfo.Allergies = p.PatientInfo.Allergies
	return newP
}
// AddAllergies merges the pathway allergies into patientInfo:
// - Non-empty pathway allergies are always appended (deduplicated) to the existing ones.
// - A nil pathway value with nil patient allergies initialises the slice (so generation
//   is not retried on the next ADT-like event, which would inflate allergy likelihood)
//   and fills it with randomly generated distinct allergies.
// - An explicitly empty (non-nil, zero-length) pathway slice clears the patient's allergies.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
	if len(allergies) > 0 {
		// Pathway allergies present: merge into whatever the patient already has.
		if patientInfo.Allergies == nil {
			patientInfo.Allergies = []*ir.Allergy{}
		}
		patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
		return
	}
	if allergies == nil {
		if patientInfo.Allergies == nil {
			// Never initialised before: mark as initialised, then generate a random set.
			patientInfo.Allergies = []*ir.Allergy{}
			patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
		}
		return
	}
	// Non-nil empty slice: the pathway explicitly cleared the allergies.
	patientInfo.Allergies = []*ir.Allergy{}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after de-duplication:
// if the allergy is set twice in the pathway, it's added to the list only once. If the allergy
// already exists for the patient, it's not added to the list.
// Note: if the same Allergy is specified with eg. different severity or reaction, it'll be added to
// the list, as there is no way of deleting / amending existing pathwayAllergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
	var dedupedAllergies []*ir.Allergy
	// Track every allergy the patient already has, by value, so duplicates are skipped.
	existing := make(map[ir.Allergy]bool)
	for _, a := range patientInfo.Allergies {
		existing[*a] = true
	}
	for _, a := range pathwayAllergies {
		// Fill in unspecified fields (code, description, date, coding system) via the generator.
		code, description := g.allergyGenerator.DeriveCodeAndDescription(a.Code, a.Description)
		idt := g.allergyGenerator.DeriveIdentificationDateTime(a)
		cs := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, a)
		allergy := &ir.Allergy{
			Type: a.Type,
			Description: ir.CodedElement{
				ID: code,
				Text: description,
				CodingSystem: cs,
			},
			Severity: a.Severity,
			Reaction: a.Reaction,
			IdentificationDateTime: idt,
		}
		// Deduplicate by full value equality; a variant differing in any field counts as new.
		if !existing[*allergy] {
			existing[*allergy] = true
			dedupedAllergies = append(dedupedAllergies, allergy)
		}
	}
	return dedupedAllergies
}
// setDiagnoses replaces patientInfo.Diagnoses with one generated entry per pathway diagnosis.
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
	patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
	g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
// setProcedures replaces patientInfo.Procedures with one generated entry per pathway procedure.
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
	patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
	g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
// setDiagnosesOrProcedures fills diagnosisOrProcedure (pre-sized by the caller to match
// fromPathway) with one entry per pathway item, generated randomly or from the pathway
// data by dpg. Each entry is assigned a random clinician.
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
	for i, p := range fromPathway {
		diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
		// By design, diagnoses and procedures don't reuse the clinician from the pathway.
		// Past diagnoses and procedures could have been done by other clinicians, not the current one,
		// so we do not want to use the pathway's clinician.
		diagnosisOrProcedure[i].Clinician = g.NewDoctor(nil)
	}
}
// NewRegistrationPatientClassAndType returns a PatientClassAndType for a patient newly registered.
// The class/type pair is chosen at random by the patient class generator.
func (g Generator) NewRegistrationPatientClassAndType() *config.PatientClassAndType {
	return g.patientClassGenerator.Random()
}
// NewOrder returns a new order based on order information from the pathway and eventTime.
// Generation is delegated to the order generator.
func (g Generator) NewOrder(o *pathway.Order, eventTime time.Time) *ir.Order {
	return g.orderGenerator.NewOrder(o, eventTime)
}
// OrderWithClinicalNote creates an order with a Clinical Note based on the pathway.
// Generation is delegated to the order generator; any error is returned unchanged.
func (g Generator) OrderWithClinicalNote(ctx context.Context, o *ir.Order, n *pathway.ClinicalNote, eventTime time.Time) (*ir.Order, error) {
	return g.orderGenerator.OrderWithClinicalNote(ctx, o, n, eventTime)
}
// SetResults sets results on an existing Order based on the results information from the pathway.
// If order is nil, this also creates an Order using details in pathway.Result.
// Returns an error if the results cannot be created.
func (g Generator) SetResults(o *ir.Order, r *pathway.Results, eventTime time.Time) (*ir.Order, error) {
	return g.orderGenerator.SetResults(o, r, eventTime)
}
// NewVisitID generates a new visit identifier.
// NOTE(review): rand.Uint64 appears to come from math/rand, i.e. the ID is random but not
// cryptographically secure and not guaranteed unique — confirm that is acceptable for visit IDs.
func (g Generator) NewVisitID() uint64 {
	return rand.Uint64()
}
// NewHeader returns a new header for the given step.
// NOTE(review): this is the only method with a pointer receiver (*Generator) while the rest use
// value receivers — confirm whether the header generator mutates state or this is accidental.
func (g *Generator) NewHeader(step *pathway.Step) *message.HeaderInfo {
	return g.headerGenerator.NewHeader(step)
}
// NewDocument returns a NewDocument for MDM^T02 messages.
// Generation is delegated to the document generator.
func (g Generator) NewDocument(eventTime time.Time, d *pathway.Document) *ir.Document {
	return g.documentGenerator.Document(eventTime, d)
}
// UpdateDocumentContent updates the given document for MDM^T02 messages.
// The update is delegated to the document generator; any error is returned unchanged.
func (g Generator) UpdateDocumentContent(dm *ir.Document, dp *pathway.Document) error {
	return g.documentGenerator.UpdateDocumentContent(dm, dp)
}
// Config contains the configuration for Generator.
type Config struct {
	Clock clock.Clock
	HL7Config *config.HL7Config
	Header *config.Header
	// Optional generators; presumably NewGenerator substitutes defaults when these are nil — TODO confirm.
	AddressGenerator person.AddressGenerator
	MRNGenerator id.Generator
	PlacerGenerator id.Generator
	FillerGenerator id.Generator
	// NOTE(review): textGenerator is unexported, unlike every other field, so callers outside this
	// package cannot set it — confirm whether this is intentional or should be TextGenerator.
	textGenerator text.Generator
	NotesGenerator order.NotesGenerator
	DateGenerator codedelement.DateGenerator
	Data *config.Data
	Doctors *doctor.Doctors
	MsgCtrlGenerator *header.MessageControlGenerator
	OrderProfiles *orderprofile.OrderProfiles
}
// NewGenerator creates a new Generator.
func NewGenerator(cfg Config) *Generator | {
ag := cfg.AddressGenerator
if ag == nil {
ag = &address.Generator{Nouns: cfg.Data.Nouns, Address: cfg.Data.Address}
}
mrnGenerator := cfg.MRNGenerator
if mrnGenerator == nil {
mrnGenerator = &randomIDGenerator{}
}
placerGenerator := cfg.PlacerGenerator
if placerGenerator == nil {
placerGenerator = &randomIDGenerator{}
}
fillerGenerator := cfg.FillerGenerator
if fillerGenerator == nil {
fillerGenerator = &randomIDGenerator{}
} | identifier_body | |
generator.go | related information based on the information provided
// in the pathway.
// Generator generates patient-related information (people, doctors, allergies,
// diagnoses, procedures, orders, documents, headers) based on the pathway and
// the message configuration.
type Generator struct {
	personGenerator *person.Generator
	patientClassGenerator patientClassGenerator
	allergyGenerator *codedelement.AllergyGenerator
	diagnosisGenerator diagnosisOrProcedureGenerator
	procedureGenerator diagnosisOrProcedureGenerator
	messageConfig *config.HL7Config
	doctors *doctor.Doctors
	headerGenerator *header.Generator
	orderGenerator *order.Generator
	documentGenerator *document.Generator
}
// diagnosisOrProcedureGenerator produces a DiagnosisOrProcedure, either randomly
// or from the given pathway data.
type diagnosisOrProcedureGenerator interface {
	RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
// Generation is delegated to the person generator.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
	return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway updates PatientInfo with information from pathway.
// It Updates:
// - person information (only when updatePerson.Person is set)
// - diagnoses
// - procedures
// - allergies
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
	if updatePerson.Person != nil {
		g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, updatePerson.Person)
	}
	g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
	g.setProcedures(patientInfo, updatePerson.Procedures)
	g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
p := &state.Patient{
PatientInfo: &ir.PatientInfo{
Class: g.messageConfig.PatientClass.Outpatient,
Person: person,
// The Hospital Service might be overridden later with the doctor's specialty.
HospitalService: g.messageConfig.HospitalService,
AttendingDoctor: doctor,
},
// The code downstream assumes that Orders exists.
Orders: make(map[string]*ir.Order),
Documents: make(map[string]*ir.Document),
}
// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire | Organization: g.messageConfig.PrimaryFacility.OrganizationName,
ID: g.messageConfig.PrimaryFacility.IDNumber,
}
}
if doctor != nil {
docWithSpecialty := g.doctors.GetByID(doctor.ID)
if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
p.PatientInfo.HospitalService = docWithSpecialty.Specialty
}
}
return p
}
// NewDoctor resolves the pathway Consultant to a doctor.
// With no consultant it picks a random known doctor. With a consultant it first
// tries a lookup by ID; on a miss it builds a new doctor from the consultant
// fields (default specialty taken from messageConfig.HospitalService),
// registers it, and returns it.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
	if c == nil {
		return g.doctors.GetRandomDoctor()
	}
	if known := g.doctors.GetByID(*c.ID); known != nil {
		return known
	}
	// A valid pathway.Consultant has every field populated, so dereferencing is safe.
	d := &ir.Doctor{
		ID: *c.ID,
		Surname: *c.Surname,
		Prefix: *c.Prefix,
		FirstName: *c.FirstName,
		Specialty: g.messageConfig.HospitalService,
	}
	g.doctors.Add(d)
	return d
}
// ResetPatient returns a Patient based on the given Patient.
// Medical History (Orders, Encounters) and general information is kept, but other
// information is cleared as if the patient was a new patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
	// Build a brand-new patient for the same person/doctor, then restore the surviving fields.
	newP := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
	newP.Orders = p.Orders
	newP.PatientInfo.HospitalService = p.PatientInfo.HospitalService
	newP.PatientInfo.Encounters = p.PatientInfo.Encounters
	newP.PastVisits = p.PastVisits
	newP.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
	newP.PatientInfo.Allergies = p.PatientInfo.Allergies
	return newP
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
	switch {
	case len(allergies) > 0:
		// If the pathway allergies are set, add them to the existing ones.
		if patientInfo.Allergies == nil {
			patientInfo.Allergies = []*ir.Allergy{}
		}
		patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
	case allergies == nil && patientInfo.Allergies == nil:
		// Initialise the allergies to an empty slice so that they're not nil anymore.
		patientInfo.Allergies = []*ir.Allergy{}
		patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
	case allergies != nil && len(allergies) == 0:
		// Allergies were set explicitly as an empty slice in the pathway.
		patientInfo.Allergies = []*ir.Allergy{}
	}
}
// getDedupedAllergiesFromPathway returns the list of allergies from the pathway after de-duplication:
// if the allergy is set twice in the pathway, it's added to the list only once. If the allergy
// already exists for the patient, it's not added to the list.
// Note: if the same Allergy is specified with eg. different severity or reaction, it'll be added to
// the list, as there is no way of deleting / amending existing pathwayAllergies.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
	var dedupedAllergies []*ir.Allergy
	// Record the patient's current allergies by value for O(1) duplicate checks.
	existing := make(map[ir.Allergy]bool)
	for _, a := range patientInfo.Allergies {
		existing[*a] = true
	}
	for _, a := range pathwayAllergies {
		code, description := g.allergyGenerator.DeriveCodeAndDescription(a.Code, a.Description)
		idt := g.allergyGenerator.DeriveIdentificationDateTime(a)
		cs := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, a)
		allergy := &ir.Allergy{
			Type: a.Type,
			Description: ir.CodedElement{
				ID: code,
				Text: description,
				CodingSystem: cs,
			},
			Severity: a.Severity,
			Reaction: a.Reaction,
			IdentificationDateTime: idt,
		}
		// Only keep an allergy whose full value has not been seen before.
		if !existing[*allergy] {
			existing[*allergy] = true
			dedupedAllergies = append(dedupedAllergies, allergy)
		}
	}
	return dedupedAllergies
}
// setDiagnoses replaces patientInfo.Diagnoses with one generated entry per pathway diagnosis.
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
	patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
	g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
// setProcedures replaces patientInfo.Procedures with one generated entry per pathway procedure.
func (g Generator) setProcedures(patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
	patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
	g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not want to | // PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
if g.messageConfig.PrimaryFacility != nil {
p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{ | random_line_split |
generator.go | information based on the information provided
// in the pathway.
// Generator generates patient-related information (people, doctors, allergies,
// diagnoses, procedures, orders, documents, headers) based on the pathway and
// the message configuration.
type Generator struct {
	personGenerator *person.Generator
	patientClassGenerator patientClassGenerator
	allergyGenerator *codedelement.AllergyGenerator
	diagnosisGenerator diagnosisOrProcedureGenerator
	procedureGenerator diagnosisOrProcedureGenerator
	messageConfig *config.HL7Config
	doctors *doctor.Doctors
	headerGenerator *header.Generator
	orderGenerator *order.Generator
	documentGenerator *document.Generator
}
// diagnosisOrProcedureGenerator produces a DiagnosisOrProcedure, either randomly
// or from the given pathway data.
type diagnosisOrProcedureGenerator interface {
	RandomOrFromPathway(*pathway.DateTime, *pathway.DiagnosisOrProcedure) *ir.DiagnosisOrProcedure
}
// NewPerson returns a new person based on pathway.Person.
// Generation is delegated to the person generator.
func (g Generator) NewPerson(pathwayPerson *pathway.Person) *ir.Person {
	return g.personGenerator.NewPerson(pathwayPerson)
}
// UpdateFromPathway applies the pathway's UpdatePerson step to patientInfo,
// refreshing the person details (when given), then the diagnoses, procedures
// and allergies.
func (g Generator) UpdateFromPathway(patientInfo *ir.PatientInfo, updatePerson *pathway.UpdatePerson) {
	if person := updatePerson.Person; person != nil {
		g.personGenerator.UpdatePersonFromPathway(patientInfo.Person, person)
	}
	g.setDiagnoses(patientInfo, updatePerson.Diagnoses)
	g.setProcedures(patientInfo, updatePerson.Procedures)
	g.AddAllergies(patientInfo, updatePerson.Allergies)
}
// NewPatient returns a new patient based on Person information and a doctor provided.
// The patient starts as an outpatient with the default hospital service; if the doctor
// is registered and has a non-empty specialty, the hospital service is set to that specialty.
func (g Generator) NewPatient(person *ir.Person, doctor *ir.Doctor) *state.Patient {
	p := &state.Patient{
		PatientInfo: &ir.PatientInfo{
			Class: g.messageConfig.PatientClass.Outpatient,
			Person: person,
			// The Hospital Service might be overridden later with the doctor's specialty.
			HospitalService: g.messageConfig.HospitalService,
			AttendingDoctor: doctor,
		},
		// The code downstream assumes that Orders exists.
		Orders: make(map[string]*ir.Order),
		Documents: make(map[string]*ir.Document),
	}
	// If none of the g.messageConfig.PrimaryFacility fields is set, we want the resulting HL7 message to have the entire
	// PD1.3 Patient Primary Facility field empty. This is achieved by leaving p.PatientInfo.PrimaryFacility nil.
	if g.messageConfig.PrimaryFacility != nil {
		p.PatientInfo.PrimaryFacility = &ir.PrimaryFacility{
			Organization: g.messageConfig.PrimaryFacility.OrganizationName,
			ID: g.messageConfig.PrimaryFacility.IDNumber,
		}
	}
	if doctor != nil {
		// Prefer the specialty recorded for this doctor ID in the doctors registry, if any.
		docWithSpecialty := g.doctors.GetByID(doctor.ID)
		if docWithSpecialty != nil && docWithSpecialty.Specialty != "" {
			p.PatientInfo.HospitalService = docWithSpecialty.Specialty
		}
	}
	return p
}
// NewDoctor returns a new doctor based on the Consultant information from the pathway.
// If the consultant is not specified, it returns a random doctor.
// Otherwise, it attempts to look up an existing doctor based on the consultant ID. If a doctor is found, it is returned.
// Otherwise, it creates a new doctor from the Consultant information, with the default specialty defined in
// messageConfig.HospitalService, and registers it so that later lookups by the same ID return it.
func (g Generator) NewDoctor(c *pathway.Consultant) *ir.Doctor {
	if c == nil {
		return g.doctors.GetRandomDoctor()
	}
	if doctor := g.doctors.GetByID(*c.ID); doctor != nil {
		return doctor
	}
	newDoctor := &ir.Doctor{
		// A valid pathway.Consultant has all the fields set, so we can just dereference.
		ID: *c.ID,
		Surname: *c.Surname,
		Prefix: *c.Prefix,
		FirstName: *c.FirstName,
		Specialty: g.messageConfig.HospitalService,
	}
	// Register the new doctor so subsequent GetByID calls find the same instance.
	g.doctors.Add(newDoctor)
	return newDoctor
}
// ResetPatient returns a patient derived from p: medical history (Orders,
// Encounters) and general information survive, while the remaining state is
// rebuilt as for a newly registered patient.
func (g Generator) ResetPatient(p *state.Patient) *state.Patient {
	out := g.NewPatient(p.PatientInfo.Person, p.PatientInfo.AttendingDoctor)
	out.PastVisits = p.PastVisits
	out.Orders = p.Orders
	out.PatientInfo.HospitalService = p.PatientInfo.HospitalService
	out.PatientInfo.Encounters = p.PatientInfo.Encounters
	out.PatientInfo.PrimaryFacility = p.PatientInfo.PrimaryFacility
	out.PatientInfo.Allergies = p.PatientInfo.Allergies
	return out
}
// AddAllergies adds allergies specified in the pathway to the patientInfo:
// - If there are any allergies specified in the pathways, they are always added to existing allergies on the patientInfo.
// - If the allergies were not specified in the pathway (ie. allergies is nil) and the allergies on the patientInfo
// have not been initialised yet (ie are also nil), initialise them to an empty slice (to make sure we'll not make an
// attempt to generate them on the next ADT-like event, as that would increase the likelihood of the patient having
// allergies), and then generate them.
// - If the allergies from the pathway are explicitly set to empty slice, the allergies on the patient info are also set
// to empty slice.
func (g Generator) AddAllergies(patientInfo *ir.PatientInfo, allergies []pathway.Allergy) {
	switch {
	case len(allergies) > 0:
		// If the pathway allergies are set, add them to the existing ones.
		if patientInfo.Allergies == nil {
			patientInfo.Allergies = []*ir.Allergy{}
		}
		patientInfo.Allergies = append(patientInfo.Allergies, g.getDedupedAllergiesFromPathway(patientInfo, allergies)...)
	case allergies == nil && patientInfo.Allergies == nil:
		// Initialise the allergies to an empty slice so that they're not nil anymore.
		patientInfo.Allergies = []*ir.Allergy{}
		patientInfo.Allergies = append(patientInfo.Allergies, g.allergyGenerator.GenerateRandomDistinctAllergies()...)
	case allergies != nil && len(allergies) == 0:
		// Allergies were set explicitly as an empty slice in the pathway.
		patientInfo.Allergies = []*ir.Allergy{}
	}
}
// getDedupedAllergiesFromPathway maps the pathway allergies to ir.Allergy values,
// keeping each distinct allergy once and skipping any allergy the patient already
// has. An allergy repeated with a different severity or reaction still counts as
// new, because existing entries cannot be amended or deleted.
func (g Generator) getDedupedAllergiesFromPathway(patientInfo *ir.PatientInfo, pathwayAllergies []pathway.Allergy) []*ir.Allergy {
	known := make(map[ir.Allergy]bool, len(patientInfo.Allergies))
	for _, have := range patientInfo.Allergies {
		known[*have] = true
	}
	var added []*ir.Allergy
	for _, spec := range pathwayAllergies {
		// Keep the original derivation order: code/description, then date, then coding system.
		code, desc := g.allergyGenerator.DeriveCodeAndDescription(spec.Code, spec.Description)
		identified := g.allergyGenerator.DeriveIdentificationDateTime(spec)
		system := g.allergyGenerator.DeriveCodingSystem(g.messageConfig.Allergy, spec)
		entry := &ir.Allergy{
			Type: spec.Type,
			Description: ir.CodedElement{
				ID: code,
				Text: desc,
				CodingSystem: system,
			},
			Severity: spec.Severity,
			Reaction: spec.Reaction,
			IdentificationDateTime: identified,
		}
		if !known[*entry] {
			known[*entry] = true
			added = append(added, entry)
		}
	}
	return added
}
// setDiagnoses replaces patientInfo.Diagnoses with one generated entry per pathway diagnosis.
func (g Generator) setDiagnoses(patientInfo *ir.PatientInfo, diagnoses []*pathway.DiagnosisOrProcedure) {
	patientInfo.Diagnoses = make([]*ir.DiagnosisOrProcedure, len(diagnoses))
	g.setDiagnosesOrProcedures(patientInfo.Diagnoses, diagnoses, g.diagnosisGenerator)
}
func (g Generator) | (patientInfo *ir.PatientInfo, procedures []*pathway.DiagnosisOrProcedure) {
patientInfo.Procedures = make([]*ir.DiagnosisOrProcedure, len(procedures))
g.setDiagnosesOrProcedures(patientInfo.Procedures, procedures, g.procedureGenerator)
}
func (g Generator) setDiagnosesOrProcedures(diagnosisOrProcedure []*ir.DiagnosisOrProcedure, fromPathway []*pathway.DiagnosisOrProcedure, dpg diagnosisOrProcedureGenerator) {
for i, p := range fromPathway {
diagnosisOrProcedure[i] = dpg.RandomOrFromPathway(p.DateTime, p)
// By design, diagnoses and procedures don't reuse the clinician from the pathway.
// Past diagnoses and procedures could have been done by other clinicians, not the current one,
// so we do not want | setProcedures | identifier_name |
spannerautoscaler_controller.go |
scaleDownInterval time.Duration
clock utilclock.Clock
log logr.Logger
mu sync.RWMutex
}
// Compile-time assertion that SpannerAutoscalerReconciler implements ctrlreconcile.Reconciler.
var _ ctrlreconcile.Reconciler = (*SpannerAutoscalerReconciler)(nil)
// Option configures a SpannerAutoscalerReconciler during construction.
type Option func(*SpannerAutoscalerReconciler)
// WithSyncers overrides the reconciler's initial map of per-resource syncers.
func WithSyncers(syncers map[types.NamespacedName]syncer.Syncer) Option {
	return func(r *SpannerAutoscalerReconciler) {
		r.syncers = syncers
	}
}
// WithScaleDownInterval overrides the minimum interval between scale-down operations
// (default: 55 minutes, set in NewSpannerAutoscalerReconciler).
func WithScaleDownInterval(scaleDownInterval time.Duration) Option {
	return func(r *SpannerAutoscalerReconciler) {
		r.scaleDownInterval = scaleDownInterval
	}
}
// WithClock overrides the reconciler's clock; useful for injecting a fake clock.
func WithClock(clock utilclock.Clock) Option {
	return func(r *SpannerAutoscalerReconciler) {
		r.clock = clock
	}
}
// WithLog overrides the reconciler's logger, naming it "spannerautoscaler".
func WithLog(log logr.Logger) Option {
	return func(r *SpannerAutoscalerReconciler) {
		r.log = log.WithName("spannerautoscaler")
	}
}
// NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
// Defaults (empty syncer map, 55-minute scale-down interval, real clock) are set
// first; each Option is then applied in order and may override them.
func NewSpannerAutoscalerReconciler(
	ctrlClient ctrlclient.Client,
	apiReader ctrlclient.Reader,
	scheme *runtime.Scheme,
	recorder record.EventRecorder,
	logger logr.Logger,
	opts ...Option,
) *SpannerAutoscalerReconciler {
	r := &SpannerAutoscalerReconciler{
		ctrlClient: ctrlClient,
		apiReader: apiReader,
		scheme: scheme,
		recorder: recorder,
		syncers: make(map[types.NamespacedName]syncer.Syncer),
		scaleDownInterval: 55 * time.Minute,
		clock: utilclock.RealClock{},
		log: logger,
	}
	for _, opt := range opts {
		opt(r)
	}
	return r
}
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers,verbs=get;list;watch;create;update;patch;delete
// Kubebuilder RBAC markers: parsed by controller-gen to generate the Role/ClusterRole
// rules this controller needs at runtime.
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=spanner.mercari.com,resources=spannerautoscalers/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get,resourceNames=spanner-autoscaler-gcp-sa

// Reconcile implements ctrlreconcile.Reconciler.
//
// For one SpannerAutoscaler resource it:
//  1. tears down and forgets the per-resource syncer when the resource no
//     longer exists;
//  2. starts a syncer if none is running, or replaces it when the target
//     instance / credentials changed;
//  3. otherwise computes the desired processing units from the observed
//     high-priority CPU utilization and, when an update is warranted,
//     scales the instance and persists the new desired state in status.
//
// Every failure path emits a Warning event on the resource and returns the
// error so controller-runtime retries the reconcile.
func (r *SpannerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrlreconcile.Request) (ctrlreconcile.Result, error) {
	nn := req.NamespacedName
	log := r.log.WithValues("namespaced name", nn)

	// Look up this resource's syncer under the read lock; r.syncers is shared
	// across concurrent reconcile invocations.
	r.mu.RLock()
	s, syncerExists := r.syncers[nn]
	r.mu.RUnlock()

	var sa spannerv1beta1.SpannerAutoscaler
	if err := r.ctrlClient.Get(ctx, nn, &sa); err != nil {
		err = ctrlclient.IgnoreNotFound(err)
		if err != nil {
			log.Error(err, "failed to get spanner-autoscaler")
			return ctrlreconcile.Result{}, err
		}

		// NotFound: the resource was deleted, so stop and forget its syncer.
		log.V(2).Info("checking if a syncer exists")
		if syncerExists {
			s.Stop()
			r.mu.Lock()
			delete(r.syncers, nn)
			r.mu.Unlock()
			log.Info("stopped syncer")
		}
		return ctrlreconcile.Result{}, nil
	}

	// TODO: move this to the defaulting webhook
	if sa.Spec.ScaleConfig.ScaledownStepSize == 0 {
		sa.Spec.ScaleConfig.ScaledownStepSize = defaultScaledownStepSize
	}

	log.V(1).Info("resource status", "spannerautoscaler", sa)

	credentials, err := r.fetchCredentials(ctx, &sa)
	if err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "ServiceAccountRequired", err.Error())
		log.Error(err, "failed to fetch service account")
		return ctrlreconcile.Result{}, err
	}

	// If the syncer does not exist, start a syncer.
	if !syncerExists {
		log.V(2).Info("syncer does not exist, starting a new syncer")
		ctx = logging.WithContext(ctx, log)
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		// A freshly started syncer has no observations yet, so stop here and
		// let a later reconcile do the scaling math.
		return ctrlreconcile.Result{}, nil
	}

	// If target spanner instance or service account have been changed, then just replace syncer.
	if s.UpdateTarget(sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials) {
		s.Stop()
		r.mu.Lock()
		delete(r.syncers, nn)
		r.mu.Unlock()
		if err := r.startSyncer(ctx, nn, sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID, credentials); err != nil {
			r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedStartSyncer", err.Error())
			log.Error(err, "failed to start syncer")
			return ctrlreconcile.Result{}, err
		}
		log.Info("replaced syncer", "namespaced name", sa)
		return ctrlreconcile.Result{}, nil
	}

	log.V(1).Info("checking to see if we need to calculate processing units", "sa", sa)
	if !r.needCalcProcessingUnits(&sa) {
		return ctrlreconcile.Result{}, nil
	}

	// All scaling math is done in processing units; node counts are converted
	// via normalizeProcessingUnitsOrNodes (1 node == 1000 PU).
	// TODO: change this to pass the object instead of so many parameters
	desiredProcessingUnits := calcDesiredProcessingUnits(
		sa.Status.CurrentHighPriorityCPUUtilization,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.TargetCPUUtilization.HighPriority,
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Min, sa.Spec.ScaleConfig.Nodes.Min, sa.Spec.ScaleConfig.ComputeType),
		normalizeProcessingUnitsOrNodes(sa.Spec.ScaleConfig.ProcessingUnits.Max, sa.Spec.ScaleConfig.Nodes.Max, sa.Spec.ScaleConfig.ComputeType),
		sa.Spec.ScaleConfig.ScaledownStepSize,
	)

	now := r.clock.Now()

	log.V(1).Info("processing units need to be changed", "desiredProcessingUnits", desiredProcessingUnits, "sa.Status", sa.Status)
	// NOTE(review): needUpdateProcessingUnits presumably also rate-limits
	// scaling against LastScaleTime — confirm in its implementation.
	if !r.needUpdateProcessingUnits(&sa, desiredProcessingUnits, now) {
		return ctrlreconcile.Result{}, nil
	}

	if err := s.UpdateInstance(ctx, desiredProcessingUnits); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateInstance", err.Error())
		log.Error(err, "failed to update spanner instance status")
		return ctrlreconcile.Result{}, err
	}

	r.recorder.Eventf(&sa, corev1.EventTypeNormal, "Updated", "Updated processing units of %s/%s from %d to %d", sa.Spec.TargetInstance.ProjectID, sa.Spec.TargetInstance.InstanceID,
		normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), desiredProcessingUnits)

	log.Info("updated nodes via google cloud api", "before", normalizeProcessingUnitsOrNodes(sa.Status.CurrentProcessingUnits, sa.Status.CurrentNodes, sa.Spec.ScaleConfig.ComputeType), "after", desiredProcessingUnits)

	// Persist the new desired state on a deep copy so the client's cached
	// object is not mutated in place.
	saCopy := sa.DeepCopy()
	saCopy.Status.DesiredProcessingUnits = desiredProcessingUnits
	saCopy.Status.DesiredNodes = desiredProcessingUnits / 1000 // integer division: 1 node == 1000 PU
	saCopy.Status.LastScaleTime = metav1.Time{Time: now}

	if err = r.ctrlClient.Status().Update(ctx, saCopy); err != nil {
		r.recorder.Event(&sa, corev1.EventTypeWarning, "FailedUpdateStatus", err.Error())
		log.Error(err, "failed to update spanner autoscaler status")
		return ctrlreconcile.Result{}, err
	}

	return ctrlreconcile.Result{}, nil
}
// TODO: convert all internal computations to processing units only

// normalizeProcessingUnitsOrNodes converts the pair (pu, nodes) into a single
// processing-units value according to computeType: the pu value is used as-is
// for ComputeTypePU, a node count is converted at 1 node == 1000 PU for
// ComputeTypeNode, and any other compute type yields -1.
func normalizeProcessingUnitsOrNodes(pu, nodes int, computeType spannerv1beta1.ComputeType) int {
	if computeType == spannerv1beta1.ComputeTypePU {
		return pu
	}
	if computeType == spannerv1beta1.ComputeTypeNode {
		return nodes * 1000
	}
	// Unknown compute type: signal an invalid value.
	return -1
}
// SetupWithManager sets up the controller with ctrlmanager.Manager, watching
// SpannerAutoscaler resources and routing their events to this reconciler.
func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {
	builder := ctrlbuilder.ControllerManagedBy(mgr).
		For(&spannerv1beta1.SpannerAutoscaler{}).
		WithOptions(ctrlcontroller.Options{
			Reconciler: r,
		})
	return builder.Complete(r)
}
func (r *SpannerAutoscalerReconciler) | (ctx context.Context, nn types.NamespacedName, projectID, instanceID string, credentials *syncer.Credentials) error {
log := logging.FromContext | startSyncer | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.