hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
daa23b88ceb8bf087be4f6e681687b1a4883adc2
636
py
Python
beep_alarm.py
XC-Li/Raspberry_Projects
48b61832641fea1dcbd24b651266fe767d8cd254
[ "MIT" ]
null
null
null
beep_alarm.py
XC-Li/Raspberry_Projects
48b61832641fea1dcbd24b651266fe767d8cd254
[ "MIT" ]
null
null
null
beep_alarm.py
XC-Li/Raspberry_Projects
48b61832641fea1dcbd24b651266fe767d8cd254
[ "MIT" ]
null
null
null
from time import ctime from time import sleep from sakshat import SAKSHAT from sakspins import SAKSPins as Pins saks = SAKSHAT() alarm = [2011] def tact_event_handler(pin, status): global alarm_run if pin == Pins.TACT_RIGHT: print("Stop timer") alarm_run = False try: while True: current_time = ctime() current_time = current_time[11:13] + current_time[14:16] print(current_time) if int(current_time) in alarm: saks.buzzer.beep(1) sleep(2) except KeyboardInterrupt: print("End") saks.ledrow.off() saks.buzzer.off()
23.555556
65
0.625786
83
636
4.662651
0.542169
0.170543
0.072351
0
0
0
0
0
0
0
0
0.030837
0.286164
636
27
66
23.555556
0.821586
0
0
0
0
0
0.021277
0
0
0
0
0
0
1
0.043478
false
0
0.173913
0
0.217391
0.130435
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daa528c94b1125fa8f5f9603f88f3f3061379760
18,429
py
Python
ntopo/train.py
JonasZehn/ntopo
d3e17ca4cfb1d7a71c4c4f0c965cfcdc67d53fa9
[ "MIT" ]
4
2021-10-30T15:10:31.000Z
2021-12-27T08:56:17.000Z
ntopo/train.py
liyuesolo/ntopo-1
d3e17ca4cfb1d7a71c4c4f0c965cfcdc67d53fa9
[ "MIT" ]
null
null
null
ntopo/train.py
liyuesolo/ntopo-1
d3e17ca4cfb1d7a71c4c4f0c965cfcdc67d53fa9
[ "MIT" ]
1
2022-01-02T13:09:15.000Z
2022-01-02T13:09:15.000Z
import os import time import numpy as np import tensorflow as tf from tqdm import tqdm from ntopo.monitors import SimulationMonitor from ntopo.physics import compute_elasticity_energies, compute_opt_energy, compute_volume_penalty from ntopo.filter import apply_sensitivity_filter from ntopo.utils import write_to_file, get_sample_generator, get_single_random_q_sample_generator, get_q_sample_generator, stratified_sampling from ntopo.oc import compute_oc_multi_batch from ntopo.render import save_densities_to_file def get_train_disp_step(opt, problem, disp_model, density_model, disp_variables): @tf.function def _train_disp_step(samples): with tf.GradientTape() as tape: tape.watch(disp_variables) internal_energy, force_loss = compute_elasticity_energies( problem, disp_model, density_model, samples, training=True) reg_loss = tf.keras.backend.sum(disp_model.losses) loss = internal_energy + force_loss + reg_loss dLdwx = tape.gradient(loss, disp_variables) opt.apply_gradients(zip(dLdwx, disp_variables)) return loss, internal_energy, force_loss, reg_loss return _train_disp_step def run_simulation(problem, disp_model, train_disp_step, n_sim_iterations, sim_sample_generator, saving=False, save_path='.', save_postfix=''): simulation_monitor = SimulationMonitor(n_sim_iterations) progress_bar = tqdm(simulation_monitor, total=n_sim_iterations) for disp_iter in progress_bar: start_time = time.time() input_samples = next(sim_sample_generator) loss, internal_energy, force_loss, reg_loss = train_disp_step(input_samples) simulation_monitor.monitor(loss) end_time = time.time() loss = loss.numpy().item() internal_energy = internal_energy.numpy().item() duration = end_time - start_time reg_loss = reg_loss.numpy().item() progress_bar.set_description(f'loss {loss:.3e} int. 
energy {internal_energy:.3e}, dur.: {duration:.3e}, reg loss {reg_loss:.3e}') progress_bar.refresh() if saving: simulation_monitor.save_plot(save_path, '', save_postfix) def get_train_density_step(opt, problem, disp_model, density_model, density_variables, vol_penalty_strength, target_volume_ratio): sample_volume = problem.domain_volume target_volume = problem.free_volume * target_volume_ratio @tf.function def _train_densities_step(sample_positions): with tf.GradientTape() as tape: tape.watch(density_variables) energy, densities = compute_opt_energy( problem, disp_model, density_model, sample_positions) penalty = compute_volume_penalty(densities, sample_volume=sample_volume, vol_penalty_strength=vol_penalty_strength, target_volume=target_volume) reg_loss = tf.keras.backend.sum(density_model.losses) loss = energy + penalty + reg_loss dLdwx = tape.gradient(loss, density_variables) opt.apply_gradients(zip(dLdwx, density_variables)) return loss, penalty, reg_loss return _train_densities_step @tf.function def compute_sensitivities(problem, disp_model, density_model, sample_positions, use_oc, vol_penalty_strength, target_volume_ratio=None): sample_volume = problem.domain_volume target_volume = problem.free_volume * target_volume_ratio with tf.GradientTape() as tape: energy, densities = compute_opt_energy( problem, disp_model, density_model, sample_positions) if use_oc: loss = energy else: penalty = compute_volume_penalty(densities, sample_volume=sample_volume, vol_penalty_strength=vol_penalty_strength, target_volume=target_volume) loss = energy + penalty old_densities = densities grads = tape.gradient(loss, densities) return old_densities, grads @tf.function def compute_target_densities_gradient_descent(old_densities, sensitivities): projected_sensitivities = [tf.math.maximum(0.0, tf.math.minimum( 1.0, old_densities[i] - sensitivities[i])) - old_densities[i] for i in range(len(old_densities))] step_size = 0.05 / tf.math.reduce_mean([tf.math.reduce_mean(tf.math.abs(si)) 
for si in projected_sensitivities]) return [old_densities[i] - step_size * sensitivities[i] for i in range(len(old_densities))] @tf.function def optimize_densities_mse(opt, density_model, sample_positions, targets, density_variables): with tf.GradientTape() as tape: tape.watch(density_variables) err = density_model(sample_positions, training=True) - targets reg_loss = tf.keras.backend.sum(density_model.losses) reconstruction_loss = tf.reduce_mean(err*err, keepdims=True) loss = reconstruction_loss + reg_loss dLdwrho = tape.gradient(loss, density_variables) opt.apply_gradients(zip(dLdwrho, density_variables)) return loss, reconstruction_loss, reg_loss def save_model_configs(disp_model, density_model, save_path): write_to_file(disp_model.to_json(), os.path.join( save_path, 'disp_model_config.json')) write_to_file(density_model.to_json(), os.path.join( save_path, 'density_model_config.json')) def save_model_weights(disp_model, density_model, save_path, save_postfix): disp_model.save_weights(os.path.join( save_path, 'disp_model' + save_postfix)) density_model.save_weights(os.path.join( save_path, 'density_model' + save_postfix)) def train_non_mmse(problem, disp_model, density_model, opt_disp, opt_density, opt_sample_generator, sim_sample_generator, vol_penalty_strength, target_volume_ratio, save_path, save_interval, n_opt_iterations, n_sim_iterations ): train_disp_step = get_train_disp_step( opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables) train_density_step = get_train_density_step( opt_density, problem, disp_model, density_model=density_model, density_variables=density_model.trainable_variables, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio) save_model_configs(disp_model, density_model, save_path) def save_state(save_postfix): save_model_weights(disp_model, density_model, save_path, save_postfix) problem.plot_densities(density_model, save_path, '', save_postfix) iteration = 
0 saving = True save_postfix = f'-{iteration:06d}' run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix) if saving: problem.plot_displacement(disp_model, save_path, '', save_postfix) save_state(save_postfix) for iteration in range(1, n_opt_iterations + 1): print('Optimization iteration ', iteration) saving = (iteration % save_interval == 0) save_postfix = f'-{iteration:06d}' run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix) if saving: problem.plot_displacement(disp_model, save_path, '', save_postfix) sample_positions = next(opt_sample_generator) train_density_step(sample_positions) if saving: save_state(save_postfix) def train_mmse(problem, disp_model, density_model, opt_disp, opt_density, opt_sample_generator, sim_sample_generator, n_opt_samples, vol_penalty_strength, target_volume_ratio, save_path, filter, filter_radius, use_oc, save_interval, n_opt_iterations, n_sim_iterations, n_opt_batches, oc_config): density_variables = density_model.trainable_variables train_disp_step = get_train_disp_step( opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables) save_model_configs(disp_model, density_model, save_path) def save_state(save_postfix, target_densities=None): save_model_weights(disp_model, density_model, save_path, save_postfix) problem.plot_densities(density_model, save_path, '', save_postfix) if target_densities is not None and problem.dim == 2: save_densities_to_file(np.reshape(target_densities[0], (n_opt_samples[1], n_opt_samples[0])), filename=os.path.join( save_path, 'density' + save_postfix + '-target0.png')) iteration = 0 saving = True save_postfix = f'-{iteration:06d}' run_simulation(problem, disp_model, train_disp_step, 
n_sim_iterations=n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=True, save_path=save_path, save_postfix=save_postfix) if saving: problem.plot_displacement(disp_model, save_path, '', save_postfix) save_state(save_postfix) for iteration in range(1, n_opt_iterations + 1): print('Optimization iteration ', iteration) saving = (iteration % save_interval == 0) save_postfix = f'-{iteration:06d}' run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix) if saving: problem.plot_displacement(disp_model, save_path, '', save_postfix) old_densities = [] sensitivities = [] sample_positions = [] for _ in range(n_opt_batches): input_samples = next(opt_sample_generator) old_di, sensitivities_i = compute_sensitivities( problem, disp_model, density_model, input_samples, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio) if filter == 'sensitivity': sensitivities_i = apply_sensitivity_filter( input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius) else: assert filter in ('none', ), 'not supported filter' old_densities.append(old_di) sensitivities.append(sensitivities_i) sample_positions.append(input_samples) if use_oc: target_densities = compute_oc_multi_batch( old_densities=old_densities, sensitivities=sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio, max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter']) else: target_densities = compute_target_densities_gradient_descent( old_densities=old_densities, sensitivities=sensitivities) progress_bar = tqdm(range(n_opt_batches)) for i in progress_bar: loss, reconstruction_loss, reg_loss = optimize_densities_mse( opt_density, density_model, sample_positions[i], target_densities[i], 
density_variables) loss = loss.numpy().item() reconstruction_loss = reconstruction_loss.numpy().item() reg_loss = reg_loss.numpy().item() progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}') progress_bar.refresh() if saving: save_state(save_postfix, target_densities) def train_mmse_space(problem, disp_model, density_model, opt_disp, opt_density, n_sim_samples, n_opt_samples, opt_sample_generator, vol_penalty_strength, target_volume_ratio, save_path, filter, filter_radius, use_oc, save_interval, n_opt_iterations, n_sim_iterations, n_opt_batches, n_q_samples, oc_config): density_variables = density_model.trainable_variables train_disp_step = get_train_disp_step( opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables) save_model_configs(disp_model, density_model, save_path) def save_state(save_postfix, target_densities=None): disp_model.save_weights(os.path.join( save_path, 'disp_model' + save_postfix)) density_model.save_weights(os.path.join( save_path, 'density_model' + save_postfix)) problem.plot_densities(density_model, save_path, '', save_postfix) iteration = 0 saving = True save_postfix = f'-{iteration:06d}' sim_sample_generator = get_single_random_q_sample_generator(problem.q_domain, problem.domain, n_sim_samples) run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=2*n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix) if saving: qs = stratified_sampling(problem.q_domain, n_cells=[ n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten() for q in qs: save_postfix_q = f'-{iteration:06d}-q={q:.6f}' print('q', q) problem.plot_displacement( disp_model, save_path, '', save_postfix_q, q=np.array([[q]])) save_state(save_postfix) for iteration in range(1, n_opt_iterations + 1): print('Optimization iteration ', iteration) saving = (iteration % save_interval == 0) print('saving', 
saving) target_samples_all_q = [] target_densities_all_q = [] qs = stratified_sampling(problem.q_domain, n_cells=[ n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten() for q in qs: save_postfix_q = f'-{iteration:06d}-q={q:.6f}' if problem.volume_ratio_q_idx != -1: assert problem.volume_ratio_q_idx == 0 target_volume_ratio = q old_densities = [] sensitivities = [] sample_positions_with_q = [] sim_sample_generator = get_q_sample_generator( q, problem.domain, n_samples=n_sim_samples) run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations, sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix_q) if saving: problem.plot_displacement( disp_model, save_path, '', save_postfix_q, q=np.array([[q]])) for _ in range(n_opt_batches): input_samples = next(opt_sample_generator) q_vec = np.ones((np.prod(n_opt_samples), 1), dtype=np.float32) * q input_samples_with_q = np.concatenate( (input_samples, q_vec), axis=1) old_di, sensitivities_i = compute_sensitivities( problem, disp_model, density_model, input_samples_with_q, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio) if filter == 'sensitivity': sensitivities_i = apply_sensitivity_filter( input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius) else: assert filter in ('none', ), 'not supported filter' old_densities.append(old_di) sensitivities.append(sensitivities_i) sample_positions_with_q.append(input_samples_with_q) if use_oc: target_densities = compute_oc_multi_batch( old_densities, sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio, max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter']) else: target_densities = compute_target_densities_gradient_descent( old_densities=old_densities, sensitivities=sensitivities) 
target_samples_all_q.append(sample_positions_with_q) target_densities_all_q.append(target_densities) n_batch = len(target_samples_all_q) * len(target_samples_all_q[0]) n_samples_total = n_batch * np.prod(n_opt_samples) target_samples_all_q = tf.reshape( target_samples_all_q, [n_samples_total, problem.dim + problem.q_dim]) target_densities_all_q = tf.reshape( target_densities_all_q, [n_samples_total, 1]) indices = np.arange(n_samples_total) np.random.shuffle(indices) n_per_batch = n_samples_total // n_batch progress_bar = tqdm(range(n_batch)) for i in progress_bar: batch_samples = tf.gather(target_samples_all_q, tf.constant( indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0) batch_densities = tf.gather(target_densities_all_q, tf.constant( indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0) loss, reconstruction_loss, reg_loss = optimize_densities_mse( opt_density, density_model, batch_samples, batch_densities, density_variables) loss = loss.numpy().item() reconstruction_loss = reconstruction_loss.numpy().item() reg_loss = reg_loss.numpy().item() progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}') progress_bar.refresh() if saving: save_postfix = f'-{iteration:06d}' save_state(save_postfix, target_densities)
44.514493
169
0.675566
2,249
18,429
5.128946
0.08715
0.045774
0.028088
0.040052
0.778674
0.714348
0.673429
0.640659
0.619506
0.586909
0
0.004672
0.245103
18,429
413
170
44.622276
0.824468
0
0
0.603659
0
0.003049
0.036846
0.008845
0
0
0
0
0.009146
1
0.04878
false
0
0.033537
0
0.103659
0.015244
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daa5d4cc65519f90d470b60fa1a16f721ffb4184
8,019
py
Python
appointment/views.py
aksheus/patient_appointment_system
d718f676e5b20197c8629e9eb9f9a47eb94b3ffe
[ "Apache-2.0" ]
null
null
null
appointment/views.py
aksheus/patient_appointment_system
d718f676e5b20197c8629e9eb9f9a47eb94b3ffe
[ "Apache-2.0" ]
null
null
null
appointment/views.py
aksheus/patient_appointment_system
d718f676e5b20197c8629e9eb9f9a47eb94b3ffe
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render,get_object_or_404 from django.http import HttpResponse,Http404,HttpResponseRedirect from appointment.models import Patient,Appointment from django.template import RequestContext,loader from django.core.urlresolvers import reverse from django.utils import timezone from datetime import datetime,timedelta import phonenumbers from django.core.mail import EmailMessage from django.views.generic import View # Create your views here. class Index(View): def get(self,request): dt=[] now=timezone.now() one_day=timedelta(days=1) two_day=timedelta(days=2) h=timedelta(hours=1) while now.hour!=int(9): #start time now=now-h s=timedelta(seconds=1) while now.second != int(0): now=now-s m=timedelta(minutes=1) while now.minute!=int(0): now=now-m m=timedelta(minutes=10) dt.append(now) won=now while won.hour!=int(13): #check 1 loop logic won=won+m dt.append(won) dt.pop() for x in xrange(len(dt)): won=dt[x]+one_day dt.append(won) won=dt[x]+two_day dt.append(won) #now dt filled with all possible appointments remove #one's already booked i.e in database a=Appointment.objects.all() a=[x.appointment_datetime for x in a] display_list=[str(x) for x in list(set(dt)-set(a))] #remove already booked appointments display_list.sort() for x in xrange(len(display_list)): bugfix=list(display_list[x]) bugfix=bugfix[:19] display_list[x]="".join([y for y in bugfix]) context={'display_list': display_list} return render(request,'appointment/index.html',context) class Form_handle(View): def post(self,request): """create patient object check wether it is already in db if it is don't store check wether the appointment is within 15 days of the previous one if it is 'review' else it is 'fresh'.Retrieve the particular patient object from db create the appointment object and point it to the patient if patient ain't in db store patient and create appointment pointing to that patient and store it""" F=request.POST try: pp=Patient.objects.get(patient_name=F['name'], 
patient_email=F['email'] ) try: app=pp.appointment except Appointment.DoesNotExist: pass comp=datetime.strptime(F['datetime'],'%Y-%m-%d %H:%M:%S') if comp.day-app.appointment_datetime.day <= 15: #review store_app=Appointment( appointment_datetime=comp, fresh_or_review=True, appointment_problem=F['problem']) store_app.save() pp.appointment=store_app pp.save() mail_to_doctor=EmailMessage('appointment for %s'%pp.patient_name, store_app.appointment_problem, to=['spvijayal@gmail.com'] ) mail_to_doctor.send() #returns 1 on success or SMTP standard errors mess='''Respected Sir/Madam, Your review appointment is scheduled on %s'''%F['datetime'] mail_to_patient=EmailMessage('clinic\'s name', mess, to=['%s'%pp.patient_email] ) mail_to_patient.send() else: store_app=Appointment( appointment_datetime=comp, appointment_problem=F['problem']) store_app.save() pp.appointment=store_app pp.save() mail_to_doctor=EmailMessage('appointment for %s'%pp.patient_name, store_app.appointment_problem, to=['spvijayal@gmail.com'] ) mail_to_doctor.send() mess='''Respected Sir/Madam, Your fresh appointment is scheduled on %s'''%F['datetime'] mail_to_patient=EmailMessage('clinic\'s name', mess, to=['%s'%pp.patient_email] ) mail_to_patient.send() return HttpResponseRedirect('results/') except Patient.DoesNotExist: try: z=phonenumbers.parse(F['phonenum'],"IN") except phonenumbers.NumberParseException: cont={'error_message': ' Invalid Phone Number '} return render(request,'appointment/index_l.html',cont) if int(F['age']) >= 120 or int(F['age']) < 1: con={'error_message': '%s is your age eh !! 
Nice try'%F['age']} return render(request,'appointment/index_l.html',con) if len(F['phonenum'][3:])!=10: cont={'error_message': ' Invalid Phone Number '} return render(request,'appointment/index_l.html',cont) try: u=(int(x) for x in F['phonenum'][1:]) for uu in u: uu=type(uu) except ValueError: cont={'error_message': ' Invalid Phone Number '} return render(request,'appointment/index_l.html',cont) if not phonenumbers.is_possible_number(z): cont={'error_message': ' Invalid Phone Number '} return render(request,'appointment/index_l.html',cont) if not phonenumbers.is_valid_number: cont={'error_message': ' Invalid Phone Number '} return render(request,'appointment/index_l.html',cont) email_doms=['aol.com','comcast.net','facebook.com', 'gmail.com', 'hotmail.com','msn.com' 'outlook.com','yahoo.com','yahoo.co.in' ] if str(F['email']).split('@')[0] == '': err_mail={'error_message':' Invalid email address '} return render(request,'appointment/index_l.html',err_mail) if F['email'].split('@')[1] not in email_doms : err_mail={'error_message':' No support for email by %s'%F['email'].split('@')[1]} return render(request,'appointment/index_l.html',err_mail) comp=datetime.strptime(F['datetime'],'%Y-%m-%d %H:%M:%S') store_app=Appointment( appointment_datetime=comp, appointment_problem=F['problem']) store_app.save() p=Patient(appointment=store_app, patient_name=F['name'], patient_age=int(F['age']), patient_sex=F['sex'], patient_email=F['email'], patient_phone=F['phonenum']) p.save() mail_to_doctor=EmailMessage('appointment for %s'%p.patient_name, store_app.appointment_problem, to=['spvijayal@gmail.com'] ) mail_to_doctor.send() mess='''Respected Sir/Madam, We are glad to offer our services,Kindly visit the clinic on %s'''%F['datetime'] mail_to_patient=EmailMessage('clinic\'s name', mess, to=['%s'%p.patient_email] ) mail_to_patient.send() return HttpResponseRedirect('results/') class Results(View): def get(self,request): return render(request,'appointment/index_l.html')
41.765625
120
0.529118
886
8,019
4.671558
0.23702
0.023194
0.045905
0.072481
0.458806
0.414835
0.404687
0.385359
0.37497
0.329307
0
0.007259
0.364385
8,019
192
121
41.765625
0.804787
0.076069
0
0.407407
0
0.006173
0.180766
0.032421
0
0
0
0
0
1
0.018519
false
0.006173
0.061728
0.006173
0.17284
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daa650264ca8e83a7c86604ab68eff2fe2f3357c
357
py
Python
casepro/msgs/migrations/0054_auto_20161201_1454.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
21
2015-07-21T15:57:49.000Z
2021-11-04T18:26:35.000Z
casepro/msgs/migrations/0054_auto_20161201_1454.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
357
2015-05-22T07:26:45.000Z
2022-03-12T01:08:28.000Z
casepro/msgs/migrations/0054_auto_20161201_1454.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
24
2015-05-28T12:30:25.000Z
2021-11-19T01:57:38.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.9.10 on 2016-12-01 14:54 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("msgs", "0053_faq")] operations = [migrations.AlterField(model_name="outgoing", name="text", field=models.TextField(max_length=800))]
27.461538
116
0.731092
48
357
5.270833
0.833333
0
0
0
0
0
0
0
0
0
0
0.07717
0.128852
357
12
117
29.75
0.736334
0.190476
0
0
1
0
0.083916
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
daa71efd1db40a31c64d874f50729bf5397d345d
1,906
py
Python
tests/tableTests.py
AlexImmer/WikitablesToTriples
5d86489483ca32d19fc11c48707b671fb9dbfcb1
[ "MIT" ]
3
2015-09-10T15:18:53.000Z
2016-06-18T14:12:36.000Z
tests/tableTests.py
AlexImmer/WikitablesToTriples
5d86489483ca32d19fc11c48707b671fb9dbfcb1
[ "MIT" ]
2
2020-05-28T11:08:30.000Z
2021-03-31T18:40:30.000Z
tests/tableTests.py
AlexImmer/WikitablesToTriples
5d86489483ca32d19fc11c48707b671fb9dbfcb1
[ "MIT" ]
1
2020-01-29T09:20:11.000Z
2020-01-29T09:20:11.000Z
import unittest from wikitables.table import Table from testClasses import TestPage with open ("tests/data/List_of_national_parks_of_the_United_States.html", "r") as html_page: text = html_page.read().replace('\n', '') class TestTableInit(unittest.TestCase): def setUp(self): self.page = TestPage(text) def test_table_init(self): tables = self.page.tables self.assertEqual(len(tables), 1) class TestTableMethods(unittest.TestCase): def setUp(self): self.page = TestPage(text) self.table = self.page.tables[0] def test_repr(self): result = "Table in section 'National Parks'" self.assertEqual(result, self.table.__repr__()) def test_section(self): article = 'National Parks' self.assertEqual(article, self.table._section()) def test_key_extraction(self): # key is 'name' on first position self.assertEqual(0, self.table.key) def test_is_key(self): self.assertTrue(self.table.is_key(0)) self.assertTrue(self.table.is_key('Name')) self.assertFalse(self.table.is_key(1)) self.assertFalse(self.table.is_key('Photo')) def test_get_item(self): self.assertTrue('http://dbpedia.org/resource/Acadia_National_Park' in self.table.__getitem__(0)) self.assertTrue('http://dbpedia.org/resource/Acadia_National_Park' in self.table.__getitem__('Name')) def test_skip(self): self.assertFalse(self.table.skip()) def test_name_match(self): values = ['located_in', 'Location'] match = self.table.name_match(values[0], values[1]) id_match = self.table.name_match(values[0], 2) self.assertEqual(match, id_match) self.assertTrue(match <= 1.0 and match >= 0.0) def test_str_col_name(self): self.assertEqual(self.table._str_column_name(0), self.table._str_column_name('Name'))
32.862069
109
0.675236
258
1,906
4.767442
0.290698
0.109756
0.035772
0.045528
0.372358
0.336585
0.243902
0.195122
0.195122
0.117073
0
0.009785
0.195698
1,906
57
110
33.438596
0.792564
0.016264
0
0.097561
0
0
0.128137
0.0315
0
0
0
0
0.341463
1
0.268293
false
0
0.073171
0
0.390244
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
daa7627c934e5e470f68c42789921d00362402ef
2,395
py
Python
terraform-provider-hpam/mock-hpam-server/fake_hpam.py
f-guichard/terraform-provider-examples
cbef217eb82df1f1c8798af2eebc065f2357b1a7
[ "Apache-2.0" ]
null
null
null
terraform-provider-hpam/mock-hpam-server/fake_hpam.py
f-guichard/terraform-provider-examples
cbef217eb82df1f1c8798af2eebc065f2357b1a7
[ "Apache-2.0" ]
null
null
null
terraform-provider-hpam/mock-hpam-server/fake_hpam.py
f-guichard/terraform-provider-examples
cbef217eb82df1f1c8798af2eebc065f2357b1a7
[ "Apache-2.0" ]
null
null
null
# -*- coding: UTF-8 -*- #Othello.java style : single file program import os import time from flask import Flask from flask import jsonify from flask import request # Global variables section _CREATE_DELAY = 2 PORT = 30026 # Affectation port a updater pour CloudFoundry CONTROLLER_VERSION = "v1" _CONTROLLER_NAME = "Asset Mgmt Controller" _26E_URL = "/"+CONTROLLER_VERSION+"/26e" _26E_ID = "/"+CONTROLLER_VERSION+"/26e/<id>" _HELPER_RESPONSE = { _CONTROLLER_NAME: CONTROLLER_VERSION, "GET "+_26E_URL : { "method": "GET", "parameters": "", "code retour": "200" }, "GET "+_26E_ID : { "method": "GET", "parameters": "un identifiant de vips", "code retour": "200" }, "POST "+_26E_URL : { "method": "POST", "parameters": "json body like {}", "code retour": "201" }, "PATCH "+_26E_ID : { "method": "PATCH", "parameters": "json body like : {vipid : 'DESCRIPTION':'DESCRIPTION'}", "code retour": "200" }, "DELETE "+_26E_ID : { "method": "DELETE", "parameters": "un identifiant de vip", "code retour": "200" } } ramDic = {} app = Flask(__name__) @app.route('/') def index(): return 'WORKING' @app.route('/help') def help(): return jsonify(_HELPER_RESPONSE) @app.route(_26E_URL, methods=['GET']) def list_assets(): #PEP 448 response = jsonify(*ramDic) response.status_code = 200 return response @app.route(_26E_ID, methods=['GET']) def list_asset(id): response = jsonify(ramDic.get(id)) response.status_code = 200 return response @app.route(_26E_URL, methods=['POST']) def create_assets(): body = request.get_json(force=True) ramDic[str(len(ramDic))] = body response = jsonify({'id':str(len(ramDic)-1)},{"obj":ramDic.get(str(len(ramDic)-1))}) response.status_code = 201 time.sleep(_CREATE_DELAY) return response @app.route(_26E_ID, methods=['PATCH']) def patch_assets(): response = jsonify('NOT IMPLEMENTED YET') response.status_code = 200 return response @app.route(_26E_ID, methods=['DELETE']) def delete_assets(id): response = jsonify(ramDic.pop(id)) response.status_code = 200 return response app.debug = 
True app.run(host='0.0.0.0', port=PORT)
24.947917
89
0.604175
283
2,395
4.918728
0.314488
0.028736
0.057471
0.068247
0.194684
0.194684
0.166667
0.142241
0.112069
0.079023
0
0.041505
0.245511
2,395
95
90
25.210526
0.728832
0.05762
0
0.194805
0
0
0.192022
0.012987
0
0
0
0
0
1
0.090909
false
0
0.064935
0.025974
0.246753
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daaa7152b73fa0a13aa5df80e0a7b9ce4f28141b
1,489
py
Python
setup.py
OpenMindInnovation/quetzal
3940dfe8e3d2a1060ec89ba4e575365563042bf9
[ "BSD-3-Clause" ]
null
null
null
setup.py
OpenMindInnovation/quetzal
3940dfe8e3d2a1060ec89ba4e575365563042bf9
[ "BSD-3-Clause" ]
null
null
null
setup.py
OpenMindInnovation/quetzal
3940dfe8e3d2a1060ec89ba4e575365563042bf9
[ "BSD-3-Clause" ]
null
null
null
# http://flask.pocoo.org/docs/1.0/patterns/packages/ from setuptools import setup, find_packages import versioneer authors = [ ('David Ojeda', 'david@dojeda.com'), ] author_names = ', '.join(tup[0] for tup in authors) author_emails = ', '.join(tup[1] for tup in authors) setup( name='quetzal', packages=find_packages(exclude=['docs', 'migrations', 'tests']), namespace_packages=['quetzal'], include_package_data=True, python_requires='>=3.6, ~=3.7', install_requires=[ 'Flask', 'werkzeug', 'Flask-Login', 'Flask-Principal', 'connexion', 'celery', 'kombu', 'Flask-Celery-Helper', 'SQLAlchemy', 'Flask-SQLAlchemy', 'Flask-Migrate', 'alembic', 'psycopg2-binary', 'sqlparse', 'requests', 'Click', 'syslog-rfc5424-formatter', 'apscheduler', 'gunicorn', 'google-cloud-storage', ], author=author_names, author_email=author_emails, classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Flask', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Database', 'Topic :: Scientific/Engineering', 'Topic :: System :: Archiving', ], version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), zip_safe=False, )
25.672414
68
0.577569
144
1,489
5.868056
0.659722
0.028402
0.018935
0.035503
0
0
0
0
0
0
0
0.013774
0.268637
1,489
57
69
26.122807
0.762167
0.03358
0
0.039216
0
0
0.374391
0.032011
0
0
0
0
0
1
0
false
0
0.039216
0
0.039216
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daad3d4644a92884bdcd19bcc887b15d6229fc69
849
py
Python
mundo3-EstruturasCompostas/102-funcaoFatorial.py
jonasht/CursoEmVideo-CursoDePython3
a1bbf1fe4226b1828213742ee5a440278d903fd1
[ "MIT" ]
null
null
null
mundo3-EstruturasCompostas/102-funcaoFatorial.py
jonasht/CursoEmVideo-CursoDePython3
a1bbf1fe4226b1828213742ee5a440278d903fd1
[ "MIT" ]
null
null
null
mundo3-EstruturasCompostas/102-funcaoFatorial.py
jonasht/CursoEmVideo-CursoDePython3
a1bbf1fe4226b1828213742ee5a440278d903fd1
[ "MIT" ]
null
null
null
#Exercício Python 102: # Crie um programa que tenha uma função fatorial() # que receba dois parâmetros: # o primeiro que indique o número a calcular e # outro chamado show, que será um valor lógico (opcional) # indicando se será mostrado ou não na tela o processo de # cálculo do fatorial. def fatorial(n, show=False): ''' -> calcula um fatorial de um numero :para n: o numero para ser calculado :para show: (opcional) mostrar ou não aconta (False/True) :return: o valor do fatorial 'numero' ''' fatorial = [i for i in range(1, 1+n)] soma = 1 resposta = '' for i in fatorial: soma *= i if show: for i in fatorial: resposta += f'{i} X ' return f'{resposta[:-2] }= {soma}' if resposta else soma print(fatorial(5, True)) print(fatorial(9)) help(fatorial)
28.3
61
0.632509
126
849
4.261905
0.531746
0.022346
0.03352
0.052142
0
0
0
0
0
0
0
0.014469
0.267373
849
29
62
29.275862
0.848875
0.526502
0
0.153846
0
0
0.081522
0
0
0
0
0
0
1
0.076923
false
0
0
0
0.153846
0.153846
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daadc2cf5cec9fff6add080dc7c4dc6375da7cf0
3,732
py
Python
ergasia4tel.py
nantinkpsl/Ergasies.python
160c8367a87903f6b444323f117351822bd62aa3
[ "CC0-1.0" ]
null
null
null
ergasia4tel.py
nantinkpsl/Ergasies.python
160c8367a87903f6b444323f117351822bd62aa3
[ "CC0-1.0" ]
null
null
null
ergasia4tel.py
nantinkpsl/Ergasies.python
160c8367a87903f6b444323f117351822bd62aa3
[ "CC0-1.0" ]
null
null
null
f = open("two_cities_ascii.txt", "r")#Άνοιγμα αρχείου για διάβασμα text=f.read()#Εκχώρηση του περιεχόμενου του αρχείου στηνν μεταβλητή text ως string f.close()#Κλείσιμο του αρχείου L = []#Δημιουργία κενής λίστας με σκοπό να βάλουμε ΄όλες τις λέξεις χωρίς τα σύμβολα """Καθάρισμα του κειμένου από όλους τους χαρακτήρες που δεν είναι γράμματα""" text=text.replace(',',' ') text=text.replace('.',' ') text=text.replace('!',' ') text=text.replace('"',' ') text=text.replace('#',' ') text=text.replace('$',' ') text=text.replace('%',' ') text=text.replace('^',' ') text=text.replace('&',' ') text=text.replace('(',' ') text=text.replace(')',' ') text=text.replace('*',' ') text=text.replace('+',' ') text=text.replace('-',' ') text=text.replace('/',' ') text=text.replace('0',' ') text=text.replace('1',' ') text=text.replace('2',' ') text=text.replace('3',' ') text=text.replace('4',' ') text=text.replace('5',' ') text=text.replace('6',' ') text=text.replace('7',' ') text=text.replace('8',' ') text=text.replace('9',' ') text=text.replace(':',' ') text=text.replace(';',' ') text=text.replace('<',' ') text=text.replace('=',' ') text=text.replace('>',' ') text=text.replace('?',' ') text=text.replace('@',' ') text=text.replace('[',' ') text=text.replace('\ ',' ') text=text.replace(']',' ') text=text.replace('_',' ') text=text.replace('}',' ') text=text.replace('|',' ') text=text.replace('{',' ') text=text.replace('~',' ') text=text.replace('`',' ') L = text.split()#Διαχωρισμός των λέξεων με βάση τον κενό χαρακτήρα και εκχώρηση αυτών στην λίστα #Έλεγχος και εμφάνιση των ζευγαριών των λέξεων της λίστας των οποίων το μήκος χαρακτήρων είναι στο σύνολο ακριβώς 20 i=0 while(i<len(L)-1): removeitems=L[i] j=i+1 while(j<len(L)-2): n=len(L[i]+L[j]) if n==20: print('Ζευγάρι λέξεων: ',L[i],'-',L[j]) if(i<j): L.pop(i) L.pop(j+1) else: L.pop(i) L.pop(j) j=j+1 i=i+1 #Δημιουργία λιστών για την τοποθέτηση των λέξεων που απέμειναν με βάση το μήκος τους ls1=[] ls2=[] ls3=[] ls4=[] ls5=[] ls6=[] ls7=[] 
ls8=[] ls9=[] ls10=[] ls11=[] ls12=[] ls13=[] ls14=[] ls15=[] ls16=[] ls17=[] ls18=[] ls19=[] for i in range(len(L)): if len(L[i])==1: ls1.append(L[i]) elif len(L[i])==2: ls2.append(L[i]) elif len(L[i])==3: ls3.append(L[i]) elif len(L[i])==4: ls4.append(L[i]) elif len(L[i])==5: ls5.append(L[i]) elif len(L[i])==6: ls6.append(L[i]) elif len(L[i])==7: ls7.append(L[i]) elif len(L[i])==8: ls8.append(L[i]) elif len(L[i])==9: ls9.append(L[i]) elif len(L[i])==10: ls10.append(L[i]) elif len(L[i])==11: ls11.append(L[i]) elif len(L[i])==12: ls12.append(L[i]) elif len(L[i])==13: ls13.append(L[i]) elif len(L[i])==14: ls14.append(L[i]) elif len(L[i])==15: ls15.append(L[i]) elif len(L[i])==16: ls16.append(L[i]) elif len(L[i])==17: ls17.append(L[i]) elif len(L[i])==18: ls18.append(L[i]) elif len(L[i])==19: ls19.append(L[i]) #Εμφάνιση των ζευγαριών που απέμειναν στην λίστα με βάση το μήκος τους print("*******************Λέξεις που απομένουν*****************") print(" 1",ls1,"\n","2",ls2,"\n","3",ls3,"\n","4",ls4,"\n","5",ls5,"\n","6",ls6,"\n","7",ls7,"\n","8",ls8,"\n","9",ls9,"\n","10",ls10,"\n","11",ls11,"\n","12",ls12,"\n","13",ls13,"\n","14",ls14,"\n","15",ls15,"\n","16",ls16,"\n","17",ls17,"\n","18",ls18,"\n","19",ls19)
27.850746
270
0.521704
563
3,732
3.454707
0.234458
0.168638
0.316195
0.293059
0.438046
0.414396
0.404113
0.246787
0.246787
0.246787
0
0.056636
0.21463
3,732
133
271
28.06015
0.606619
0.144427
0
0.016393
0
0
0.081681
0.017143
0
0
0
0
0
1
0
false
0
0
0
0
0.02459
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
daae9df32d246183515db8e74d555c8793c010e1
488
py
Python
shop/migrations/0014_auto_20200120_2035.py
rafkolacz/PodajLape
be7358ffa6c6e5e6f6fe4bc7ada46f0d7df055d6
[ "PostgreSQL" ]
null
null
null
shop/migrations/0014_auto_20200120_2035.py
rafkolacz/PodajLape
be7358ffa6c6e5e6f6fe4bc7ada46f0d7df055d6
[ "PostgreSQL" ]
null
null
null
shop/migrations/0014_auto_20200120_2035.py
rafkolacz/PodajLape
be7358ffa6c6e5e6f6fe4bc7ada46f0d7df055d6
[ "PostgreSQL" ]
null
null
null
# Generated by Django 3.0.1 on 2020-01-20 19:35 from django.db import migrations, models import shop.models class Migration(migrations.Migration): dependencies = [ ('shop', '0013_auto_20200120_1949'), ] operations = [ migrations.AlterField( model_name='product', name='image', field=models.ImageField(blank=True, default='http://placehold.it/700x400', null=True, upload_to=shop.models.get_image_path), ), ]
24.4
136
0.639344
58
488
5.258621
0.775862
0.065574
0
0
0
0
0
0
0
0
0
0.099462
0.237705
488
19
137
25.684211
0.72043
0.092213
0
0
1
0
0.14966
0.052154
0
0
0
0
0
1
0
false
0
0.153846
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daaeb5f227baa564bbc4e4ee7a2ab88b1d145fb1
416
py
Python
users/migrations/0007_alter_starred_concerts_id.py
MorganRoberts00/MyConcerts
650185a8421d52d052763fbf03ec046583863632
[ "MIT" ]
1
2022-01-12T23:08:39.000Z
2022-01-12T23:08:39.000Z
users/migrations/0007_alter_starred_concerts_id.py
Zachhi/MyConcerts
650185a8421d52d052763fbf03ec046583863632
[ "MIT" ]
null
null
null
users/migrations/0007_alter_starred_concerts_id.py
Zachhi/MyConcerts
650185a8421d52d052763fbf03ec046583863632
[ "MIT" ]
null
null
null
# Generated by Django 3.2.9 on 2021-12-01 16:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0006_auto_20211117_2144'), ] operations = [ migrations.AlterField( model_name='starred_concerts', name='id', field=models.BigIntegerField(primary_key=True, serialize=False), ), ]
21.894737
76
0.620192
45
416
5.6
0.866667
0
0
0
0
0
0
0
0
0
0
0.101974
0.269231
416
18
77
23.111111
0.726974
0.108173
0
0
1
0
0.124661
0.062331
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daaeeb623720fda94eff96a09b813f98fd47e55e
4,048
py
Python
facebook_data_analysis/conversation_analysis/friends_network.py
Anogio/facebook_data_analysis
a43a64bf4892e8bb705a96805912f726f194f319
[ "MIT" ]
null
null
null
facebook_data_analysis/conversation_analysis/friends_network.py
Anogio/facebook_data_analysis
a43a64bf4892e8bb705a96805912f726f194f319
[ "MIT" ]
null
null
null
facebook_data_analysis/conversation_analysis/friends_network.py
Anogio/facebook_data_analysis
a43a64bf4892e8bb705a96805912f726f194f319
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import networkx as nx import pandas as pd from facebook_data_analysis.conversation_analysis.activity_graphs import save_graph from facebook_data_analysis.global_vars import messages_cols from facebook_data_analysis.tools.helpers import cached from sklearn.manifold import MDS from tqdm import tqdm @cached("people_distance") def get_people_distances( messages_df, conversations_df, min_messages_to_appear=10 ): # pylint: disable=too-many-locals group_conversations = conversations_df.loc[conversations_df["n_participants"] > 2][ messages_cols.conv_id ] messages_in_group_conversations = messages_df[ messages_df[messages_cols.conv_id].isin(group_conversations.values) ] friends_in_group_conversations = messages_in_group_conversations.groupby( messages_cols.sender )[messages_cols.timestamp].count() selected_friends = friends_in_group_conversations[ friends_in_group_conversations > min_messages_to_appear ].index.values messages_in_group_conversations = messages_in_group_conversations[ messages_in_group_conversations[messages_cols.sender].isin(selected_friends) ] participants_list_by_conversation = messages_in_group_conversations.groupby( messages_cols.conv_id )[messages_cols.sender].unique() messages_by_person_by_conversation = messages_in_group_conversations.groupby( [messages_cols.conv_id, messages_cols.sender] )[messages_cols.timestamp].count() total_messages_by_person = messages_in_group_conversations.groupby( [messages_cols.sender] )[messages_cols.timestamp].count() print("Counting common conversation messages for all friends...") co_occurrence = pd.DataFrame(0, index=selected_friends, columns=selected_friends) for conversation in tqdm(participants_list_by_conversation.index): participants = participants_list_by_conversation[conversation] messages_by_person = messages_by_person_by_conversation[conversation] for participant1 in range( # pylint: disable=consider-using-enumerate # TODO len(participants) ): for participant2 in 
range(participant1, len(participants)): exchanged_messages = ( messages_by_person[participants[participant1]] + messages_by_person[participants[participant2]] ) co_occurrence.loc[ participants[participant1], participants[participant2] ] += exchanged_messages if participant1 != participant2: co_occurrence.loc[ participants[participant2], participants[participant1] ] += exchanged_messages print() print("Adjusting for total number of messages...") with tqdm(total=len(selected_friends) ** 2) as pbar: for friend1 in selected_friends: for friend2 in selected_friends: co_occurrence.loc[friend1, friend2] = co_occurrence.loc[ friend1, friend2 ] / ( total_messages_by_person[friend1] + total_messages_by_person[friend2] ) pbar.update() distance = 1 - co_occurrence return distance @cached("projection_coordinates") def get_projection_coordinates(distance): mds = MDS(n_components=2, verbose=1, n_jobs=-1, dissimilarity="precomputed") coordinates = mds.fit_transform(distance.values) return pd.DataFrame(coordinates, index=distance.index) @save_graph("friends_graph") def friends_plot(coordinates): _, ax = plt.subplots() coordinates.plot(0, 1, kind="scatter", ax=ax) for k, v in coordinates.iterrows(): ax.annotate(k, v) return ax @save_graph("friends_network") def friends_network(distance, threshold): graph = nx.convert_matrix.from_pandas_adjacency(distance < threshold) _, ax = plt.subplots() nx.draw(graph, ax=ax) return ax
38.552381
87
0.704298
447
4,048
6.058166
0.270694
0.057607
0.081241
0.082718
0.259232
0.172083
0.172083
0.155096
0.155096
0.121123
0
0.00947
0.217391
4,048
104
88
38.923077
0.845328
0.019269
0
0.170455
0
0
0.048916
0.005547
0
0
0
0.009615
0
1
0.045455
false
0
0.090909
0
0.181818
0.034091
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daaf9ad2bbd982879bb0f1d6c500eb2886d1827f
18,405
py
Python
sdk/python/pulumi_buildkite/team.py
grapl-security/pulumi-buildkite
f801ecb661d82da6b939b13f5520038e3b6e891f
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_buildkite/team.py
grapl-security/pulumi-buildkite
f801ecb661d82da6b939b13f5520038e3b6e891f
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_buildkite/team.py
grapl-security/pulumi-buildkite
f801ecb661d82da6b939b13f5520038e3b6e891f
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['TeamArgs', 'Team'] @pulumi.input_type class TeamArgs: def __init__(__self__, *, default_member_role: pulumi.Input[str], default_team: pulumi.Input[bool], privacy: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, members_can_create_pipelines: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Team resource. :param pulumi.Input[str] default_member_role: Default role to assign to a team member. :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default. :param pulumi.Input[str] privacy: The privacy level to set the team too. :param pulumi.Input[str] description: The description to assign to the team. :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create. :param pulumi.Input[str] name: The name of the team. """ pulumi.set(__self__, "default_member_role", default_member_role) pulumi.set(__self__, "default_team", default_team) pulumi.set(__self__, "privacy", privacy) if description is not None: pulumi.set(__self__, "description", description) if members_can_create_pipelines is not None: pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="defaultMemberRole") def default_member_role(self) -> pulumi.Input[str]: """ Default role to assign to a team member. 
""" return pulumi.get(self, "default_member_role") @default_member_role.setter def default_member_role(self, value: pulumi.Input[str]): pulumi.set(self, "default_member_role", value) @property @pulumi.getter(name="defaultTeam") def default_team(self) -> pulumi.Input[bool]: """ Whether to assign this team to a user by default. """ return pulumi.get(self, "default_team") @default_team.setter def default_team(self, value: pulumi.Input[bool]): pulumi.set(self, "default_team", value) @property @pulumi.getter def privacy(self) -> pulumi.Input[str]: """ The privacy level to set the team too. """ return pulumi.get(self, "privacy") @privacy.setter def privacy(self, value: pulumi.Input[str]): pulumi.set(self, "privacy", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The description to assign to the team. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="membersCanCreatePipelines") def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]: """ Whether team members can create. """ return pulumi.get(self, "members_can_create_pipelines") @members_can_create_pipelines.setter def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "members_can_create_pipelines", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the team. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _TeamState: def __init__(__self__, *, default_member_role: Optional[pulumi.Input[str]] = None, default_team: Optional[pulumi.Input[bool]] = None, description: Optional[pulumi.Input[str]] = None, members_can_create_pipelines: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, privacy: Optional[pulumi.Input[str]] = None, slug: Optional[pulumi.Input[str]] = None, uuid: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Team resources. :param pulumi.Input[str] default_member_role: Default role to assign to a team member. :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default. :param pulumi.Input[str] description: The description to assign to the team. :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create. :param pulumi.Input[str] name: The name of the team. :param pulumi.Input[str] privacy: The privacy level to set the team too. :param pulumi.Input[str] slug: The name of the team. :param pulumi.Input[str] uuid: The UUID for the team. 
""" if default_member_role is not None: pulumi.set(__self__, "default_member_role", default_member_role) if default_team is not None: pulumi.set(__self__, "default_team", default_team) if description is not None: pulumi.set(__self__, "description", description) if members_can_create_pipelines is not None: pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines) if name is not None: pulumi.set(__self__, "name", name) if privacy is not None: pulumi.set(__self__, "privacy", privacy) if slug is not None: pulumi.set(__self__, "slug", slug) if uuid is not None: pulumi.set(__self__, "uuid", uuid) @property @pulumi.getter(name="defaultMemberRole") def default_member_role(self) -> Optional[pulumi.Input[str]]: """ Default role to assign to a team member. """ return pulumi.get(self, "default_member_role") @default_member_role.setter def default_member_role(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_member_role", value) @property @pulumi.getter(name="defaultTeam") def default_team(self) -> Optional[pulumi.Input[bool]]: """ Whether to assign this team to a user by default. """ return pulumi.get(self, "default_team") @default_team.setter def default_team(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "default_team", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The description to assign to the team. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="membersCanCreatePipelines") def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]: """ Whether team members can create. 
""" return pulumi.get(self, "members_can_create_pipelines") @members_can_create_pipelines.setter def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "members_can_create_pipelines", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the team. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def privacy(self) -> Optional[pulumi.Input[str]]: """ The privacy level to set the team too. """ return pulumi.get(self, "privacy") @privacy.setter def privacy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "privacy", value) @property @pulumi.getter def slug(self) -> Optional[pulumi.Input[str]]: """ The name of the team. """ return pulumi.get(self, "slug") @slug.setter def slug(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "slug", value) @property @pulumi.getter def uuid(self) -> Optional[pulumi.Input[str]]: """ The UUID for the team. """ return pulumi.get(self, "uuid") @uuid.setter def uuid(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "uuid", value) class Team(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, default_member_role: Optional[pulumi.Input[str]] = None, default_team: Optional[pulumi.Input[bool]] = None, description: Optional[pulumi.Input[str]] = None, members_can_create_pipelines: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, privacy: Optional[pulumi.Input[str]] = None, __props__=None): """ ## # Resource: team This resource allows you to create and manage teams. Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions Note: You must first enable Teams on your organization. 
## Example Usage ```python import pulumi import pulumi_buildkite as buildkite team = buildkite.Team("team", default_member_role="MEMBER", default_team=True, privacy="VISIBLE") ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] default_member_role: Default role to assign to a team member. :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default. :param pulumi.Input[str] description: The description to assign to the team. :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create. :param pulumi.Input[str] name: The name of the team. :param pulumi.Input[str] privacy: The privacy level to set the team too. """ ... @overload def __init__(__self__, resource_name: str, args: TeamArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## # Resource: team This resource allows you to create and manage teams. Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions Note: You must first enable Teams on your organization. ## Example Usage ```python import pulumi import pulumi_buildkite as buildkite team = buildkite.Team("team", default_member_role="MEMBER", default_team=True, privacy="VISIBLE") ``` :param str resource_name: The name of the resource. :param TeamArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, default_member_role: Optional[pulumi.Input[str]] = None, default_team: Optional[pulumi.Input[bool]] = None, description: Optional[pulumi.Input[str]] = None, members_can_create_pipelines: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, privacy: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.plugin_download_url is None: opts.plugin_download_url = _utilities.get_plugin_download_url() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TeamArgs.__new__(TeamArgs) if default_member_role is None and not opts.urn: raise TypeError("Missing required property 'default_member_role'") __props__.__dict__["default_member_role"] = default_member_role if default_team is None and not opts.urn: raise TypeError("Missing required property 'default_team'") __props__.__dict__["default_team"] = default_team __props__.__dict__["description"] = description __props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines __props__.__dict__["name"] = name if privacy is None and not opts.urn: raise TypeError("Missing required property 'privacy'") __props__.__dict__["privacy"] = privacy __props__.__dict__["slug"] = None __props__.__dict__["uuid"] = None 
super(Team, __self__).__init__( 'buildkite:index/team:Team', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, default_member_role: Optional[pulumi.Input[str]] = None, default_team: Optional[pulumi.Input[bool]] = None, description: Optional[pulumi.Input[str]] = None, members_can_create_pipelines: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, privacy: Optional[pulumi.Input[str]] = None, slug: Optional[pulumi.Input[str]] = None, uuid: Optional[pulumi.Input[str]] = None) -> 'Team': """ Get an existing Team resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] default_member_role: Default role to assign to a team member. :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default. :param pulumi.Input[str] description: The description to assign to the team. :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create. :param pulumi.Input[str] name: The name of the team. :param pulumi.Input[str] privacy: The privacy level to set the team too. :param pulumi.Input[str] slug: The name of the team. :param pulumi.Input[str] uuid: The UUID for the team. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TeamState.__new__(_TeamState) __props__.__dict__["default_member_role"] = default_member_role __props__.__dict__["default_team"] = default_team __props__.__dict__["description"] = description __props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines __props__.__dict__["name"] = name __props__.__dict__["privacy"] = privacy __props__.__dict__["slug"] = slug __props__.__dict__["uuid"] = uuid return Team(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="defaultMemberRole") def default_member_role(self) -> pulumi.Output[str]: """ Default role to assign to a team member. """ return pulumi.get(self, "default_member_role") @property @pulumi.getter(name="defaultTeam") def default_team(self) -> pulumi.Output[bool]: """ Whether to assign this team to a user by default. """ return pulumi.get(self, "default_team") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ The description to assign to the team. """ return pulumi.get(self, "description") @property @pulumi.getter(name="membersCanCreatePipelines") def members_can_create_pipelines(self) -> pulumi.Output[Optional[bool]]: """ Whether team members can create. """ return pulumi.get(self, "members_can_create_pipelines") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the team. """ return pulumi.get(self, "name") @property @pulumi.getter def privacy(self) -> pulumi.Output[str]: """ The privacy level to set the team too. """ return pulumi.get(self, "privacy") @property @pulumi.getter def slug(self) -> pulumi.Output[str]: """ The name of the team. """ return pulumi.get(self, "slug") @property @pulumi.getter def uuid(self) -> pulumi.Output[str]: """ The UUID for the team. """ return pulumi.get(self, "uuid")
38.34375
134
0.629394
2,157
18,405
5.122856
0.077886
0.093575
0.08362
0.075656
0.844344
0.823891
0.792127
0.762986
0.753665
0.7381
0
0.000074
0.26721
18,405
479
135
38.4238
0.819233
0.249878
0
0.659259
1
0
0.098952
0.029386
0
0
0
0
0
1
0.159259
false
0.003704
0.018519
0
0.274074
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
dab0e6323421a3aea4c254b4a72550f557156f41
56
py
Python
myself.py
febycloud/PROG8420
147b4877a33221b93c9a74f90819c397179f7332
[ "MIT" ]
null
null
null
myself.py
febycloud/PROG8420
147b4877a33221b93c9a74f90819c397179f7332
[ "MIT" ]
null
null
null
myself.py
febycloud/PROG8420
147b4877a33221b93c9a74f90819c397179f7332
[ "MIT" ]
null
null
null
import sample print('what is your name') print(sample.d)
18.666667
26
0.767857
10
56
4.3
0.8
0
0
0
0
0
0
0
0
0
0
0
0.107143
56
3
27
18.666667
0.86
0
0
0
0
0
0.298246
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
dab0f9b97d8498de478ba17e126e3ab6a3a5d932
3,739
py
Python
profiles_api/migrations/0002_category_course_image_instructor_review_video.py
MohamedAboubasha/profiles-rest-api
0e5ede6b4771396c9f3399d05be06c19b9ac1530
[ "MIT" ]
null
null
null
profiles_api/migrations/0002_category_course_image_instructor_review_video.py
MohamedAboubasha/profiles-rest-api
0e5ede6b4771396c9f3399d05be06c19b9ac1530
[ "MIT" ]
null
null
null
profiles_api/migrations/0002_category_course_image_instructor_review_video.py
MohamedAboubasha/profiles-rest-api
0e5ede6b4771396c9f3399d05be06c19b9ac1530
[ "MIT" ]
null
null
null
# Generated by Django 2.2 on 2022-01-08 21:51 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('profiles_api', '0001_initial'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='Course', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255)), ('description', models.CharField(max_length=1023)), ('overview', models.CharField(max_length=1023)), ('price', models.IntegerField()), ('created_at', models.DateField(auto_now_add=True)), ('updated_at', models.DateField(auto_now=True)), ('language', models.CharField(max_length=255)), ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='profiles_api.Category')), ], ), migrations.CreateModel( name='Video', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255)), ('description', models.CharField(max_length=1023)), ('video_path', models.FileField(upload_to='uploads/<django.db.models.fields.related.ForeignKey>/videos/')), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')), ], ), migrations.CreateModel( name='Review', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('review', models.CharField(max_length=511)), ('rating', models.IntegerField()), ('created_at', models.DateField(auto_now_add=True)), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, 
to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Instructor', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255)), ('image_path', models.CharField(max_length=255)), ('description', models.CharField(max_length=1023)), ('rating', models.IntegerField()), ('courses', models.ManyToManyField(to='profiles_api.Course')), ], ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(choices=[('cover', 'cover'), ('overview', 'overview')], max_length=255)), ('image_path', models.FileField(upload_to='uploads/<django.db.models.fields.related.ForeignKey>/images/')), ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')), ], ), ]
47.935897
133
0.581171
373
3,739
5.66756
0.219839
0.085147
0.093661
0.124882
0.732734
0.695364
0.683065
0.650426
0.650426
0.650426
0
0.02123
0.269323
3,739
77
134
48.558442
0.752562
0.0115
0
0.619718
1
0
0.140769
0.03817
0
0
0
0
0
1
0
false
0
0.042254
0
0.084507
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
dab15481ef318e82c0705f93a221be1485c98088
616
py
Python
src/backends/python/flask/core/repos.py
ferlikh/many-backends
933b94e13e368b9d73936e006f26d33aec7212b7
[ "MIT" ]
null
null
null
src/backends/python/flask/core/repos.py
ferlikh/many-backends
933b94e13e368b9d73936e006f26d33aec7212b7
[ "MIT" ]
null
null
null
src/backends/python/flask/core/repos.py
ferlikh/many-backends
933b94e13e368b9d73936e006f26d33aec7212b7
[ "MIT" ]
null
null
null
from core import ( config, db ) class Repo(): def __init__(self, table): self.table = table def create_one(self, model): return db.create_one(self.table, model) def update_one(self, model): return db.update_one(self.table, model) def delete_one(self, id): return db.delete_one(self.table, id) def find_one(self, where=None): return db.find_one(self.table, where=where) def find_many(self, where=None, limit=None): return db.find_many(self.table, where=where, limit=limit) for module in config.modules: globals()[module] = Repo(module)
30.8
65
0.660714
91
616
4.318681
0.307692
0.142494
0.122137
0.091603
0.203562
0
0
0
0
0
0
0
0.219156
616
20
66
30.8
0.817048
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.055556
0.277778
0.722222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
dab23516379326ead1d968570c74ed69eecbf055
2,863
py
Python
frictionless/settings.py
Grandient/frictionless-py
8c41b96f2047d573702fe7b34ff7ffbd32c3d815
[ "MIT" ]
247
2020-08-04T16:42:09.000Z
2022-03-30T11:54:54.000Z
frictionless/settings.py
Grandient/frictionless-py
8c41b96f2047d573702fe7b34ff7ffbd32c3d815
[ "MIT" ]
444
2020-07-29T09:13:59.000Z
2022-03-31T14:54:57.000Z
frictionless/settings.py
Grandient/frictionless-py
8c41b96f2047d573702fe7b34ff7ffbd32c3d815
[ "MIT" ]
60
2020-09-04T11:39:34.000Z
2022-03-23T18:59:51.000Z
import os import json import gzip import zipfile # Helpers def read_asset(*paths): dirname = os.path.dirname(__file__) with open(os.path.join(dirname, "assets", *paths)) as file: return file.read().strip() # General UNDEFINED = object() VERSION = read_asset("VERSION") COMPRESSION_FORMATS = ["zip", "gz"] INQUIRY_PROFILE = json.loads(read_asset("profiles", "inquiry.json")) PIPELINE_PROFILE = json.loads(read_asset("profiles", "pipeline.json")) REPORT_PROFILE = json.loads(read_asset("profiles", "report.json")) STATUS_PROFILE = json.loads(read_asset("profiles", "status.json")) SCHEMA_PROFILE = json.loads(read_asset("profiles", "schema", "general.json")) RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "general.json")) TABULAR_RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "tabular.json")) PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "general.json")) FISCAL_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "fiscal.json")) TABULAR_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "tabular.json")) GEOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "general.json")) TOPOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "topojson.json")) # Defaults DEFAULT_SCHEME = "file" DEFAULT_FORMAT = "csv" DEFAULT_HASHING = "md5" DEFAULT_ENCODING = "utf-8" DEFAULT_INNERPATH = "" DEFAULT_COMPRESSION = "" DEFAULT_HEADER = True DEFAULT_HEADER_ROWS = [1] DEFAULT_HEADER_JOIN = " " DEFAULT_HEADER_CASE = True DEFAULT_FLOAT_NUMBERS = False DEFAULT_MISSING_VALUES = [""] DEFAULT_LIMIT_ERRORS = 1000 DEFAULT_LIMIT_MEMORY = 1000 DEFAULT_BUFFER_SIZE = 10000 DEFAULT_SAMPLE_SIZE = 100 DEFAULT_ENCODING_CONFIDENCE = 0.5 DEFAULT_FIELD_CONFIDENCE = 0.9 DEFAULT_PACKAGE_PROFILE = "data-package" DEFAULT_RESOURCE_PROFILE = "data-resource" DEFAULT_TABULAR_RESOURCE_PROFILE = "tabular-data-resource" DEFAULT_TRUE_VALUES = ["true", "True", "TRUE", "1"] DEFAULT_FALSE_VALUES = ["false", "False", "FALSE", "0"] 
DEFAULT_DATETIME_PATTERN = "%Y-%m-%dT%H:%M:%S%z" DEFAULT_DATE_PATTERN = "%Y-%m-%d" DEFAULT_TIME_PATTERN = "%H:%M:%S%z" DEFAULT_BARE_NUMBER = True DEFAULT_FLOAT_NUMBER = False DEFAULT_GROUP_CHAR = "" DEFAULT_DECIMAL_CHAR = "." DEFAULT_SERVER_PORT = 8000 DEFAULT_CANDIDATES = [ {"type": "yearmonth"}, {"type": "geopoint"}, {"type": "duration"}, {"type": "geojson"}, {"type": "object"}, {"type": "array"}, {"type": "datetime"}, {"type": "time"}, {"type": "date"}, {"type": "integer"}, {"type": "number"}, {"type": "boolean"}, {"type": "year"}, {"type": "string"}, ] # Backports # It can be removed after dropping support for Python 3.6 and Python 3.7 COMPRESSION_EXCEPTIONS = ( (zipfile.BadZipFile, gzip.BadGzipFile) if hasattr(gzip, "BadGzipFile") else (zipfile.BadZipFile) )
29.515464
89
0.71219
358
2,863
5.438547
0.354749
0.064715
0.098613
0.123267
0.259887
0.248588
0.163842
0.122753
0
0
0
0.013142
0.122948
2,863
96
90
29.822917
0.762246
0.036675
0
0
0
0
0.21766
0.007631
0
0
0
0
0
1
0.013333
false
0
0.053333
0
0.08
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dab536d1d9d21df99c2db8bf403753c5070c87f7
4,747
py
Python
A_gen_literal_po.py
Dharma-Sagar/tx-template
93a3005445a21a249107e01c446c9237e886de8e
[ "Apache-2.0" ]
null
null
null
A_gen_literal_po.py
Dharma-Sagar/tx-template
93a3005445a21a249107e01c446c9237e886de8e
[ "Apache-2.0" ]
null
null
null
A_gen_literal_po.py
Dharma-Sagar/tx-template
93a3005445a21a249107e01c446c9237e886de8e
[ "Apache-2.0" ]
1
2022-03-25T15:25:00.000Z
2022-03-25T15:25:00.000Z
from pathlib import Path import re import sys from uuid import uuid4 import polib from antx import transfer from botok import Text class Po: def __init__(self): self.transfer = Transfer() self.file = polib.POFile() self.file.metadata = { 'MIME-Version': '1.0', 'Content-Type': 'text/plain; charset=utf-8', 'Content-Transfer-Encoding': '8bit', } def _create_entry(self, msgid, msgstr="", msgctxt=None, comment=None, tcomment=None): """ :param msgid: string, the entry msgid. :param msgstr: string, the entry msgstr. :param msgctxt: string, the entry context. :param comment: string, the entry comment. :param tcomment: string, the entry translator comment. """ entry = polib.POEntry( msgid=msgid, msgstr=msgstr, msgctxt=msgctxt, comment=comment, tcomment=tcomment ) self.file.append(entry) def write_to_file(self, filename): self.file.save(filename) def lines_to_entries(self, dump, po_file): lines = self.transfer.generate_entries(dump, po_file) for num, l in enumerate(lines): line, ctxt = l no_notes = self.remove_peydurma_notes(line) if no_notes == "": no_notes, line = line, no_notes no_notes = re.sub('\[.+?\]', '', no_notes) # segment t = Text(no_notes) no_notes = t.tokenize_words_raw_text # format tokens no_notes = re.sub('([^།་_]) ([^_།་])', '\g<1>␣\g<2>', no_notes) # affixed particles no_notes = re.sub('_', ' ', no_notes) # spaces self._create_entry(msgid=no_notes, msgctxt=ctxt, tcomment=line) def txt_to_po(self, filename): lines = filename.read_text(encoding='utf-8') outfile = filename.parent / (filename.stem + ".po") self.lines_to_entries(lines, outfile) self.write_to_file(outfile) @staticmethod def remove_pagination(line): note = re.split(r'(\[.*?\])', line) if len(note) > 1: return ''.join([a for a in note if not a.startswith('\[')]) else: return "" @staticmethod def remove_peydurma_notes(line): note = re.split(r'(<.*?>)', line) if len(note) > 1: return ''.join([a for a in note if not a.startswith('<')]).replace(':', '') else: return "" class Transfer: """ limitation : 
in case a line is split on two lines in the updated .txt, it will keep the same uuid on the second line and only add a new uuid on the first line. """ def __init__(self): self.transfer = transfer def generate_entries(self, dump, po_file): if po_file.is_file(): dump = self.extract_entries(dump, po_file) updated = self.add_missing_uuids(dump) entries = [] for line in updated.strip().split('\n'): line = line.strip() line = self.remove_extra_uuid(line) txt, ctxt = line[:-1].split('—') entries.append([txt, ctxt]) return entries def extract_entries(self, dump, po_file): po_file = polib.pofile(po_file) po_entries = [] for p in po_file: if p.tcomment: line = p.tcomment else: line = p.msgid.replace(' ', '').replace(' ', ' ') po_entries.append([line, p.msgctxt]) po_dump = '\n'.join([''.join((a, f'—{b}—')) for a, b in po_entries]) pattern = [['uuid', '(—.+?—)']] transfered = self.transfer(po_dump, pattern, dump, 'txt') return transfered def add_missing_uuids(self, dump): lines = dump.strip().split('\n') for num, l in enumerate(lines): l = l.strip() if not l.endswith('—'): lines[num] = l + f'—{self.get_unique_id()}—' return '\n'.join(lines) @staticmethod def remove_extra_uuid(line): if line.count('—') > 2: idx1 = line.find('—') idx2 = line.find('—', idx1+1) return (line[:idx1] + line[idx2+1:]).strip() else: return line def get_unique_id(self): return uuid4().hex if __name__ == '__main__': folder = 'literal/tibetan' if len(sys.argv) > 1: stem = sys.argv[1] file = Path(folder) / (stem + '.txt') print(file) po = Po() po.txt_to_po(file) else: files = sorted(list(Path(folder).glob('*.txt'))) for file in files: print(file) po = Po() po.txt_to_po(file)
31.026144
97
0.540552
593
4,747
4.202361
0.251265
0.036517
0.02809
0.020465
0.158507
0.133226
0.074639
0.074639
0.074639
0.053772
0
0.006834
0.321888
4,747
152
98
31.230263
0.762349
0.090162
0
0.188034
0
0
0.055896
0.011557
0
0
0
0
0
1
0.111111
false
0
0.059829
0.008547
0.273504
0.017094
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dab78841b63bd54072dd390557aad814bcd2c522
2,845
py
Python
cvm/tests/test_trec.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
6
2019-07-04T09:42:53.000Z
2021-12-28T13:19:48.000Z
cvm/tests/test_trec.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
4
2019-06-27T08:05:18.000Z
2021-09-09T18:59:11.000Z
cvm/tests/test_trec.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
null
null
null
import gluon_zoo as gz import mxnet as mx from mxnet import ndarray as nd from mxnet import gluon import tvm from tvm.contrib import graph_runtime import nnvm import pickle import sym_pass as spass import dataset as ds import sym_calib as calib import sim_quant_helper as sim import ops_generator as opg import utils import mrt as _mrt def load_fname(suffix=None, with_ext=False): suffix = "."+suffix if suffix is not None else "" prefix = "./data/trec%s" % (suffix) return utils.extend_fname(prefix, with_ext=with_ext) batch_size = 16 ctx = mx.gpu() inputs_ext = { 'data': { 'shape': (38, batch_size) }} inputs = [mx.sym.var(n) for n in inputs_ext] utils.log_init() data_iter = ds.load_trec(batch_size) def data_iter_func(): return next(data_iter) data, label = data_iter_func() sym_file, param_file = load_fname() net1 = utils.load_model(sym_file, param_file, inputs, ctx=ctx) def trec(data): res = net1(data.as_in_context(ctx)) return res sym, params = mx.sym.load(sym_file), nd.load(param_file) sym, params = spass.sym_quant_prepare(sym, params, inputs_ext) if True: mrt = _mrt.MRT(sym, params, inputs_ext) mrt.set_data('data', data) mrt.calibrate(ctx=ctx) mrt.set_input_prec('data', 16) mrt.set_fixed('data') mrt.set_output_prec(8) qsym, qparams, inputs_ext = mrt.quantize() else: inputs_ext['data']['data'] = data th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx) qsym, qparams, _ = calib.pure_int8_quantize(sym, params, inputs_ext, th_dict) net2 = gluon.nn.SymbolBlock(qsym, inputs) utils.load_parameters(net2, qparams, ctx=ctx) def quantize(data): data = sim.load_real_data(data, 'data', inputs_ext) res = net2(data.as_in_context(ctx)) return res quant_sym, quant_params, quant_ext = load_fname("sym.quantize", with_ext=True) open(quant_sym, "w").write(qsym.tojson()) if False: inputs_ext['data']['shape'] = (38, 1) data = data[:, 0].reshape(38, 1) _mrt.std_dump(qsym, qparams, inputs_ext, data, "trec", batch=True, data_dtype="int32", max_num=1000, 
dump_ops=["sentimentnet0_embedding0_fwd"]) opg.dump_file("take", ["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.in.npy", "/data/std_out/trec/sentimentnet0_embedding0_fwd_1.mrt.dump.in.npy"], ["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.out.npy"], "/data/std_out/trec/sentimentnet0_embedding0_fwd.attr") exit() if False: while True: data, _ = next(data_iter) data = sim.load_real_data(data, 'data', inputs_ext) inputs_ext['data']['data'] = data spass.sym_dump_ops(qsym, qparams, inputs_ext, ctx=mx.gpu(3)) exit() utils.multi_eval_accuracy(trec, data_iter_func, quantize, iter_num=1000)
30.265957
83
0.693497
442
2,845
4.219457
0.273756
0.06756
0.034853
0.038606
0.216622
0.172654
0.172654
0.1437
0.120643
0.082038
0
0.018844
0.179262
2,845
93
84
30.591398
0.779872
0
0
0.125
0
0
0.130098
0.097046
0
0
0
0
0
1
0.05
false
0.0375
0.1875
0.0125
0.2875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dab7892075e86447b7eb58c074c9b44136134f5d
9,856
py
Python
public/scripts/node_process_scripts/vamps_script_parse.py
avoorhis/vamps-node.js
7071da6e569a669a06a6d21c23cc9c667ec49250
[ "MIT" ]
1
2015-10-28T14:48:38.000Z
2015-10-28T14:48:38.000Z
public/scripts/node_process_scripts/vamps_script_parse.py
avoorhis/vamps-node.js
7071da6e569a669a06a6d21c23cc9c667ec49250
[ "MIT" ]
48
2015-10-23T16:02:14.000Z
2022-02-15T00:46:39.000Z
public/scripts/node_process_scripts/vamps_script_parse.py
avoorhis/vamps-node.js
7071da6e569a669a06a6d21c23cc9c667ec49250
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2011, Marine Biological Laboratory # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # Please read the COPYING file. # import os from stat import * # ST_SIZE etc import sys import shutil import types import time import random import logging import csv, json import configparser as ConfigParser import fastalibAV as fastalib import datetime today = str(datetime.date.today()) import subprocess import pymysql as MySQLdb """ """ # class FastaReader: # def __init__(self,file_name=None): # self.file_name = file_name # self.h = open(self.file_name, 'rb') # #self.h = open(self.file_name) # self.seq = '' # self.id = None # # def next(self): # def read_id(): # #return self.h.readline().decode('utf-8').strip()[1:] # #print(self.h.readline()) # return self.h.readline().strip()[1:] # # def read_seq(): # #ret = bytearray(b'') # ret = '' # #ret = '' # while True: # line = self.h.readline() # print(str(line)) # while len(line) and not len(line.strip()): # # found empty line(s) # # line = self.h.readline() # print(str(line)) # if not len(line): # # EOF # break # # if str(line).startswith('>'): # # found new defline: move back to the start # self.h.seek(-len(line), os.SEEK_CUR) # break # # else: # ret += str(line).strip() # # return ret # # self.id = read_id() # self.seq = read_seq() # # if self.id: # return True # def get_data(args): pass def parse_matrix(args): print('running matrix') n = 0 dirty_datasets = {} clean_datasets = {} project_count = 0 max_ds_count = 0 with open(args.file, mode='r') as infile: for line in infile: items = line.strip().split('\t') #print('items',items) if not line or items[0][:5] == 'VAMPS': print('found vamps') continue if n==0: ds_items = items[1:] #line.strip('\n').split('\t')[1:] # stip 
original line on '\n' only to retain first '\t' ip present #print('ds_items',ds_items) for ds in ds_items: dirty_datasets[ds] = 0 else: line_items = items #line.strip().split('\t') #print('line_items',line_items) counts = line_items[1:] for i,cnt in enumerate(counts): #print(i,cnt) if cnt == '' or not cnt: cnt = 0 print('MISSING COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')') try: cnt = int(cnt) except: cnt = 0 print('NON-INTEGER COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')') project_count += int(cnt) dirty_datasets[ds_items[i]] += int(cnt) tax = line_items[0] n+=1 for ds in dirty_datasets: if not ds or ds == '': print('REMOVING EMPTY Dataset and data') else: clean_datasets[ds] = dirty_datasets[ds] print('clean datasets',clean_datasets.keys()) for ds in clean_datasets: if clean_datasets[ds] > max_ds_count: max_ds_count = clean_datasets[ds] return(clean_datasets, project_count, max_ds_count) def find_dataset_name(args, id): """ This should be the same fxn as in demultiplex """ # adjust to your specific defline if args.verbose: print(id) if args.separator == 'space': sampleName_items = id.split()[0].split('_') else: sampleName_items = id.split(args.separator)[0].split('_') test = sampleName_items[-1] try: int(test) sampleName = '_'.join(sampleName_items[:-1]) if args.verbose: print('INT',sampleName_items[-1]) except: sampleName = '_'.join(sampleName_items) if args.verbose: print('NO INT',sampleName_items[-1]) return sampleName def parse_fasta(args): print('running fasta') f = fastalib.SequenceSource(args.file) #f = FastaReader(args.file) datasets={} project_count = 0 max_ds_count = 0 # sample f.id: # 308_1|M01028:283:000000000-CPKFG:1:1101:16551:1784 1:N:0:15|o:99|m/o:0.030303|MR:n=0;r1=3;r2=0|Q30:p=93;p=98|CO:0|mismatches:3 # PC.354_3 FLP3FBN01EEWKD orig_bc=AGCACGAGCCTA new_bc=AGCACGAGCCTA bc_diffs=0 # dataset1 FLP3FBN01EEWKD orig_bc=AGCACGAGCCTA new_bc=AGCACGAGCCTA bc_diffs=0 while f.next(): #print(f.seq) #print(f.id) project_count += 
1 if args.separator == 'space': defline_pts = f.id.split() # splits on white space else: defline_pts = f.id.split(args.separator) # splits on white space dataset = find_dataset_name(args, f.id) seq_id = defline_pts[1] if args.verbose: print(dataset) print(seq_id) sys.exit() if dataset in datasets: datasets[dataset] += 1 else: datasets[dataset] = 1 # max_ds_count; number_of_ds; total_seqs; for ds in datasets: if datasets[ds] > max_ds_count: max_ds_count = datasets[ds] #print(datasets) return(datasets, project_count, max_ds_count) def write_config(args, datasets, project_count, max_ds_count): ini_file = os.path.join(args.project_dir,'INFO.config') print( 'Writing INFO.config file:',ini_file ) f = open(ini_file, 'w') f.write('[MAIN]'+"\n") f.write('project_name='+args.project+"\n") f.write("total_seq_count="+str(project_count)+"\n") f.write('owner='+args.owner+"\n") f.write("max_dataset_count="+str(max_ds_count)+"\n") f.write('public=0'+"\n") f.write('project_dir='+args.project_dir+"\n") f.write('type='+args.type+"\n") f.write('number_of_datasets='+str(len(datasets))+"\n") f.write("\n") f.write('[MAIN.dataset]'+"\n") for ds in datasets: f.write(ds+'='+str(datasets[ds])+"\n") f.close() if __name__ == '__main__': import argparse myusage = """usage: 5-vamps-clean-db.py [options] where -pid/--project_id clean this pid only -p/--project_name clean this name only -site/--site vamps, vampsdev or localhost -all/--all Remove ALL Data for fresh install Be Careful -- will remove ALL data from db """ parser = argparse.ArgumentParser(description="" ,usage=myusage) parser.add_argument("-f","--file", required=True, action="store", dest = "file", help="""ProjectID""") parser.add_argument("-p", "--project", required=True, action='store', dest = "project", help=" ") parser.add_argument("-d", "--project_dir", required=True, action='store', dest = "project_dir", help=" ") # parser.add_argument("-host", "--host", # required=True, action='store', dest = "host", default='localhost', # 
help=" ") parser.add_argument("-t", "--type", required=True, action='store', dest = "type", help=" ") parser.add_argument("-u", "--user", required=True, action='store', dest = "owner", help=" ") parser.add_argument("-sep", "--separator", required=False, action='store', dest = "separator", default='space', help=" ") parser.add_argument("-v", "--verbose", required=False, action="store_true", dest = "verbose", default=False, help="""JSON Files Directory""") # parser.add_argument("-data_dir", "--data_dir", # required=True, action='store', dest = "data_dir", default='user_data', # help=" config.USER_FILES_BASE ") args = parser.parse_args() # if args.host == 'vamps': # #db_host = 'vampsdb' # db_host = 'bpcweb8' # args.NODE_DATABASE = 'vamps2' # db_home = '/groups/vampsweb/vamps/' # elif args.host == 'vampsdev': # #db_host = 'vampsdev' # db_host = 'bpcweb7' # args.NODE_DATABASE = 'vamps2' # db_home = '/groups/vampsweb/vampsdev/' # else: # db_host = 'localhost' # db_home = '~/' # args.NODE_DATABASE = 'vamps_development' # # args.obj = MySQLdb.connect( host=db_host, db=args.NODE_DATABASE, read_default_file=os.path.expanduser("~/.my.cnf_node") ) # # #db = MySQLdb.connect(host="localhost", # your host, usually localhost # # read_default_file="~/.my.cnf" ) # args.cur = args.obj.cursor() #(args.proj, args.pid, args.dids, args.dsets) = get_data(args) if args.type == 'fasta': (datasets, project_count, max_ds_count) = parse_fasta(args) elif args.type == 'matrix': (datasets, project_count, max_ds_count) = parse_matrix(args) write_config(args, datasets, project_count, max_ds_count) print('Finished')
32.104235
138
0.542106
1,193
9,856
4.331098
0.251467
0.013548
0.027095
0.023224
0.203406
0.168183
0.13083
0.093284
0.053029
0.035611
0
0.017682
0.317167
9,856
306
139
32.20915
0.750074
0.356027
0
0.173913
0
0
0.161483
0
0
0
0
0
0
1
0.031056
false
0.006211
0.093168
0
0.130435
0.086957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dab7ff6f7d5580767751c098903a37a2703ecd3c
468
py
Python
Google It-Cert-Automation/f-Automating-Real-World-Tasks-with-Python/W1/os_walk_three_genx_fail.py
JAL-code/google
3989af94bb72b6fabde95ed50bac1493640d4cf6
[ "MIT" ]
null
null
null
Google It-Cert-Automation/f-Automating-Real-World-Tasks-with-Python/W1/os_walk_three_genx_fail.py
JAL-code/google
3989af94bb72b6fabde95ed50bac1493640d4cf6
[ "MIT" ]
null
null
null
Google It-Cert-Automation/f-Automating-Real-World-Tasks-with-Python/W1/os_walk_three_genx_fail.py
JAL-code/google
3989af94bb72b6fabde95ed50bac1493640d4cf6
[ "MIT" ]
null
null
null
import os
import os.path


def get_location_for_code():
    """Return the base directory this script inspects.

    NOTE(review): hard-coded to a personal desktop path; use the
    commented os.getcwd() alternative for a portable default.
    """
    # :relative location
    access_area = '/home/joseph/Desktop'
    # access_area = os.getcwd()
    return access_area


def select_case():
    """Return the demo case number (currently fixed at 4)."""
    case = 4
    return case


def main():
    """Fetch the first os.walk() entry for the configured directory."""
    home_address = get_location_for_code()
    case = select_case()
    # BUG FIX: generator objects have no .next() method in Python 3
    # (the original `os.walk(...).next()` raised AttributeError — hence
    # the "_fail" in the filename). Use the built-in next() instead,
    # and walk once rather than three times.
    root, dirs, files = next(os.walk(home_address))
    root   # the start directory itself, e.g. 'C:\dir1\dir2\startdir'
    dirs   # all the dirs directly inside the start directory
    files  # all the files directly inside the start directory


if __name__ == "__main__":
    main()
23.4
81
0.707265
72
468
4.388889
0.472222
0.139241
0.094937
0.161392
0.306962
0.240506
0.240506
0.240506
0.240506
0
0
0.020202
0.153846
468
19
82
24.631579
0.777778
0.267094
0
0
0
0
0.059172
0
0
0
0
0
0
1
0.153846
false
0
0.153846
0
0.461538
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
dab8d7a585af5b87de622c04d5bf382bd3e38332
2,755
py
Python
tests/test_cliently.py
elcaceres/cliently
5bb03f9c4c1dcd59d17c2bcb830a501a7215aca8
[ "MIT" ]
null
null
null
tests/test_cliently.py
elcaceres/cliently
5bb03f9c4c1dcd59d17c2bcb830a501a7215aca8
[ "MIT" ]
3
2018-04-04T18:55:16.000Z
2018-04-04T18:57:09.000Z
tests/test_cliently.py
elcaceres/cliently
5bb03f9c4c1dcd59d17c2bcb830a501a7215aca8
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Tests for `cliently` package."""

import unittest
from cliently import client
import os

# TODO: This hard coding needs to be fixed with a sandbox when doing things for realsies
# All fixtures come from the environment; the tests below run against the
# live Feedly API and fail (or pass None around) unless these are exported.
ACCESS_TOKEN=os.getenv("FEEDLY_ACCESS_TOKEN")
UID = os.getenv("FEEDLY_UID")
BASE_URL = "https://cloud.feedly.com"
VALID_STREAM_ID = os.getenv("FEEDLY_STREAM_ID")
VALID_JOURNAL = os.getenv("FEEDLY_TEST_JOURNAL")
# NOTE(review): os.getenv returns a single string, but the tests below index
# VALID_ENTRY_IDS like a list of entry ids — confirm the expected format.
VALID_ENTRY_IDS = os.getenv("FEEDLY_ENTRY_IDS")


class TestCliently(unittest.TestCase):
    """Tests for `cliently` package """

    def setUp(self):
        """Set up test fixtures, if any."""
        # Builds a real client per test; network calls happen in each test.
        options = dict(access_token=ACCESS_TOKEN, base_url=BASE_URL)
        self.myclient = client.FeedlyClient(**options)

    def tearDown(self):
        """Tear down test fixtures, if any."""
        pass

    def test_set_access_token(self):
        """Set the access token"""
        self.assertEqual(self.myclient.access_token, ACCESS_TOKEN)

    def test_set_base_url(self):
        """Set the base_url"""
        self.assertEqual(self.myclient.base_url, BASE_URL)

    def test_get_user_subscriptions(self):
        """Get a valid streamID & Journal name from list of subscriptions"""
        subscriptions = self.myclient.get_user_subscriptions()
        # First category id of each subscription is treated as its stream id.
        streamIDs = set(i["categories"][0]["id"] for i in subscriptions)
        subscribed_journals = set(i["title"] for i in subscriptions)
        self.assertIn(VALID_JOURNAL, subscribed_journals)
        self.assertIn(VALID_STREAM_ID, streamIDs)

    def test_count_get_entry_ids(self):
        """Test getting entry IDs work"""
        # test that count returns the correct number of things
        self.assertEqual(len(self.myclient.get_entry_ids(VALID_STREAM_ID, maxcount=1)["ids"]), 1)

    def test_continuation_entry_ids(self):
        """Test continuation works"""
        # test that count returns the correct number of things
        continuation = self.myclient.get_entry_ids(VALID_STREAM_ID, maxcount=1)["continuation"]
        self.assertEqual(len(self.myclient.get_entry_ids(VALID_STREAM_ID, continuation=continuation, maxcount=1)["ids"]), 1)

    def test_mark_read(self):
        # Marks live articles as read against the real API (side effect);
        # .ok suggests a requests.Response-like return — verify in client.
        self.assertTrue(self.myclient.mark_article_read(VALID_ENTRY_IDS).ok)

    def test_get_single_entry_from_id(self):
        """Retrieve a single entry from an entryId"""
        self.assertEqual(self.myclient.get_entries_from_ids(VALID_ENTRY_IDS[0])[0]["id"], VALID_ENTRY_IDS[0])

    def test_get_multiple_entries_from_ids(self):
        """Retrieve multiple entries from an entryId"""
        res = [i["id"] for i in self.myclient.get_entries_from_ids(VALID_ENTRY_IDS)]
        self.assertEqual(res, VALID_ENTRY_IDS)
39.927536
124
0.692922
374
2,755
4.863636
0.28877
0.057174
0.042881
0.04453
0.20066
0.20066
0.183617
0.183617
0.183617
0.08906
0
0.004507
0.194555
2,755
69
125
39.927536
0.815232
0.216697
0
0
0
0
0.068128
0
0
0
0
0.014493
0.243243
1
0.27027
false
0.027027
0.081081
0
0.378378
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
2
dab962fd7005a682b3d02a7ad6700beb39520d3f
3,907
py
Python
pyArango/validation.py
NeilduToit13/pyArango
75a883c073ee9b00ff4754139b823dd0df498036
[ "Apache-2.0" ]
null
null
null
pyArango/validation.py
NeilduToit13/pyArango
75a883c073ee9b00ff4754139b823dd0df498036
[ "Apache-2.0" ]
null
null
null
pyArango/validation.py
NeilduToit13/pyArango
75a883c073ee9b00ff4754139b823dd0df498036
[ "Apache-2.0" ]
null
null
null
from .theExceptions import ValidationError

import re


class Validator(object):
    """Base class that all validators must inherit from."""

    def __init__(self, *args, **kwargs):
        pass

    def validate(self, value):
        """The only function that a validator must implement. Must return
        True if everything went well or raise a ValidationError otherwise.
        """
        # BUG FIX: `NotImplemented` is a constant, not an exception class;
        # raising it fails with a TypeError. Use NotImplementedError.
        raise NotImplementedError("Should be implemented in child")

    def __str__(self):
        """Redefine in children to give a quick overview of the validator."""
        return self.__class__.__name__


class NotNull(Validator):
    """Checks that the field has a non-null value."""

    def validate(self, value, zero=True, empty_string=True):
        # BUG FIX: the original `(value == 0 is zero)` chained into
        # `(value == 0) and (0 is zero)`, which is never True for the
        # default zero=True; the intent is "reject 0 when zero=True".
        if value is None or (value == 0 and zero) or (value == "" and empty_string):
            raise ValidationError("Field can't have a null value: '%s'" % value)
        return True


class Email(Validator):
    """Checks if the field contains an email address."""

    # BUG FIX: `[A-z]` also matches the punctuation characters between 'Z'
    # and 'a' in ASCII; use explicit A-Za-z ranges. Raw string avoids the
    # invalid '\.' escape warning.
    EMAIL_PATTERN = r'^[A-Za-z0-9._-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}$'

    def validate(self, value):
        if re.match(self.EMAIL_PATTERN, value) is None:
            raise ValidationError("The email address: %s is invalid" % value)
        return True


class Numeric(Validator):
    """Checks if the value is numerical."""

    def validate(self, value):
        try:
            float(value)
        except (TypeError, ValueError):
            # Narrowed from a bare except: only conversion failures count.
            raise ValidationError("%s is not valid numerical value" % value)
        return True


class Int(Validator):
    """The value must be an integer."""

    def validate(self, value):
        if not isinstance(value, int):
            raise ValidationError("%s is not a valid integer" % value)
        return True


class Bool(Validator):
    """The value must be a boolean."""

    def validate(self, value):
        if not isinstance(value, bool):
            raise ValidationError("%s is not a valid boolean" % value)
        return True


class String(Validator):
    """The value must be a string."""

    def validate(self, value):
        # BUG FIX: the Python 2 `unicode` name does not exist on Python 3,
        # so the original check raised NameError for any non-str value;
        # `str` alone is correct there.
        if not isinstance(value, str):
            raise ValidationError("%s is not a valid string" % value)
        return True


class Enumeration(Validator):
    """The value must be among the allowed ones."""

    def __init__(self, allowed):
        self.allowed = set(allowed)

    def validate(self, value):
        if value not in self.allowed:
            raise ValidationError(
                "%s is not among the allowed values %s" % (value, self.allowed))
        return True


class Range(Validator):
    """The value must lie in the inclusive [lower, upper] range."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

    def validate(self, value):
        if value < self.lower or value > self.upper:
            raise ValidationError(
                "%s is not in [%s, %s]" % (value, self.lower, self.upper))
        # BUG FIX: return True on success like every other validator
        # (the original implicitly returned None).
        return True

    def __str__(self):
        # BUG FIX: referenced nonexistent self.minLen/self.maxLen
        # (copy-paste from Length); use the attributes set in __init__.
        return "%s[%s, %s]" % (self.__class__.__name__, self.lower, self.upper)


class Length(Validator):
    """Validates that the value's length is between the given bounds."""

    def __init__(self, min_length, max_length):
        # BUG FIX: __init__ stored `minlength`/`maxlength` while validate()
        # and __str__ read `min_length`/`max_length` -> AttributeError.
        self.min_length = min_length
        self.max_length = max_length

    def validate(self, value):
        try:
            length = len(value)
        except TypeError:
            raise ValidationError(
                "Field '%s' of type '%s' has no length" % (value, type(value)))
        if self.min_length <= length <= self.max_length:
            return True
        raise ValidationError(
            "Field must have a length in ['%s';'%s'] got: '%s'"
            % (self.min_length, self.max_length, length))

    def __str__(self):
        return "%s[%s, %s]" % (self.__class__.__name__, self.min_length, self.max_length)
28.940741
131
0.600973
490
3,907
4.659184
0.240816
0.048182
0.065703
0.087604
0.266316
0.191853
0.12615
0.0841
0.031537
0.031537
0
0.002519
0.288713
3,907
134
132
29.156716
0.819
0.153826
0
0.333333
0
0.013889
0.130281
0.012254
0
0
0
0
0
1
0.236111
false
0.013889
0.027778
0.027778
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
dab9da99251eb13cbf19a332bec3cc7115eec8f3
859
py
Python
yahoo_nsfw.py
m0r13/tensorflow-open_nsfw
35da5a6eb71ad5490b25ad433cfee02986f123eb
[ "BSD-2-Clause" ]
null
null
null
yahoo_nsfw.py
m0r13/tensorflow-open_nsfw
35da5a6eb71ad5490b25ad433cfee02986f123eb
[ "BSD-2-Clause" ]
null
null
null
yahoo_nsfw.py
m0r13/tensorflow-open_nsfw
35da5a6eb71ad5490b25ad433cfee02986f123eb
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python

import sys
import tensorflow as tf
from image_utils import create_raw_image_loader
from model import OpenNsfwModel, InputType


class YahooNSFWClassifier:
    """Wraps Yahoo's open_nsfw model behind a single classify() call."""

    def __init__(self, weights_path):
        """Build the graph, load the weights file and prepare an image loader."""
        self.session = tf.Session()
        self.model = OpenNsfwModel()
        self.model.build(weights_path=weights_path)
        self.session.run(tf.global_variables_initializer())
        self.fn_load_image = create_raw_image_loader()

    def classify(self, image):
        """Run the model on *image* and return its prediction array."""
        pixels = self.fn_load_image(image)
        feed = {self.model.input: pixels}
        return self.session.run(self.model.predictions, feed_dict=feed)


if __name__ == "__main__":
    from PIL import Image

    classifier = YahooNSFWClassifier("data/open_nsfw-weights.npy")
    score = classifier.classify(Image.open(sys.argv[1]))[0][1]
    print("NSFW score: %f" % score)
33.038462
99
0.718277
111
859
5.288288
0.486486
0.061329
0.0477
0.068143
0
0
0
0
0
0
0
0.004237
0.175786
859
25
100
34.36
0.824859
0.023283
0
0
0
0
0.057279
0.031026
0
0
0
0
0
1
0.105263
false
0
0.263158
0
0.473684
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dabd1be8c1b91392a0032b7379bdd13519e05e0b
4,823
py
Python
crazyflie-lib-python/cflib/crazyflie/localization.py
loujiabin1994/crazyswarm
893325b63b3b19015fe261bfa989846a1c82dc82
[ "MIT" ]
null
null
null
crazyflie-lib-python/cflib/crazyflie/localization.py
loujiabin1994/crazyswarm
893325b63b3b19015fe261bfa989846a1c82dc82
[ "MIT" ]
null
null
null
crazyflie-lib-python/cflib/crazyflie/localization.py
loujiabin1994/crazyswarm
893325b63b3b19015fe261bfa989846a1c82dc82
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # || ____ _ __ # +------+ / __ )(_) /_______________ _____ ___ # | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \ # +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/ # || || /_____/_/\__/\___/_/ \__,_/ /___/\___/ # # Copyright (C) 2017-2020 Bitcraze AB # # Crazyflie Nano Quadcopter Client # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """ Subsytem handling localization-related data communication """ import collections import logging import struct from cflib.crtp.crtpstack import CRTPPacket from cflib.crtp.crtpstack import CRTPPort from cflib.utils.callbacks import Caller __author__ = 'Bitcraze AB' __all__ = ['Localization', 'LocalizationPacket'] logger = logging.getLogger(__name__) # A generic location packet contains type and data. When received the data # may be decoded by the lib. 
LocalizationPacket = collections.namedtuple('localizationPacket',
                                            ['type', 'raw_data', 'data'])


class Localization():
    """
    Handle localization-related data communication with the Crazyflie
    """

    # Implemented channels
    POSITION_CH = 0
    GENERIC_CH = 1

    # Location message types for generic channel
    RANGE_STREAM_REPORT = 0
    RANGE_STREAM_REPORT_FP16 = 1
    LPS_SHORT_LPP_PACKET = 2
    EMERGENCY_STOP = 3
    EMERGENCY_STOP_WATCHDOG = 4
    COMM_GNSS_NMEA = 6
    COMM_GNSS_PROPRIETARY = 7
    EXT_POSE = 8
    EXT_POSE_PACKED = 9

    def __init__(self, crazyflie=None):
        """
        Initialize the Extpos object.
        """
        self._cf = crazyflie
        self.receivedLocationPacket = Caller()
        self._cf.add_port_callback(CRTPPort.LOCALIZATION, self._incoming)

    def _incoming(self, packet):
        """
        Callback for data received from the copter.

        Decodes the known packet types and forwards a LocalizationPacket
        to receivedLocationPacket subscribers.
        """
        if len(packet.data) < 1:
            # BUG FIX: the two adjacent string literals were concatenated
            # without a separating space, logging "incorrectlength".
            logger.warning('Localization packet received with incorrect '
                           'length (length is {})'.format(len(packet.data)))
            return

        # First byte is the message type, the rest is the payload.
        pk_type = struct.unpack('<B', packet.data[:1])[0]
        data = packet.data[1:]

        # Decoding the known packet types
        # TODO: more generic decoding scheme?
        decoded_data = None
        if pk_type == self.RANGE_STREAM_REPORT:
            # Payload is a sequence of (uint8 anchor id, float distance)
            # records, 5 bytes each.
            if len(data) % 5 != 0:
                logger.error('Wrong range stream report data length')
                return
            decoded_data = {}
            raw_data = data
            for i in range(int(len(data) / 5)):
                anchor_id, distance = struct.unpack('<Bf', raw_data[:5])
                decoded_data[anchor_id] = distance
                raw_data = raw_data[5:]

        pk = LocalizationPacket(pk_type, data, decoded_data)
        self.receivedLocationPacket.call(pk)

    def send_extpos(self, pos):
        """
        Send the current Crazyflie X, Y, Z position. This is going to be
        forwarded to the Crazyflie's position estimator.
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.POSITION_CH
        pk.data = struct.pack('<fff', pos[0], pos[1], pos[2])
        self._cf.send_packet(pk)

    def send_extpose(self, pos, quat):
        """
        Send the current Crazyflie pose (position [x, y, z] and
        attitude quaternion [qx, qy, qz, qw]). This is going to be forwarded
        to the Crazyflie's position estimator.
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.GENERIC_CH
        pk.data = struct.pack('<Bfffffff',
                              self.EXT_POSE,
                              pos[0], pos[1], pos[2],
                              quat[0], quat[1], quat[2], quat[3])
        self._cf.send_packet(pk)

    def send_short_lpp_packet(self, dest_id, data):
        """
        Send ultra-wide-band LPP packet to dest_id
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.GENERIC_CH
        pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data
        self._cf.send_packet(pk)
33.262069
79
0.609579
575
4,823
4.843478
0.393043
0.012567
0.024417
0.020467
0.202154
0.159785
0.131059
0.113106
0.113106
0.113106
0
0.015707
0.287166
4,823
144
80
33.493056
0.794357
0.37259
0
0.191176
0
0
0.069538
0
0
0
0
0.006944
0
1
0.073529
false
0
0.088235
0
0.367647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dabfe56a60fe696acd94d21ad2b11875bcc6001b
1,396
py
Python
uibcdf_stdlib/exceptions.py
uibcdf/UIBCDF-Standard-Library
16665d12d94c8d82b17356f79795d3741cc86324
[ "MIT" ]
1
2021-07-06T18:51:45.000Z
2021-07-06T18:51:45.000Z
uibcdf_stdlib/exceptions.py
dprada/UIBCDF-Standard-Library
96906e631623ff8de59bd5e15fe25d87a9d2a4f4
[ "MIT" ]
null
null
null
uibcdf_stdlib/exceptions.py
dprada/UIBCDF-Standard-Library
96906e631623ff8de59bd5e15fe25d87a9d2a4f4
[ "MIT" ]
1
2021-07-06T18:51:48.000Z
2021-07-06T18:51:48.000Z
class BadCallError(ValueError):
    """Raised when a method is invoked the wrong way."""

    def __init__(self, message=None, documentation_web=None):
        if message is None:
            # BUG FIX: the adjacent literals were joined without a space,
            # producing "...formore information.".
            message = ('Wrong way of invoking this method. Check the online '
                       'documentation for more information.')
        if documentation_web is not None:
            # Replace the trailing period with a pointer to the docs.
            message = message[:-1] + ': {}'.format(documentation_web)
        super().__init__(message)


class NotImplementedError(NotImplementedError):
    """Raised for features that are not implemented yet.

    NOTE(review): this intentionally(?) shadows the builtin
    NotImplementedError inside this module — renaming would break callers,
    so it is kept as-is.
    """

    def __init__(self, message=None, issues_web=None):
        if message is None:
            if issues_web is not None:
                # BUG FIX: space added between 'in' and the URL, and the
                # 'implemeted' typo corrected.
                message = ('It has not been implemented yet. Write a new '
                           'issue in {} asking for it.').format(issues_web)
        super().__init__(message)


class LibraryNotFound(NotImplementedError):
    """Raised when an optional python library is missing."""

    def __init__(self, library):
        message = 'The python library {} was not found.'.format(library)
        super().__init__(message)


class InputArgumentError(NotImplementedError):
    """Raised when an input argument has an invalid value."""

    def __init__(self, argument, method, documentation_web=None):
        # BUG FIX: in the original, `.format()` bound only to the *second*
        # literal (method calls bind tighter than implicit concatenation),
        # so the "{}" placeholders were never filled. Parenthesize the full
        # string before formatting, and add the missing space.
        message = ('Invalid value for input argument "{}" in method or class '
                   '"{}". Check the online documentation for more '
                   'information.').format(argument, method)
        if documentation_web is not None:
            message = message[:-1] + ': {}'.format(documentation_web)
        super().__init__(message)
31.022222
96
0.637536
151
1,396
5.622517
0.344371
0.113074
0.051826
0.042403
0.454653
0.34629
0.294464
0.188457
0.188457
0.188457
0
0.001936
0.260029
1,396
44
97
31.727273
0.819942
0
0
0.384615
0
0
0.224373
0
0
0
0
0
0
1
0.153846
false
0
0
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dac5823dc3e2b862d786e19df82399b8a50a961d
2,757
py
Python
feature-detection.py
ronheywood/opencv
664de6f5b3708af6c00348a31a37db484137bb8d
[ "MIT" ]
7
2021-07-08T14:57:22.000Z
2022-01-12T09:15:01.000Z
feature-detection.py
ronheywood/opencv
664de6f5b3708af6c00348a31a37db484137bb8d
[ "MIT" ]
1
2021-08-18T20:15:44.000Z
2022-02-04T18:04:39.000Z
feature-detection.py
ronheywood/opencv
664de6f5b3708af6c00348a31a37db484137bb8d
[ "MIT" ]
2
2021-10-14T18:40:26.000Z
2022-01-10T00:36:29.000Z
# Golf-ball feature detection demo: locate a ball, crop it, and (in debug
# mode) visualise the emboss/threshold/edge/contour pipeline on the crop.
# import the necessary packages
import cv2
import sys
import os
import numpy as np
import argparse

# Local helper modules live under ./modules/.
sys.path.append(os.path.abspath('./modules/'))
import detection
import helpers

ap = argparse.ArgumentParser()
# NOTE(review): argparse `type=bool` is a known gotcha — any non-empty
# string (including "False") parses as True; `action="store_true"` would
# be the conventional flag form. Left as-is (doc-only change).
ap.add_argument("-d", "--debug", type=bool, default=False, help="experiment with different image filters")
ap.add_argument("-i", "--imagepath", type=str, default = None, help="path to test image")
args = ap.parse_args()

# Fall back to a random bundled test image when no path is given.
if(args.imagepath is None):
    image_path = helpers.get_random_test_image()
else:
    image_path = args.imagepath

image = cv2.imread(image_path,1)  # 1 = load as colour (BGR)
# Presumably returns an (x, y, w, h) bounding box or a falsy value —
# confirm against modules/detection.
ball = detection.GolfBallDetection(image)

if ball:
    (x,y,w,h) = ball
    x_plus_w = x+w
    y_plus_h = y+h
    # Crop the detected ball out of a copy of the frame.
    section = image.copy()[y:y+h, x:x+w]
    cv2.imshow("Crop to ball",section)
    # generating the kernels
    kernel1 = np.array([[0, -1, -1],  # kernel for embossing bottom left side
                        [1, 0, -1],
                        [1, 1, 0]])
    # you can generate kernels for embossing top as well
    gray = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    if(args.debug):
        cv2.imshow("Gray",gray)
    if(args.debug):
        # Offset of 128 keeps the embossed result centred in uint8 range.
        embossdepth = np.ones((h, w), np.uint8) * 128
        embossed = cv2.add(cv2.filter2D(gray, -1, kernel1),embossdepth)  # emboss on bottom left side
        cv2.imshow("Embossed",embossed)
    if(args.debug):
        # Otsu threshold picks the split point automatically; invert so the
        # ball region is white.
        _, binary = cv2.threshold(cv2.cvtColor(section, cv2.COLOR_BGR2GRAY), 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        binary = 255 - binary
        cv2.imshow("Binary",binary)
    if(args.debug):
        edges = cv2.Canny(binary,100,200)
        cv2.imshow("Edges",edges)
    if(args.debug):
        # NOTE(review): `embossed_edges` (and `embossed`, `binary`) only
        # exist when --debug is set; the contour/dimple visualisation below
        # is therefore kept inside this debug branch — verify the intended
        # nesting against the original file.
        embossed_edges = cv2.Canny(embossed,100,200)
        cv2.imshow("Embossed Edges",embossed_edges)
        contours,heirarchy = cv2.findContours(embossed_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        im2 = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
        dimples = []
        for contour in contours:
            # Keep roundish contours (>8 vertices) of non-trivial area —
            # candidate dimples on the ball surface.
            approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
            area = cv2.contourArea(contour)
            if ((len(approx) > 8) & (area > 30) ):
                dimples.append(contour)
        cv2.drawContours(im2, dimples, -1, (0,255,0), 3)
        cv2.imshow("Contours",im2)
    # Draw either the fitted circle or a plain labelled bounding box.
    circle = detection.get_ball_circle(image,x,y,w,h)
    if circle is not None:
        detection.draw_circle_around_ball(image,circle,(x,y))
    else:
        detection.draw_boundaries_and_label(image,(x,y),(w,h),(0,255,0),'Ball')
else:
    print('[INFO] Failed to find ball')

cv2.imshow("Detecting features",image)
cv2.waitKey(0)  # block until a key is pressed
cv2.destroyAllWindows()
32.05814
122
0.629307
376
2,757
4.526596
0.364362
0.042303
0.032315
0.007051
0.070505
0.059929
0
0
0
0
0
0.046919
0.234675
2,757
86
123
32.05814
0.759716
0.060936
0
0.117647
0
0
0.075077
0
0
0
0
0
0
1
0
false
0
0.102941
0
0.102941
0.014706
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dac5b2ff80767ae00c126aba31c2851cfe3769ef
7,538
py
Python
loopy/execution.py
xywei/loopy
f176bc70ef52862ee0dfcc661fbe4b86b1726f55
[ "MIT" ]
null
null
null
loopy/execution.py
xywei/loopy
f176bc70ef52862ee0dfcc661fbe4b86b1726f55
[ "MIT" ]
null
null
null
loopy/execution.py
xywei/loopy
f176bc70ef52862ee0dfcc661fbe4b86b1726f55
[ "MIT" ]
null
null
null
from __future__ import division, with_statement, absolute_import

__copyright__ = "Copyright (C) 2012-16 Andreas Kloeckner"

__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import six
import numpy as np
from pytools import ImmutableRecord, memoize_method
from loopy.diagnostic import LoopyError

import logging
logger = logging.getLogger(__name__)

from pytools.persistent_dict import PersistentDict
from loopy.tools import LoopyKeyBuilder
from loopy.version import DATA_MODEL_VERSION


# {{{ object array argument packing

class _PackingInfo(ImmutableRecord):
    """Per-argument record describing how a sep-tagged array is split.

    .. attribute:: name
    .. attribute:: sep_shape
    .. attribute:: subscripts_and_names

        A list of type ``[(index, unpacked_name), ...]``.

    Also carries ``is_written`` (bool): whether the kernel writes this arg.
    """


class SeparateArrayPackingController(object):
    """For argument arrays with axes tagged to be implemented as separate
    arrays, this class provides preprocessing of the incoming arguments so
    that all sub-arrays may be passed in one object array (under the original,
    un-split argument name) and are unpacked into separate arrays before being
    passed to the kernel.

    It also repacks outgoing arrays of this type back into an object array.
    """

    def __init__(self, kernel):
        # map from arg name
        self.packing_info = {}

        from loopy.kernel.array import ArrayBase
        for arg in kernel.args:
            if not isinstance(arg, ArrayBase):
                continue
            if arg.shape is None or arg.dim_tags is None:
                continue

            subscripts_and_names = arg.subscripts_and_names()
            if subscripts_and_names is None:
                continue

            self.packing_info[arg.name] = _PackingInfo(
                    name=arg.name,
                    sep_shape=arg.sep_shape(),
                    subscripts_and_names=subscripts_and_names,
                    is_written=arg.name in kernel.get_written_variables())

    def unpack(self, kernel_kwargs):
        """Replace each packed object-array kwarg by its per-index
        sub-arrays, keyed by their unpacked names."""
        if not self.packing_info:
            return kernel_kwargs

        kernel_kwargs = kernel_kwargs.copy()

        for packing_info in six.itervalues(self.packing_info):
            arg_name = packing_info.name
            if packing_info.name in kernel_kwargs:
                arg = kernel_kwargs[arg_name]
                for index, unpacked_name in packing_info.subscripts_and_names:
                    assert unpacked_name not in kernel_kwargs
                    kernel_kwargs[unpacked_name] = arg[index]
                del kernel_kwargs[arg_name]

        return kernel_kwargs

    def pack(self, outputs):
        """Re-assemble written sub-arrays into one object array per
        original (pre-split) argument name."""
        if not self.packing_info:
            return outputs

        for packing_info in six.itervalues(self.packing_info):
            if not packing_info.is_written:
                continue

            # BUG FIX: the deprecated ``np.object`` alias was removed in
            # NumPy 1.24; the builtin ``object`` is the documented,
            # equivalent spelling.
            result = outputs[packing_info.name] = \
                    np.zeros(packing_info.sep_shape, dtype=object)

            for index, unpacked_name in packing_info.subscripts_and_names:
                result[index] = outputs.pop(unpacked_name)

        return outputs

# }}}


# {{{ KernelExecutorBase

typed_and_scheduled_cache = PersistentDict(
        "loopy-typed-and-scheduled-cache-v1-"+DATA_MODEL_VERSION,
        key_builder=LoopyKeyBuilder())


class KernelExecutorBase(object):
    """An object connecting a kernel to a :class:`pyopencl.Context`
    for execution.

    .. automethod:: __init__
    .. automethod:: __call__
    """

    def __init__(self, kernel):
        """
        :arg kernel: a loopy.LoopKernel
        """
        self.kernel = kernel

        self.packing_controller = SeparateArrayPackingController(kernel)

        self.output_names = tuple(arg.name for arg in self.kernel.args
                if arg.name in self.kernel.get_written_variables())

        # True when at least one argument still needs its dtype supplied
        # at call time.
        self.has_runtime_typed_args = any(
                arg.dtype is None for arg in kernel.args)

    def get_typed_and_scheduled_kernel_uncached(self, arg_to_dtype_set):
        """Apply caller-supplied dtypes, run type inference, and schedule
        the kernel if it has no schedule yet."""
        from loopy.kernel.tools import add_dtypes

        kernel = self.kernel

        if arg_to_dtype_set:
            var_to_dtype = {}
            for var, dtype in arg_to_dtype_set:
                # Map implementation argument names back to kernel args
                # where possible; fall back to the name as given.
                try:
                    dest_name = kernel.impl_arg_to_arg[var].name
                except KeyError:
                    dest_name = var

                # NOTE(review): a plain dict assignment cannot raise
                # KeyError — this handler looks unreachable; kept as-is to
                # preserve behavior.
                try:
                    var_to_dtype[dest_name] = dtype
                except KeyError:
                    raise LoopyError("cannot set type for '%s': "
                            "no known variable/argument with that name"
                            % var)

            kernel = add_dtypes(kernel, var_to_dtype)

        from loopy.type_inference import infer_unknown_types
        kernel = infer_unknown_types(kernel, expect_completion=True)

        if kernel.schedule is None:
            from loopy.preprocess import preprocess_kernel
            kernel = preprocess_kernel(kernel)

            from loopy.schedule import get_one_scheduled_kernel
            kernel = get_one_scheduled_kernel(kernel)

        return kernel

    @memoize_method
    def get_typed_and_scheduled_kernel(self, arg_to_dtype_set):
        """Cached (in-memory and on disk) wrapper around
        :meth:`get_typed_and_scheduled_kernel_uncached`."""
        from loopy import CACHING_ENABLED

        cache_key = (type(self).__name__, self.kernel, arg_to_dtype_set)

        if CACHING_ENABLED:
            try:
                return typed_and_scheduled_cache[cache_key]
            except KeyError:
                pass

        logger.debug("%s: typed-and-scheduled cache miss" % self.kernel.name)

        kernel = self.get_typed_and_scheduled_kernel_uncached(arg_to_dtype_set)

        if CACHING_ENABLED:
            typed_and_scheduled_cache[cache_key] = kernel

        return kernel

    def arg_to_dtype_set(self, kwargs):
        """Build a frozenset of (arg_name, NumpyType) pairs from the dtypes
        of the values actually passed in; None when all args are typed."""
        if not self.has_runtime_typed_args:
            return None

        from loopy.types import NumpyType
        target = self.kernel.target

        impl_arg_to_arg = self.kernel.impl_arg_to_arg
        arg_to_dtype = {}
        for arg_name, val in six.iteritems(kwargs):
            arg = impl_arg_to_arg.get(arg_name, None)

            if arg is None:
                # offsets, strides and such
                continue

            if arg.dtype is None and val is not None:
                try:
                    dtype = val.dtype
                except AttributeError:
                    pass
                else:
                    arg_to_dtype[arg_name] = NumpyType(dtype, target)

        return frozenset(six.iteritems(arg_to_dtype))

# }}}

# vim: foldmethod=marker
32.076596
79
0.648183
926
7,538
5.046436
0.278618
0.035309
0.0214
0.019474
0.152579
0.109566
0.064199
0.040659
0.040659
0.021828
0
0.001309
0.290395
7,538
234
80
32.213675
0.872313
0.107588
0
0.214286
0
0
0.181173
0.005289
0
0
0
0
0.007143
1
0.05
false
0.014286
0.114286
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dac7f27abecc653f42dd61c7ee788bece9231835
3,419
py
Python
rebound/python_examples/megno_interactive/problem.py
rodluger/ttv-devil
e534e4f3cd75db951cba54441f7a5458c87e0cf9
[ "MIT" ]
null
null
null
rebound/python_examples/megno_interactive/problem.py
rodluger/ttv-devil
e534e4f3cd75db951cba54441f7a5458c87e0cf9
[ "MIT" ]
null
null
null
rebound/python_examples/megno_interactive/problem.py
rodluger/ttv-devil
e534e4f3cd75db951cba54441f7a5458c87e0cf9
[ "MIT" ]
null
null
null
#!/usr/bin/python
# Interactive MEGNO / Lyapunov-timescale map for a Jupiter+Saturn system,
# progressively refined on a grid of Saturn (a, e) values.
import rebound
from rebound.interruptible_pool import InterruptiblePool
# Import other modules
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from matplotlib.colors import LogNorm


# Runs one simulation.
def simulation(par):
    """Integrate one (saturn_a, saturn_e) system for 500 years and return
    [MEGNO, Lyapunov timescale in years]."""
    saturn_a, saturn_e = par
    sim = rebound.Simulation()
    sim.integrator = "whfast"
    sim.integrator_whfast_safe_mode = 0
    sim.dt = 5.
    # These parameters are only approximately those of Jupiter and Saturn.
    sim.add(m=1.)
    sim.add(m=0.000954, a=5.204, anom=0.600, omega=0.257, e=0.048)
    sim.add(m=0.000285, a=saturn_a, anom=0.871, omega=1.616, e=saturn_e)
    sim.move_to_com()
    sim.init_megno()
    sim.integrate(5e2*2.*np.pi)  # integrator for 500 years
    # returns MEGNO and Lyapunov timescale in years
    return [sim.calculate_megno(), 1./(sim.calculate_lyapunov()*2.*np.pi)]


def updatePlot(first=False):
    """Rebuild the (e, a) result grid from resd and redraw both maps."""
    # This constructs a 2d array.
    # The current implementation is slow, but simple.
    # BUG FIX: dict.keys() is a view in Python 3 and np.array() over it
    # yields a useless 0-d object array; materialise the keys first.
    keys = np.array(list(resd.keys()))
    x1 = np.unique(keys.T[0])
    x2 = np.unique(keys.T[1])
    res = np.empty((len(x2), len(x1), 2))
    for i, _x1 in enumerate(x1):
        for j, _x2 in enumerate(x2):
            res[j][i] = resd[(_x1, _x2)]

    # Clip arrays to sensible display ranges.
    megno = np.clip(res[:, :, 0], 1.8, 4.)
    lyaptimescale = np.clip(np.absolute(res[:, :, 1]), 1e1, 4e3)

    # Plot MEGNO
    im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto',
                          origin="lower", interpolation='nearest',
                          cmap="RdYlGn_r", extent=extent)
    # Plot Lyapunov timescale (log colour scale)
    im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=4e3, norm=LogNorm(),
                          aspect='auto', origin="lower",
                          interpolation='nearest', cmap="RdYlGn",
                          extent=extent)
    if first:
        # Colorbars only need creating once.
        cb1 = plt.colorbar(im1, ax=axarr[0])
        cb1.solids.set_rasterized(True)
        cb1.set_label("MEGNO $\\langle Y \\rangle$")
        cb2 = plt.colorbar(im2, ax=axarr[1])
        cb2.solids.set_rasterized(True)
        cb2.set_label("Lyapunov timescale [years]")
    plt.draw()


pool = InterruptiblePool()  # Number of threads default to the number of CPUs on the system


def runSim(p):
    """Run the simulations for parameter list p in parallel and record
    the (NaN-sanitised) results in resd keyed by parameter tuple."""
    print("Running %d simulations." % len(p))
    res = np.nan_to_num(np.array(pool.map(simulation, p)))
    for i, r in enumerate(res):
        resd[p[i]] = r


# Setup grid and run many simulations in parallel
a = np.array([7., 10.])  # range of saturn semi-major axis in AU
e = np.array([0., 0.5])  # range of saturn eccentricity

# Setup plots
f, axarr = plt.subplots(2, figsize=(10, 8))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
    ax.set_xlim(extent[0], extent[1])
    ax.set_ylim(extent[2], extent[3])
    ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
    ax.set_ylabel("$e_{\mathrm{Saturn}}$")

# Results are stored in this dictionary
resd = {}

# Initial parameters (2x2 grid)
parameters = [(_a, _e) for _a in a for _e in e]

# Run and plot first simulations
runSim(parameters)
updatePlot(first=True)

# Eight levels of refinement
# BUG FIX: xrange was removed in Python 3; range is equivalent here.
for i in range(8):
    _a = np.linspace((a[0]+a[1])/2., a[-1], len(a))[:-1]
    a = np.sort(np.concatenate((a, _a)))
    parameters = [(__a, _e) for __a in _a for _e in e]
    runSim(parameters)
    updatePlot()

    _e = np.linspace((e[0]+e[1])/2., e[-1], len(e))[:-1]
    e = np.sort(np.concatenate((e, _e)))
    parameters = [(_a, __e) for _a in a for __e in _e]
    runSim(parameters)
    updatePlot()

# BUG FIX: raw_input was renamed to input in Python 3.
input('Press enter...')
31.953271
162
0.64434
543
3,419
3.959484
0.366483
0.013023
0.009767
0.02093
0.107907
0.107907
0.107907
0.107907
0.060465
0.060465
0
0.043573
0.194501
3,419
106
163
32.254717
0.73711
0.186312
0
0.071429
0
0
0.068429
0.015206
0
0
0
0
0
1
0.042857
false
0
0.071429
0
0.128571
0.014286
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dac8c3ed489595ae8af03592386194327a0affcc
257
py
Python
Exercicios/Ex23.py
angeloridolfi/Python-CEV
fd11b7ea0725f83c84336b99304c50f183514245
[ "MIT" ]
null
null
null
Exercicios/Ex23.py
angeloridolfi/Python-CEV
fd11b7ea0725f83c84336b99304c50f183514245
[ "MIT" ]
null
null
null
Exercicios/Ex23.py
angeloridolfi/Python-CEV
fd11b7ea0725f83c84336b99304c50f183514245
[ "MIT" ]
null
null
null
def digits(num):
    """Split *num* (expected 0-9999) into (units, tens, hundreds, thousands).

    >>> digits(1234)
    (4, 3, 2, 1)
    """
    u = num // 1 % 10
    d = num // 10 % 10
    # BUG FIX: the original used `% 100`, which keeps TWO digits
    # (e.g. 1234 reported "Centena:12" instead of 2).
    c = num // 100 % 10
    # BUG FIX: likewise `% 1000` -> `% 10` for the thousands digit.
    m = num // 1000 % 10
    return u, d, c, m


def main():
    """Read a number from the user and print each of its digits."""
    num = int(input('Digite um número de 0 a 9999:\n'))
    u, d, c, m = digits(num)
    print(f'Analisando o número {num}')
    print(f'Unidade:{u}')
    print(f'Dezena:{d}')
    print(f'Centena:{c}')
    print(f'Milhar:{m}')


if __name__ == '__main__':
    main()
19.769231
51
0.587549
49
257
3.081633
0.55102
0.198676
0
0
0
0
0
0
0
0
0
0.12381
0.182879
257
12
52
21.416667
0.595238
0
0
0
0
0
0.381323
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
dacba558be9c5556dc97deb7330b68ccc601680a
167
py
Python
Diena_13_Visualization/random/miss_num.py
edzya/Python_RTU_08_20
d2921d998c611c18328dd523daf976a27ce858c1
[ "MIT" ]
8
2020-08-31T16:10:54.000Z
2021-11-24T06:37:37.000Z
Diena_13_Visualization/random/miss_num.py
edzya/Python_RTU_08_20
d2921d998c611c18328dd523daf976a27ce858c1
[ "MIT" ]
8
2021-06-08T22:30:29.000Z
2022-03-12T00:48:55.000Z
Diena_13_Visualization/random/miss_num.py
edzya/Python_RTU_08_20
d2921d998c611c18328dd523daf976a27ce858c1
[ "MIT" ]
12
2020-09-28T17:06:52.000Z
2022-02-17T12:12:46.000Z
import random

# Build the lines "1\n" .. "50\n", shuffle them, and drop one entry so the
# written file contains 49 numbers with a single "missing" one.
lines = [str(value) + "\n" for value in range(1, 51)]
random.shuffle(lines)
print(lines)
lines.pop()  # discard one (random, since the list is shuffled) number
with open("missing_number.txt", "w") as out_file:
    out_file.writelines(lines)
20.875
42
0.670659
30
167
3.7
0.733333
0
0
0
0
0
0
0
0
0
0
0.020833
0.137725
167
7
43
23.857143
0.75
0
0
0
0
0
0.125749
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
dacc0afe8e6fcaa1fdee48ff089bcf82ca383f60
1,926
py
Python
code/eval_tsd.py
YJiangcm/COMP_5222_Project
18afd92af6f38c9410d711fa9448b7a03507e0ac
[ "Apache-2.0" ]
null
null
null
code/eval_tsd.py
YJiangcm/COMP_5222_Project
18afd92af6f38c9410d711fa9448b7a03507e0ac
[ "Apache-2.0" ]
null
null
null
code/eval_tsd.py
YJiangcm/COMP_5222_Project
18afd92af6f38c9410d711fa9448b7a03507e0ac
[ "Apache-2.0" ]
null
null
null
import ast
import pandas as pd
import numpy as np
import sys
import argparse


def f1(predictions, gold):
    """
    F1 (a.k.a. DICE) operating on two lists of offsets (e.g., character).
    >>> assert f1([0, 1, 4, 5], [0, 1, 6]) == 0.5714285714285714
    :param predictions: a list of predicted offsets
    :param gold: a list of offsets serving as the ground truth
    :return: a score between 0 and 1
    """
    # Edge cases: empty gold is a perfect match only for empty predictions.
    if len(gold) == 0:
        return 1. if len(predictions) == 0 else 0.
    if len(predictions) == 0:
        return 0.
    predictions_set = set(predictions)
    gold_set = set(gold)
    nom = 2 * len(predictions_set.intersection(gold_set))
    denom = len(predictions_set) + len(gold_set)
    return float(nom) / float(denom)


def evaluate(pred_file, test_file):
    '''
    Compute and print the mean F1 over all examples.

    pred_file: path to the prediction file (one Python list literal per line)
    test_file: path to the test csv file (must have a "spans" column)
    '''
    test_df = pd.read_csv(test_file)
    # SECURITY FIX: spans/predictions come from external files; parse them
    # with ast.literal_eval instead of eval (no arbitrary code execution).
    gold_spans = test_df.spans.apply(ast.literal_eval).to_list()
    with open(pred_file) as f:
        pred_spans = [ast.literal_eval(line.strip()) for line in f]
    if len(gold_spans) != len(pred_spans):
        print('Error: the number of predictions does not match the number of test examples!')
        sys.exit(1)
    scores = []
    for pred, gold in zip(pred_spans, gold_spans):
        scores.append(f1(pred, gold))
    print('F1 score: ', np.mean(scores))


def main():
    """CLI entry point: parse the two file paths and run the evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--prediction_file", required=True,
                        help="path to the line-by-line file containing system predictions.")
    parser.add_argument("--test_file", required=True,
                        help="path to the csv file with gold spans.")
    args = parser.parse_args()
    evaluate(args.prediction_file, args.test_file)


if __name__ == "__main__":
    main()
28.746269
94
0.600727
260
1,926
4.307692
0.373077
0.035714
0.032143
0.030357
0.051786
0.051786
0.051786
0
0
0
0
0.028384
0.286604
1,926
67
95
28.746269
0.786754
0.179647
0
0.054054
0
0
0.14959
0
0
0
0
0
0
1
0.081081
false
0
0.108108
0
0.27027
0.054054
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daccefc813bcf6533f739ab7e77855039705a38d
1,963
py
Python
Trie/634.Word Squares/Solution.py
Zhenye-Na/LxxxCode
afd79d790d0a7495d75e6650f80adaa99bd0ff07
[ "MIT" ]
12
2019-05-04T04:21:27.000Z
2022-03-02T07:06:57.000Z
Trie/634.Word Squares/Solution.py
Zhenye-Na/LxxxCode
afd79d790d0a7495d75e6650f80adaa99bd0ff07
[ "MIT" ]
1
2019-07-24T18:43:53.000Z
2019-07-24T18:43:53.000Z
Trie/634.Word Squares/Solution.py
Zhenye-Na/LxxxCode
afd79d790d0a7495d75e6650f80adaa99bd0ff07
[ "MIT" ]
10
2019-07-01T04:03:04.000Z
2022-03-09T03:57:37.000Z
class TrieNode:
    """One trie node: child map, end-of-word flag, and pass-through words."""

    def __init__(self):
        self.children = {}
        self.is_word = False
        # Every word whose path runs through this node; lets searchPrefix
        # answer without walking the subtree.
        self.word_list = []


class Trie:
    """Prefix tree supporting whole-word and prefix lookups."""

    def __init__(self):
        self.root = TrieNode()

    def add(self, word):
        """Insert word, recording it on every node along its path."""
        node = self.root
        for ch in word:
            if ch not in node.children:
                node.children[ch] = TrieNode()
            node = node.children[ch]
            node.word_list.append(word)
        node.is_word = True

    def find(self, word):
        """Return the node reached by word/prefix, or None if absent."""
        node = self.root
        for ch in word:
            node = node.children.get(ch)
            if node is None:
                return None
        return node

    def searchWord(self, word):
        """True iff word was added as a complete word."""
        node = self.find(word)
        return node is not None and node.is_word

    def searchPrefix(self, prefix):
        """All added words starting with prefix ([] when none)."""
        node = self.find(prefix)
        return [] if node is None else node.word_list


class Solution:
    """
    @param: words: a list of words without duplicates
    @return: all word squares
    """

    def wordSquares(self, words):
        # write your code here
        # Simplified the redundant "not words or len(words) == 0" check.
        if not words:
            return []
        trie = Trie()
        for word in words:
            trie.add(word)
        results = []
        # Try each word as the first row of a square.
        for word in words:
            self.dfs(trie, [word], results)
        return results

    def dfs(self, trie, word_square, results):
        """Extend the partial square one row at a time, backtracking."""
        idx, n = len(word_square), len(word_square[0])
        if idx == n:
            results.append(word_square[:])
            return
        # dfs pruning: every future row's column prefix must be extendable,
        # otherwise this branch cannot possibly complete.
        for row_index in range(idx, n):
            prefix = "".join([word_square[i][row_index] for i in range(idx)])
            if not trie.searchPrefix(prefix):
                return
        # The next row must start with column idx of the rows placed so far.
        prefix = "".join([word_square[i][idx] for i in range(idx)])
        for word in trie.searchPrefix(prefix):
            word_square.append(word)
            self.dfs(trie, word_square, results)
            word_square.pop()
24.848101
77
0.538971
246
1,963
4.199187
0.239837
0.087125
0.03485
0.046467
0.127783
0.060019
0.060019
0.060019
0.060019
0
0
0.001592
0.360163
1,963
78
78
25.166667
0.82086
0.055527
0
0.185185
0
0
0
0
0
0
0
0.012821
0
1
0.148148
false
0
0
0
0.351852
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
dace81c02ecfe88dcde9a79af889af15ed3ee59f
63
py
Python
pyschism/io/__init__.py
pmav99/pyschism
b533d29b881da6cd4f408e91a4749f86451c042a
[ "Apache-2.0" ]
null
null
null
pyschism/io/__init__.py
pmav99/pyschism
b533d29b881da6cd4f408e91a4749f86451c042a
[ "Apache-2.0" ]
null
null
null
pyschism/io/__init__.py
pmav99/pyschism
b533d29b881da6cd4f408e91a4749f86451c042a
[ "Apache-2.0" ]
null
null
null
# Package-level re-export: lets callers write ``from pyschism.io import Bctides``
# instead of reaching into the bctides submodule.
from pyschism.io.bctides import Bctides

__all__ = ['Bctides']
15.75
39
0.761905
8
63
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.126984
63
3
40
21
0.8
0
0
0
0
0
0.111111
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
dacf5473b855ccb8326cf4f486426fd5801c552b
83
py
Python
run_bot.py
minproton/ReVot
077457a66fb476cf11622ee9ff29494695a84fc6
[ "Unlicense" ]
3
2020-10-06T16:34:15.000Z
2021-04-17T18:10:33.000Z
run_bot.py
minproton/ReVot
077457a66fb476cf11622ee9ff29494695a84fc6
[ "Unlicense" ]
1
2020-10-18T07:41:00.000Z
2020-10-18T07:43:04.000Z
run_bot.py
minproton/ReVot
077457a66fb476cf11622ee9ff29494695a84fc6
[ "Unlicense" ]
3
2020-10-18T07:38:52.000Z
2021-10-03T17:00:41.000Z
# Thin launcher: start the reverse-image-search bot when run as a script.
from reverse_image_search_bot import bot

if __name__ == '__main__':
    bot.main()
20.75
40
0.746988
12
83
4.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.156627
83
3
41
27.666667
0.728571
0
0
0
0
0
0.096386
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
dad0c47ce0d0ea8d56dcd1c6ed410b390c57cb8f
1,444
py
Python
notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py
shitake/ml-sandbox
a23a312251d82b27582df0029c23ff4aa7792d4c
[ "MIT" ]
null
null
null
notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py
shitake/ml-sandbox
a23a312251d82b27582df0029c23ff4aa7792d4c
[ "MIT" ]
1
2018-11-01T03:09:50.000Z
2018-11-01T03:09:50.000Z
notebooks/PyTorch/tutorial_deep_learning_with_pytorch_a_60_minute_blitz_training_a_classifier_CIFAR-10/training.py
shitake/ml-sandbox
a23a312251d82b27582df0029c23ff4aa7792d4c
[ "MIT" ]
null
null
null
from net import Net
import os
import sys
import time

import torch.nn as nn  # BUG FIX: train() uses nn.CrossEntropyLoss but nn was never imported
import torch.optim as optim

print(os.getcwd())
sys.path.append(os.getcwd())
# from ml-sandbox.src.utils.utils import display_formatted_time
import gc

# BUG FIX: removed notebook residue "del Training; gc.collect()" -- Training is
# not defined yet at this point, so it raised NameError on every fresh run.


class Training:
    """Runs an SGD training loop for a Net model on the configured device."""

    def __init__(self, dev):
        # dev: torch device (or device string) the model and batches move to.
        self.device = dev

    def train(self, lr, momentum):
        """Train a fresh Net with SGD(lr, momentum), logging periodically.

        NOTE(review): ``epochs``, ``train_loader``, ``log_interval`` and
        ``display_formatted_time`` are read from module globals that this file
        never defines (presumably injected by the notebook this was copied
        from) -- confirm they exist before running standalone.
        """
        since = time.time()
        print(self.device)

        model = Net().to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

        for epoch in range(epochs):
            running_loss = 0.0
            for batch_idx, (inputs, labels) in enumerate(train_loader, 0):
                inputs, labels = inputs.to(self.device), labels.to(self.device)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()
                # Report once every log_interval batches, then reset the window.
                if batch_idx % log_interval == (log_interval - 1):
                    print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tRunningLoss: {:.3f}".format(
                        epoch,
                        batch_idx * len(inputs),
                        len(train_loader.dataset),
                        100. * batch_idx / len(train_loader),
                        loss.item(),
                        running_loss / log_interval
                    ))
                    running_loss = 0.0

        display_formatted_time(time.time() - since)
27.245283
96
0.585873
171
1,444
4.812866
0.432749
0.060753
0.043742
0.031592
0
0
0
0
0
0
0
0.011707
0.290166
1,444
52
97
27.769231
0.79122
0.042244
0
0.054054
0
0
0.04924
0
0
0
0
0
0
1
0.054054
false
0
0.162162
0
0.243243
0.081081
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dad20c62d339443c7b2314dec5e7926d16864103
751
py
Python
tools/gen_flask_secret.py
DVC-Viking-Robotics/webapp
39a13a86b46973328bd664308c6a3fa375629adc
[ "MIT" ]
6
2019-03-27T19:26:32.000Z
2019-10-14T09:17:28.000Z
tools/gen_flask_secret.py
DVC-Viking-Robotics/webapp
39a13a86b46973328bd664308c6a3fa375629adc
[ "MIT" ]
54
2019-02-24T12:38:52.000Z
2019-11-17T09:28:40.000Z
tools/gen_flask_secret.py
DVC-Viking-Robotics/webapp
39a13a86b46973328bd664308c6a3fa375629adc
[ "MIT" ]
12
2019-03-24T23:10:24.000Z
2019-12-07T02:48:32.000Z
""" This script allows the admin to generate a new Fernet key file and re-encrypt any '.encrypted' file, should the old key file be compromised. Run it with ``python -m tools.gen_flask_secret`` """ import os from webapp.utils.file_encryption import FernetVault from webapp.constants import SECRET_KEYFILE, FLASK_SECRET_FILE from webapp.utils.super_logger import logger if __name__ == '__main__': if not os.path.exists(SECRET_KEYFILE): logger.info('Tools', 'Error: You must have the original key file before you can change to a new one.') exit(-1) # generate new Flask secret key save it vault = FernetVault(SECRET_KEYFILE) NEW_FLASK_SECRET = os.urandom(24) vault.write_file(NEW_FLASK_SECRET, FLASK_SECRET_FILE)
34.136364
110
0.750999
116
751
4.655172
0.551724
0.122222
0.077778
0
0
0
0
0
0
0
0
0.004815
0.170439
751
21
111
35.761905
0.861958
0.304927
0
0
1
0
0.177043
0
0
0
0
0
0
1
0
false
0
0.363636
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
dad29b783a0cff6154b9ddd5bbb2a707070f8ace
1,321
py
Python
metricol/outputs/statsite.py
soutys/metricol
56b96e80aab8a61727b48eb2f5f19e0694895b55
[ "MIT" ]
null
null
null
metricol/outputs/statsite.py
soutys/metricol
56b96e80aab8a61727b48eb2f5f19e0694895b55
[ "MIT" ]
null
null
null
metricol/outputs/statsite.py
soutys/metricol
56b96e80aab8a61727b48eb2f5f19e0694895b55
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- '''Graphite output plugins module ''' from __future__ import ( absolute_import, division, print_function, unicode_literals, with_statement, ) import logging from queue import Empty from statsd.client import StatsClient from metricol.inputs import MetricInput from metricol.outputs import MetricOutput LOG = logging.getLogger(__name__) class Statsite(MetricOutput): '''Statsite pusher class ''' options = ['host', 'port'] def __init__(self, section, queue): super(Statsite, self).__init__(section, queue) self.client = None def prepare_things(self): super(Statsite, self).prepare_things() self.client = StatsClient( host=self.cfg['host'], port=int(self.cfg['port']), maxudpsize=1024) def do_things(self): while True: try: _key, _val, _type, _ = self.queue.get(block=False) if _type == MetricInput.METRIC_TYPE_GAUGE: self.client.gauge(_key, _val) elif _type == MetricInput.METRIC_TYPE_COUNTER: self.client.incr(_key, count=_val) elif _type == MetricInput.METRIC_TYPE_TIMER: self.client.timing(_key, _val) except Empty: break
24.462963
79
0.613929
142
1,321
5.415493
0.5
0.06502
0.081925
0.097529
0.083225
0.083225
0
0
0
0
0
0.005297
0.28539
1,321
53
80
24.924528
0.809322
0.06056
0
0
0
0
0.013019
0
0
0
0
0
0
1
0.088235
false
0
0.205882
0
0.352941
0.029412
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dad77c6e4ccbfc91bf868ef8fb393b299a2ec072
30
py
Python
main.py
jsjhcccn/qqbot
90505ae42a5b2c3a74b85c7bc98b597373aa6b6b
[ "MIT" ]
4,253
2016-07-30T11:54:14.000Z
2022-03-17T15:40:29.000Z
main.py
MidKateIsOP520/qqbot
c5f53c31062ad5c6e70555b9bf0479488339ecbe
[ "MIT" ]
375
2016-08-05T02:07:19.000Z
2020-08-03T01:29:32.000Z
main.py
MidKateIsOP520/qqbot
c5f53c31062ad5c6e70555b9bf0479488339ecbe
[ "MIT" ]
1,183
2016-07-31T01:37:43.000Z
2022-03-13T13:18:04.000Z
from qqbot import Main

# Start the QQ bot immediately (the original kept this call on the import
# line; execution order and behaviour are identical).
Main()
30
30
0.766667
5
30
4.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.133333
30
1
30
30
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dad80e149a334d4d98779bd5f03ddaee09b35f0c
5,300
py
Python
quotes_generate_attribute.py
russDA/Quotes_Predictor
6c22f141a468b304c1a5d76e43165d6f42d4e3cf
[ "MIT" ]
null
null
null
quotes_generate_attribute.py
russDA/Quotes_Predictor
6c22f141a468b304c1a5d76e43165d6f42d4e3cf
[ "MIT" ]
null
null
null
quotes_generate_attribute.py
russDA/Quotes_Predictor
6c22f141a468b304c1a5d76e43165d6f42d4e3cf
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np


def url_list(base_url, no_of_pages):
    """Return [base_url + "1", ..., base_url + str(no_of_pages)].

    Simplified from an np.linspace float round-trip to a plain range() --
    identical page URLs, no float intermediates.
    """
    return [base_url + str(page_no) for page_no in range(1, no_of_pages + 1)]


def quote_list(url):
    """Scrape one goodreads quotes page into a DataFrame(Quote, Genre)."""
    site = requests.get(url)
    soup = BeautifulSoup(site.text, features='lxml')
    quotes = []
    genres = []
    items = soup.find_all('div', {'class': 'quote mediumText'})
    for item in items:
        # The quote text precedes the "―" author separator.
        q = item.find('div', {'class': 'quoteText'}).text
        quote = str.split(q, '―')
        quotes.append(quote[0].strip())
        # All genre tag links, joined into one comma-separated string
        # (replaces the manual "+= g.text + ','" / trailing-comma strip).
        genre_box = item.find('div', {'class': 'greyText smallText left'}).find_all('a')
        genres.append(','.join(g.text for g in genre_box))
    return pd.DataFrame({'Quote': quotes, 'Genre': genres})


def make_quote_df(base_url, no_of_pages):
    """Scrape no_of_pages pages of base_url and concatenate into one DataFrame."""
    list_of_dfs = []
    for counter, page in enumerate(url_list(base_url, no_of_pages), start=1):
        print(f'About to make dataframe: {counter}')
        list_of_dfs.append(quote_list(page))
    return pd.concat(list_of_dfs, ignore_index=True)


# =============================================================================
# Scrape every category, label it, and dump it to CSV.
# Replaces twelve copy-pasted blocks with one data-driven loop; the URLs,
# Category labels and CSV file names produced are exactly the same.
# =============================================================================
CATEGORY = 'Category'
CATEGORIES = [
    ('Romance', 'romance'),      # Love category of quotes
    ('Love', 'love'),
    ('Wisdom', 'wisdom'),        # Wisdom category of quotes
    ('Truth', 'truth'),
    ('God', 'god'),              # Religion category of quotes
    ('Faith', 'faith'),
    ('Humor', 'humor'),          # Witty and clever category of quotes
    ('Writing', 'writing'),
    ('Death', 'death'),          # Dark and contemplative category of quotes
    ('Time', 'time'),
    ('Knowledge', 'knowledge'),  # Intellectual category of quotes
    ('Science', 'science'),
]

for label, tag in CATEGORIES:
    category_df = make_quote_df(f'https://www.goodreads.com/quotes/tag/{tag}?page=', 100)
    category_df[CATEGORY] = label
    category_df.to_csv(f'{label}_Quotes.csv')
29.943503
88
0.508113
582
5,300
4.386598
0.189003
0.045828
0.056013
0.098707
0.179397
0.17313
0.17313
0
0
0
0
0.009404
0.157358
5,300
176
89
30.113636
0.562024
0.290943
0
0
0
0
0.276134
0
0
0
0
0
0
1
0.033708
false
0
0.044944
0
0.11236
0.011236
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dad8ed0484b8958b3b31e0149f5e4b0720f01f84
791
py
Python
tap_tally/__init__.py
bkgreve/tap-tally
207c64b821024003859713624034c531969f0456
[ "MIT" ]
1
2021-02-12T23:22:38.000Z
2021-02-12T23:22:38.000Z
tap_tally/__init__.py
bkgreve/tap-tally
207c64b821024003859713624034c531969f0456
[ "MIT" ]
3
2020-09-10T11:13:17.000Z
2020-09-10T11:19:06.000Z
tap_tally/__init__.py
bkgreve/tap-tally
207c64b821024003859713624034c531969f0456
[ "MIT" ]
null
null
null
import os
import json

from flask import Flask

app = Flask(__name__)


@app.route('/api/entries')
def get_tap_entries():
    """Return all visible beer entries, nulling images missing on disk."""
    with open('/usr/src/app/data/beers.json') as beers_file:
        beer_data = json.load(beers_file)

    visible_entries = []
    for beer in beer_data:
        # Skip hidden entries entirely (guard clause instead of nesting).
        if not beer.get('visible', False):
            continue
        image_name = beer.get('image', False)
        if image_name and not os.path.exists(f"/usr/src/app/data/images/{image_name}"):
            # Entry references an image that is not on disk -> blank it out.
            beer['image'] = None
        visible_entries.append(beer)

    return { 'entries': visible_entries }


@app.route('/api/header-info')
def get_header_info():
    """Static header metadata consumed by the UI."""
    return { 'headerInfo': { 'breweryName': '', 'kegeratorTemp': None } }
22.6
87
0.549937
94
791
4.468085
0.446809
0.064286
0.052381
0.061905
0
0
0
0
0
0
0
0
0.31732
791
34
88
23.264706
0.777778
0
0
0.071429
0
0
0.190898
0.082174
0
0
0
0
0
1
0.071429
false
0
0.107143
0.035714
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dad97463770df090d4e73a80e6339dec1808753d
4,133
py
Python
DatabaseGeneration_database4_question5.py
wangpinggl/covidQA
f8b440ee27058cc1030af7cd57178ddf987462d0
[ "MIT" ]
null
null
null
DatabaseGeneration_database4_question5.py
wangpinggl/covidQA
f8b440ee27058cc1030af7cd57178ddf987462d0
[ "MIT" ]
null
null
null
DatabaseGeneration_database4_question5.py
wangpinggl/covidQA
f8b440ee27058cc1030af7cd57178ddf987462d0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 05:31:09 2020

@author: Srikar Balusu
"""

import json
import pandas as pd
import numpy as np
import re
import random
import sqlite3
import datetime
import calendar
from dateutil.relativedelta import *

# Entity lookup tables used to populate the question template below.
with open('lookup1.json') as json_file:
    data = json.load(json_file)

with open('uniquelookup.json') as json_file:
    data2 = json.load(json_file)

with open('state_dict.json') as json_file:
    state_dict = json.load(json_file)

# Forecast database queried below (table db4forecaststate).
conn = sqlite3.connect('testQ.db')
c = conn.cursor()

# Question template db4q5; "(Value Entity)" / "(Time Entity)" are replaced
# with randomly sampled values for each generated question.
question_template = "What state will have the (Value Entity) total forecasted number of deaths (Time Entity)?"
question_template_id = 'db4q5'
output = {}
question_key = {}
entities = ['Value Entity', 'Time Entity']
time_values = ['in the next (x) days', 'in the next (x) weeks', 'in the next week', 'tomorrow', 'day after tomorrow']
count = 1


def queryEndDate(query, time_entity):
    # Resolve a time-entity phrase to a concrete future date, substitute it
    # into the query's 'given date' placeholder, and return both the updated
    # query and the phrase with any "(x)" replaced by the sampled number.
    today = datetime.date.today()
    output = time_entity
    if time_entity == 'in the next (x) days':
        num_day = random.randint(1,20)
        future_date = today + datetime.timedelta(days = num_day)
        output = output.replace("(x)", str(num_day))
    elif time_entity == 'in the next (x) weeks':
        num_week = random.randint(2,3)
        future_date = today + datetime.timedelta(days=num_week * 7)
        output = output.replace("(x)", str(num_week))
    elif time_entity == 'in the next week':
        future_date = today + datetime.timedelta(days=7)
    elif time_entity == 'tomorrow':
        future_date = today + datetime.timedelta(days=1)
    elif time_entity == 'day after tomorrow':
        future_date = today + datetime.timedelta(days=2)
    # NOTE(review): if time_entity matches none of the options above,
    # future_date is unbound and the next line raises NameError.
    # Snap the date to a Saturday (weekday 5): presumably the forecast table's
    # target_week_end_date values are week-ending Saturdays -- confirm vs. DB.
    if future_date.weekday() == 5:
        query = query.replace("given date", str(future_date))
    elif future_date.weekday() ==6:
        query = query.replace("given date", str(future_date + datetime.timedelta(days=6)))
    else:
        query = query.replace("given date", str(future_date + datetime.timedelta(days=5-future_date.weekday())))
    return query, output


# Generate up to 249 unique, answerable question/query pairs.
while count < 250:
    output[count] = []
    populated_entities = []
    time_entity = random.choice(time_values)
    val = random.choice(data['Value Entity'])
    # If the value entity carries an "(x)" rank slot, sample a rank 1-5 and
    # fix the ordinal suffix ("1th" -> "1st", "2th" -> "2nd", "3th" -> "3rd").
    if val.find("(x)") >= 0:
        order = random.randint(1,5)
        val = val.replace("(x)", str(order))
        if order == 2:
            val = val.replace("th", "nd")
        if order == 3:
            val = val.replace("th", "rd")
        if order == 1:
            val = val.replace("th", "st")
    else:
        order = 1
    # "most"/"highest"-style entities imply a descending ranking.
    if val.find("most") >= 0 or val.find("highest") >=0 or val.find("Highest") >=0:
        ascending = False
    else:
        ascending = True
    sql_template = "Select location_name, Max(point) from db4forecaststate WHERE target_week_end_date = 'given date' and location_name != 'National' group by location_name order by Max(point) asc/desc limit X,1"
    query = sql_template
    query, time_e = queryEndDate(query,time_entity)
    # Fill the sort direction and LIMIT offset (order-1 skips to the Nth row).
    if ascending == False:
        query = query.replace("asc/desc", "desc")
        query = query.replace("X", str(order-1))
    else:
        query = query.replace("asc/desc", "asc")
        query = query.replace("X", str(order-1))
    real_question = question_template.replace("(Time Entity)", time_e)
    real_question = real_question.replace("(Value Entity)", val)
    populated_entities.append(val)
    populated_entities.append(time_e)
    c.execute(query)
    result = c.fetchall()
    # Only unique questions with a non-empty, non-NULL answer advance count;
    # anything else is resampled.
    if len(result) == 0 or result[0][0] == None:
        continue
    elif real_question in question_key.keys():
        continue
    else:
        question_key[real_question] = True
        output[count].append({'question_template_id' : question_template_id,
                              'question_template' : question_template,
                              'entities' : entities,
                              'question' : real_question,
                              'populated_entities': populated_entities,
                              'query_template' : sql_template,
                              'query' : query,
                              'database': 'database 4'})
        print(count)
        print(real_question)
        print(query)
        print(result)
        count = count + 1
36.575221
212
0.640455
547
4,133
4.703839
0.270567
0.046638
0.057132
0.044695
0.293432
0.239798
0.147299
0.061796
0.046638
0.046638
0
0.017236
0.227922
4,133
113
213
36.575221
0.789094
0.01984
0
0.092784
0
0.010309
0.189911
0
0
0
0
0
0
1
0.010309
false
0
0.092784
0
0.113402
0.041237
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dada78f117f641ec85db243d1ba2fcd8f0d8b75a
1,347
py
Python
src/tutorials/hello_world_tutorial/functions/app.py
drblahdblah/covid-19-analysis
abd6ed23a49bd1277708163cad0741c5b9f9698a
[ "MIT" ]
3
2020-04-20T11:10:23.000Z
2020-04-22T08:06:04.000Z
src/tutorials/hello_world_tutorial/functions/app.py
drblahdblah/covid-19-analysis
abd6ed23a49bd1277708163cad0741c5b9f9698a
[ "MIT" ]
null
null
null
src/tutorials/hello_world_tutorial/functions/app.py
drblahdblah/covid-19-analysis
abd6ed23a49bd1277708163cad0741c5b9f9698a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd

df = pd.read_csv(f'https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/'
                 f'raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')


def generate_simple_table(dataframe: pd.DataFrame, max_rows=10) -> html.Table:
    """
    Simple function to return a simple HTML table for a Dash dashboard.

    :param dataframe: A Pandas DataFrame to plot.
    :param max_rows: The maximum number of rows to plot: defaults to 10
    :return: A HTML table object.
    """
    return html.Table([
        html.Thead(
            # BUG FIX: header cells were html.Tr (rows nested inside a row);
            # they must be html.Th header cells.
            html.Tr([html.Th(col) for col in dataframe.columns])
        ),
        html.Tbody([
            # BUG FIX: the original passed a generator into a single html.Td,
            # collapsing each row to one cell; build one Td per column.
            html.Tr([
                html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
            ])
            for i in range(min(len(dataframe), max_rows))
        ])
    ])


external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div(children=[
    html.H4(children="US Agriculture Exports (2011)"),
    generate_simple_table(dataframe=df, max_rows=10)
])

if __name__ == '__main__':
    app.run_server(debug=True, host='127.0.0.1', port=8088)
31.325581
99
0.680772
180
1,347
4.933333
0.494444
0.031532
0.042793
0.063063
0.060811
0.060811
0
0
0
0
0
0.066852
0.200445
1,347
42
100
32.071429
0.75766
0.173719
0
0.12
0
0
0.223963
0.071889
0
0
0
0
0
1
0.04
false
0
0.16
0
0.24
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dadb199b4bbce57f73cf63b1e61db8a2e22975c4
3,339
py
Python
ooiservices/tests/test_cmislib.py
Bobfrat/ooi-ui-services
32f67948510e6af0bc06d62fe49ff3c991b22b40
[ "Apache-2.0" ]
2
2015-02-28T00:20:30.000Z
2015-04-30T12:40:31.000Z
ooiservices/tests/test_cmislib.py
Bobfrat/ooi-ui-services
32f67948510e6af0bc06d62fe49ff3c991b22b40
[ "Apache-2.0" ]
266
2015-01-02T21:29:25.000Z
2020-01-23T16:00:11.000Z
ooiservices/tests/test_cmislib.py
Bobfrat/ooi-ui-services
32f67948510e6af0bc06d62fe49ff3c991b22b40
[ "Apache-2.0" ]
13
2015-02-04T21:13:34.000Z
2016-10-18T14:39:36.000Z
#!/usr/bin/env python
'''
Test the cmislib package created by Apache Chemistry project.
Version is currently at 0.5.1 and has very good reviews from the community.

Created: 10/04/2015
'''
# NOTE(review): this file uses Python 2 print statements throughout.
__author__ = 'M@Campbell'

import unittest
from unittest import skipIf
import os
from base64 import b64encode

from ooiservices.app import create_app


@skipIf(os.getenv('TRAVIS'), 'Skip if testing from Travis CI.')
class AlfrescoTestCase(unittest.TestCase):
    # Integration tests against a live Alfresco CMIS endpoint: verifies the
    # cmislib import, connection/repository lookup, and create/read/delete.

    def setUp(self):
        # Stand up the Flask app context and test client for each test.
        self.app = create_app('TESTING_CONFIG')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
        self.basedir = os.path.abspath(os.path.dirname(__file__))

        # set the vars for the connection
        # NOTE(review): credentials are hard-coded in source; they should be
        # moved to config/environment variables.
        self.cmisUrl = \
            'https://alfresco.oceanobservatories.org/alfresco/s/api/cmis'
        self.cmisUsername = 'ooinet'
        self.cmisPassword = '75commonLIKEbrown76'
        self.cmisId = 'c161bc66-4f7e-4a4f-b5f2-aac9fbf1d3cd'

    def tearDown(self):
        # Drop the app context pushed in setUp.
        self.app_context.pop()

    def get_api_headers(self, username, password):
        # Build Basic-auth + JSON headers for API requests.
        return {
            'Authorization': 'Basic ' + b64encode(
                (username + ':' + password).encode('utf-8')).decode('utf-8'),
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

    def test_cmislib_loaded(self):
        '''
        Make sure the cmislib library is properly loaded.
        '''
        from cmislib.model import CmisClient
        # Referencing the name is enough; an ImportError above fails the test.
        CmisClient

    def test_cmislib_connection(self):
        from cmislib.model import CmisClient
        # create the connection object
        client = CmisClient(self.cmisUrl, self.cmisUsername, self.cmisPassword)
        # check to make sure the object was created with the correct url
        self.assertEquals(client.repositoryUrl, self.cmisUrl)
        # use the client to connect to the repository
        repo = client.getRepository(self.cmisId)
        # make sure the repo information is referencing the correct repository
        self.assertEqual(repo.info['repositoryId'], self.cmisId)

    def test_cmislib_CRD(self):
        # Exercises create / read (full-text search) / delete round-trip.
        from cmislib.model import CmisClient
        client = CmisClient(self.cmisUrl, self.cmisUsername, self.cmisPassword)
        repo = client.getRepository(self.cmisId)

        # for tests, lets make sure the test folder isn't still there
        try:
            print ". . ."
            someObject = repo.getObjectByPath('/testFolder')
            someObject.deleteTree()
        # NOTE(review): bare except also hides unrelated errors (auth,
        # network); narrowing to the cmislib ObjectNotFoundException would
        # be safer.
        except:
            print "\tno existing folders..."

        # create a new dir in the root folder
        print "\ttesting folder creation..."
        root = repo.rootFolder
        someFolder = root.createFolder('testFolder')

        # create a test file and drop it in the test folder.
        # NOTE(review): someFile is never closed; a with-block would be safer.
        print "\ttesting file creation..."
        someFile = open(self.basedir + '/mock_data/test.txt', 'r')
        someFolder.createDocument('Test Document', contentFile=someFile)

        # test read by using a full-text search.
        print "\ttesting full-text search (read)..."
        repo.query("select * from cmis:document where contains('test')")

        # Then obliterate the folder and all it's children, mercilessly.
        print "\ttesting delete..."
        someFolder.deleteTree()
34.42268
79
0.650195
388
3,339
5.530928
0.471649
0.019571
0.020503
0.030755
0.134203
0.088537
0.054986
0.054986
0
0
0
0.014788
0.250674
3,339
96
80
34.78125
0.842926
0.151542
0
0.122807
0
0
0.199535
0.013948
0
0
0
0
0.035088
0
null
null
0.087719
0.140351
null
null
0.105263
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
dadb84725f965ceb0d440e96ae59bc095140ff91
128
py
Python
Code/YOLO/darkflow/darkflow/utils/im_transform.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
Code/YOLO/darkflow/darkflow/utils/im_transform.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
Code/YOLO/darkflow/darkflow/utils/im_transform.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
version https://git-lfs.github.com/spec/v1 oid sha256:834d7828e80c45d2160d52eeeefc97a2432c512b24b89422f604edf52f3d667a size 895
32
75
0.882813
13
128
8.692308
1
0
0
0
0
0
0
0
0
0
0
0.401639
0.046875
128
3
76
42.666667
0.52459
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
dadc943c456eca12a123766407c6fe7d3b911d2b
12,804
py
Python
main_robust.py
npatel5/similarity-learning
9052f3d966a2e14918fcaf9d3a4c93dc9143db71
[ "MIT" ]
null
null
null
main_robust.py
npatel5/similarity-learning
9052f3d966a2e14918fcaf9d3a4c93dc9143db71
[ "MIT" ]
null
null
null
main_robust.py
npatel5/similarity-learning
9052f3d966a2e14918fcaf9d3a4c93dc9143db71
[ "MIT" ]
null
null
null
from __future__ import print_function from IPython.core.debugger import set_trace import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import config as cf import numpy as np import torchvision import torchvision.transforms as transforms #import ipdb import os import sys import time import argparse import datetime import scipy.ndimage as ndimage from networks import * from torch.autograd import Variable from itertools import starmap import random parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training') parser.add_argument('--lr', default=0.1, type=float, help='learning_rate') parser.add_argument('--net_type', default='wide-resnet', type=str, help='model') parser.add_argument('--depth', default=28, type=int, help='depth of model') parser.add_argument('--widen_factor', default=10, type=int, help='width of model') parser.add_argument('--dropout', default=0.3, type=float, help='dropout_rate') parser.add_argument('--dataset', default='cifar10', type=str, help='dataset = [cifar10/cifar100]') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model') args = parser.parse_args() # Hyper Parameter settings sim_learning = False #use_noise = True use_cuda = torch.cuda.is_available() best_acc = 0 #sig = 10 reg_strength = 1 start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, cf.batch_size, cf.optim_type # Data Uplaod print('\n[Phase 1] : Data Preparation') torch.manual_seed(2809) gaussian_transforms = [ transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=0)), # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=1)), # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=2)), # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=5)), # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, 
sigma=10)) ] transform_train_noise = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.RandomChoice(gaussian_transforms), transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) transform_train_clean = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) # meanstd transformation transform_test_noise = transforms.Compose([ transforms.RandomChoice(gaussian_transforms), transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) transform_test = transforms.Compose([ #transforms.RandomChoice(gaussian_transforms), transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) if(args.dataset == 'cifar10'): print("| Preparing CIFAR-10 dataset...") sys.stdout.write("| ") trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test) num_classes = 10 elif(args.dataset == 'cifar100'): print("| Preparing CIFAR-100 dataset...") sys.stdout.write("| ") trainset_noise = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_noise) trainset_clean = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_clean) testset_noise = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test_noise) num_classes = 100 testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test) num_classes = 100 trainloader_noise = torch.utils.data.DataLoader(trainset_noise, batch_size=batch_size, shuffle=True, num_workers=2) trainloader_clean = torch.utils.data.DataLoader(trainset_clean, 
batch_size=batch_size, shuffle=True, num_workers=2) testloader_noise = torch.utils.data.DataLoader(testset_noise, batch_size=100, shuffle=False, num_workers=2) testloader_clean = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2) # Return network & file name def getNetwork(args): if (args.net_type == 'lenet'): net = LeNet(num_classes) file_name = 'lenet' elif (args.net_type == 'vggnet'): net = VGG(args.depth, num_classes) file_name = 'vgg-'+str(args.depth) elif (args.net_type == 'resnet'): net = ResNet_2Read(args.depth, num_classes) file_name = 'resnet-'+str(args.depth) elif (args.net_type == 'wide-resnet'): net = Wide_ResNet(args.depth, args.widen_factor, args.dropout, num_classes) file_name = 'wide-resnet-'+str(args.depth)+'x'+str(args.widen_factor) else: print('Error : Network should be either [LeNet / VGGNet / ResNet / Wide_ResNet') sys.exit(0) return net, file_name if (sim_learning): checkpoint_gauss = torch.load("./checkpoint/cifar100/resnet-50_2readout_3.t7") robustNet = checkpoint_gauss['net'] robustNet = torch.nn.DataParallel(robustNet, device_ids=range(torch.cuda.device_count())) # Test only option if (args.testOnly): print('\n[Test Phase] : Model setup') assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!' 
_, file_name = getNetwork(args) checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_readout_match.t7') net = checkpoint['net'] if use_cuda: net.cuda() net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) cudnn.benchmark = True net.eval() test_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(testloader_noise): if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() inputs, targets = Variable(inputs, volatile=True), Variable(targets) outputs = net(inputs, compute_similarity=False) _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += predicted.eq(targets.data).cpu().sum() acc = 100.*correct/total #variance = batch_var.mean() print("| Test Result (Noise Readout)\tAcc@1: %.2f%%" %(acc)) test_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(testloader_noise): if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() inputs, targets = Variable(inputs, volatile=True), Variable(targets) outputs = net(inputs, img_type="clean") _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += predicted.eq(targets.data).cpu().sum() acc = 100.*correct/total #variance = batch_var.mean() print("| Test Result (Clean Readout)\tAcc@1: %.2f%%" %(acc)) # std = 0. # for images, _ in testloader: # batch_samples = images.size(0) # images = images.view(batch_samples,images.size(1), -1) # std += images.std(2).sum(0) # std /= len(testloader.dataset) #print("| Standard Deviation of noise / Standard Deviation of Pixels: %.2f" %(sig/std)) sys.exit(0) # Model print('\n[Phase 2] : Model setup') if args.resume: # Load checkpoint print('| Resuming from checkpoint...') assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!' 
_, file_name = getNetwork(args) checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_2readout_3.t7') net = checkpoint['net'] best_acc = 100.0 #start_epoch = checkpoint['epoch'] start_epoch = 200 else: print('| Building net type [' + args.net_type + ']...') net, file_name = getNetwork(args) net.apply(conv_init) if use_cuda: net.cuda() net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) cudnn.benchmark = True criterion = nn.CrossEntropyLoss() w_loss = nn.MSELoss() # Similarity Loss Computation # Training similarities = {} accs = [] def train(epoch): net.train() train_loss = 0 correct = 0 total = 0 optimizer = optim.SGD(net.module.linear_clean.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4) print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch))) for batch_idx, ((inputs1, targets1), (inputs2, targets2)) in enumerate(zip(trainloader_noise, trainloader_clean)): if use_cuda: # inputs, targets = inputs.cuda(), targets.cuda() inputs1, targets1 = inputs1.cuda(), targets1.cuda() # GPU settings inputs2, targets2 = inputs2.cuda(), targets2.cuda() optimizer.zero_grad() outputs_n = net(inputs1, img_type="noise", compute_similarity=False) l1 = criterion(outputs_n, targets1) l1.backward(retain_graph=True) outputs_c = net(inputs2, img_type="clean", compute_similarity=False) l2 = criterion(outputs_c, targets2) l2.backward(retain_graph=True) l3 = w_loss(outputs_n, outputs_c) l3.backward(retain_graph=True) optimizer.step() # Optimizer update loss = l1 + l2 + l3 train_loss += loss.item() _, predicted = torch.max(outputs_c.data, 1) total += targets2.size(0) correct += predicted.eq(targets2.data).cpu().sum() sys.stdout.write('\r') sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\t Loss: %.4f Acc@1: %.3f%%' %(epoch, num_epochs, batch_idx+1, (len(trainset_noise)//batch_size)+1, loss.item(), 100.*correct/total)) sys.stdout.flush() def test(epoch): global best_acc net.eval() 
test_loss = 0 correct1 = 0 total1 = 0 correct2 = 0 total2 = 0 for batch_idx, (inputs1, targets1) in enumerate(testloader_noise): if use_cuda: inputs1, targets1 = inputs1.cuda(), targets1.cuda() outputs_n = net(inputs1, img_type="noise", compute_similarity=False) loss = criterion(outputs_n, targets1) test_loss += loss.item() _, predicted1 = torch.max(outputs_n.data, 1) total1 += targets1.size(0) correct1 += predicted1.eq(targets1.data).cpu().sum() acc = 100.*correct1/total1 for batch_idx, (inputs2, targets2) in enumerate(testloader_noise): if use_cuda: inputs2, targets2 = inputs2.cuda(), targets2.cuda() outputs_c = net(inputs2, img_type="clean", compute_similarity=False) loss2 = criterion(outputs_c, targets2) _, predicted2 = torch.max(outputs_c.data, 1) total2 += targets2.size(0) correct2 += predicted2.eq(targets2.data).cpu().sum() acc2 = 100.*correct2/total2 print("\n| Validation Epoch #%d\t\t\tLoss (Noise): %.4f Acc@1: %.2f%%" %(epoch, loss.item(), acc)) print("\n| Validation Epoch #%d\t\t\tLoss (Clean): %.4f Acc@1: %.2f%%" %(epoch, loss2.item(), acc2)) # Save checkpoint when best model if acc > best_acc: print('| Saving Best model...\t\t\tTop1 = %.2f%%' %(acc)) state = { 'net':net.module if use_cuda else net, 'acc':acc, 'epoch':epoch, } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') save_point = './checkpoint/'+args.dataset+os.sep if not os.path.isdir(save_point): os.mkdir(save_point) torch.save(state, save_point+file_name+'.t7') best_acc = acc accs.append(acc) #net.train() print('\n[Phase 3] : Training model') print('| Training Epochs = ' + str(num_epochs)) print('| Initial Learning Rate = ' + str(args.lr)) print('| Optimizer = ' + str(optim_type)) elapsed_time = 0 for epoch in range(start_epoch, start_epoch+num_epochs): start_time = time.time() train(epoch) test(epoch) epoch_time = time.time() - start_time elapsed_time += epoch_time print('| Elapsed time : %d:%02d:%02d' %(cf.get_hms(elapsed_time))) np.save('epoch_accs', accs) print('\n[Phase 4] : 
Testing model') print('* Test results : Acc@1 = %.2f%%' %(best_acc)) print('| Saving model...') state = { 'net':net.module if use_cuda else net, #'acc':acc, #'epoch':epoch, } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') save_point = './checkpoint/'+args.dataset+os.sep if not os.path.isdir(save_point): os.mkdir(save_point) torch.save(state, save_point+file_name+'robust_readout_matching_basicblock.t7')
38.107143
133
0.670181
1,664
12,804
5.016827
0.184495
0.018448
0.009703
0.013776
0.508984
0.453402
0.418304
0.389075
0.373742
0.322832
0
0.0245
0.187129
12,804
335
134
38.220896
0.777575
0.083958
0
0.351145
0
0.01145
0.133219
0.007012
0
0
0
0
0.007634
1
0.01145
false
0
0.080153
0
0.09542
0.087786
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dadcb95e7278009dc692c238d4ccf77a3fb2e55e
1,843
py
Python
tools/list_exp.py
AlexMaximenko/sew
f26b45f1374f3c3ce477abcc07a2d0017859c026
[ "MIT" ]
49
2021-09-15T06:10:07.000Z
2022-03-28T17:37:28.000Z
tools/list_exp.py
AlexMaximenko/sew
f26b45f1374f3c3ce477abcc07a2d0017859c026
[ "MIT" ]
3
2021-09-26T03:45:04.000Z
2022-02-21T09:36:38.000Z
tools/list_exp.py
AlexMaximenko/sew
f26b45f1374f3c3ce477abcc07a2d0017859c026
[ "MIT" ]
9
2021-09-15T07:35:09.000Z
2022-02-21T00:10:20.000Z
# Copyright (c) ASAPP Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import fire import os import sys import time from tqdm.auto import tqdm import re import json from colorama import Fore def get_num_updates(folder): num_updates = 0 try: with open(os.path.join(folder, 'hydra_train.log')) as f: lines = [line.strip() for line in f][-50:] for line in lines: try: start = line.find('num_updates": "') if start >= 0: start += len('num_updates": "') end = line[start:].find('",') + start if end >= start: num_updates = int(line[start:end]) except: continue return num_updates except: return 0 def print_folder(folder, total_updates): num_updates = get_num_updates(folder) if num_updates >= total_updates: color = Fore.GREEN else: color = Fore.RED print(f"{folder}\t{color}{num_updates}{Fore.RESET}") def main(root="exp-bu", total_updates=100_000): exp_dirs = set() match_names = set() for dirname, dirs, files in tqdm(os.walk(root)): if 'checkpoints' in dirs: exp_dirs.add(dirname) exp_dirs = sorted(exp_dirs) evaled = [] not_evaled = [] for x in exp_dirs: if os.path.exists(f"{x}/eval.log"): evaled.append(x) else: not_evaled.append(x) print("Evaluated:") for folder in evaled: print_folder(folder, total_updates) print("\nNot evaluated:") for folder in not_evaled: print_folder(folder, total_updates) if __name__ == "__main__": fire.Fire(main)
27.102941
65
0.571351
238
1,843
4.260504
0.394958
0.098619
0.050296
0.065089
0.097633
0.069034
0
0
0
0
0
0.008821
0.323386
1,843
67
66
27.507463
0.80433
0.078133
0
0.145455
0
0
0.089728
0.024793
0
0
0
0
0
1
0.054545
false
0
0.145455
0
0.236364
0.109091
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dadd9c97dc297ce53e68887ddd85475bda9eda3c
11,356
py
Python
tests/tags_test.py
alexanderGugel/nate
18856142efe0b80f5260df3fb2ad9cd0a22a1c6f
[ "MIT" ]
3
2020-10-31T17:16:18.000Z
2022-01-27T06:42:44.000Z
tests/tags_test.py
alexanderGugel/nate
18856142efe0b80f5260df3fb2ad9cd0a22a1c6f
[ "MIT" ]
null
null
null
tests/tags_test.py
alexanderGugel/nate
18856142efe0b80f5260df3fb2ad9cd0a22a1c6f
[ "MIT" ]
null
null
null
import unittest from nate import ( H1, Body, Br, DangerousHtml, Div, Head, Hr, Html, Input, Meta, P, Title, Table, Thead, Tbody, Th, Tr, Td, Fragment, ) from .common import article_1, article_2, html, section class TagsTest(unittest.TestCase): def test_basic(self) -> None: basic = Html( lang="en", children=[ Head( children=[ Meta(charset="utf-8"), Meta( name="viewport", content="width=device-width, initial-scale=1", ), Title(children="Basic"), ] ), Body( children=[ H1("Hello world"), P( children=[ "First line", Br(), "Second line", ] ), Hr(), P("The end."), ], ), ], ) basic_expected = """<!DOCTYPE html>\ <html lang="en">\ <head>\ <meta charset="utf-8"/>\ <meta name="viewport" content="width=device-width, initial-scale=1"/>\ <title>Basic</title>\ </head>\ <body>\ <h1>Hello world</h1>\ <p>First line<br/>Second line</p><hr/>\ <p>The end.</p>\ </body>\ </html>\ """ self.assertEqual(basic.to_html(), basic_expected) def test_iterables(self) -> None: politicians = [ {"first_name": "Theodor", "last_name": "Heuss", "party": "FDP"}, {"first_name": "Heinrich", "last_name": "Lübke", "party": "CDU"}, {"first_name": "Gustav", "last_name": "Heinemann", "party": "SPD"}, {"first_name": "Walter", "last_name": "Scheel", "party": "FDP"}, {"first_name": "Karl", "last_name": "Carstens", "party": "CDU"}, { "first_name": "Richard", "last_name": "von Weizsäcker", "party": "CDU", }, {"first_name": "Roman", "last_name": "Herzog", "party": "CDU"}, {"first_name": "Johannes", "last_name": "Rau", "party": "SPD"}, ] table = Table( [ Thead(Th([Tr("First Name"), Tr("Last Name"), Tr("Party")])), Tbody( map( lambda politician: Tr( [ Td(politician["first_name"]), Td(politician["last_name"]), Td(politician["party"]), ] ), politicians, ), ), ] ) table_expected = """<table>\ <thead><th><tr>First Name</tr><tr>Last Name</tr><tr>Party</tr></th></thead>\ <tbody>\ <tr><td>Theodor</td><td>Heuss</td><td>FDP</td></tr>\ <tr><td>Heinrich</td><td>Lübke</td><td>CDU</td></tr>\ 
<tr><td>Gustav</td><td>Heinemann</td><td>SPD</td></tr>\ <tr><td>Walter</td><td>Scheel</td><td>FDP</td></tr>\ <tr><td>Karl</td><td>Carstens</td><td>CDU</td></tr>\ <tr><td>Richard</td><td>von Weizsäcker</td><td>CDU</td></tr>\ <tr><td>Roman</td><td>Herzog</td><td>CDU</td></tr>\ <tr><td>Johannes</td><td>Rau</td><td>SPD</td></tr>\ </tbody>\ </table>\ """ self.assertEqual(table.to_html(), table_expected) p = P("Test", class_=iter(["bg-red", "blue"])) p_expected = '<p class="bg-red blue">Test</p>' self.assertEqual(p.to_html(), p_expected) def test_xss(self) -> None: p = P("<script>alert('XSS');</script>") p_expected =\ "<p>&lt;script&gt;alert(&#x27;XSS&#x27;);&lt;/script&gt;</p>" self.assertEqual(p.to_html(), p_expected) div = Div([DangerousHtml("<script>alert('XSS');</script>")]) div_expected = "<div><script>alert('XSS');</script></div>" self.assertEqual(div.to_html(), div_expected) text_input = Input(value="<script>alert('XSS');</script>") text_input_expected =\ '<input value="&lt;script&gt;alert(&#x27;XSS&#x27;);&lt;/\ script&gt;"/>' self.assertEqual(text_input.to_html(), text_input_expected) def test_article(self) -> None: article_1_expected = """\ <article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\ <p class="f5 f4-l lh-copy athelas">\ The tech giant says it is ready to begin planning a quantum computer, a \ powerful cpu machine that relies on subatomic particles instead of \ transistors.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \ dimly lit room with a computer interface terminal."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\ <time class="f6 db gray">Nov. 
21, 2016</time>\ </article>""" article_2_expected = """<article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ A whale takes up residence in a large body of water\ </h1>\ <p class="f5 f4-l lh-copy athelas">\ This giant of a whale says it is ready to begin planning a new swim later \ this afternoon. A powerful mammal that relies on fish and plankton instead of \ hamburgers.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \ whale&#x27;s tale coming crashing out of the water."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\ <time class="f6 db gray">Nov. 19, 2016</time>\ </article>""" self.assertEqual(article_1.to_html(), article_1_expected) self.assertEqual(article_2.to_html(), article_2_expected) def test_section(self) -> None: expected = """<section class="mw7 center">\ <h2 class="athelas ph3 ph0-l">News</h2>\ <article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\ <p class="f5 f4-l lh-copy athelas">\ The tech giant says it is ready to begin planning a quantum computer, a \ powerful cpu machine that relies on subatomic particles instead of \ transistors.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \ dimly lit room with a computer interface terminal."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\ <time class="f6 db gray">Nov. 
21, 2016</time>\ </article>\ <article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ A whale takes up residence in a large body of water\ </h1>\ <p class="f5 f4-l lh-copy athelas">\ This giant of a whale says it is ready to begin planning a new swim later \ this afternoon. A powerful mammal that relies on fish and plankton instead of \ hamburgers.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \ whale&#x27;s tale coming crashing out of the water."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\ <time class="f6 db gray">Nov. 19, 2016</time>\ </article>\ </section>""" self.assertEqual(section.to_html(), expected) def test_html(self) -> None: actual = html.to_html() expected = """<!DOCTYPE html>\ <html lang="en">\ <head>\ <meta charset="utf-8"/>\ <meta name="viewport" content="width=device-width, initial-scale=1"/>\ <title>Example Title</title>\ <link rel="stylesheet" href="https://unpkg.com/tachyons@4.12.0/css/tachyons.\ min.css"/>\ </head>\ <body class="sans-serif">\ <section class="mw7 center">\ <h2 class="athelas ph3 ph0-l">News</h2>\ <article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\ <p class="f5 f4-l lh-copy athelas">\ The tech giant says it is ready to begin planning a quantum computer, a \ powerful cpu machine that relies on subatomic particles instead of \ transistors.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \ dimly lit room with a computer 
interface terminal."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\ <time class="f6 db gray">Nov. 21, 2016</time>\ </article>\ <article class="pv4 bt bb b--black-10 ph3 ph0-l">\ <div class="flex flex-column flex-row-ns">\ <div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\ <h1 class="f3 athelas mt0 lh-title">\ A whale takes up residence in a large body of water</h1>\ <p class="f5 f4-l lh-copy athelas">\ This giant of a whale says it is ready to begin planning a new swim later \ this afternoon. A powerful mammal that relies on fish and plankton instead of \ hamburgers.\ </p>\ </div>\ <div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\ <img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \ whale&#x27;s tale coming crashing out of the water."/>\ </div>\ </div>\ <p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\ <time class="f6 db gray">Nov. 19, 2016</time>\ </article>\ </section>\ </body>\ </html>\ """ self.assertEqual(actual, expected) def test_fragment(self) -> None: fragment = Div( children=[ "before fragment", Fragment( children=[ Div(children="before nested fragment in div"), Fragment( children="nested fragment", ), Fragment( children=Div(children="nested fragment in div"), ), "after nested fragment", ], ), "after fragment", ], ) actual = fragment.to_html() expected = """<div>\ before fragment\ <div>before nested fragment in div</div>\ nested fragment\ <div>nested fragment in div</div>\ after nested fragment\ after fragment\ </div>\ """ self.assertEqual(actual, expected) if __name__ == "__main__": unittest.main()
34.941538
82
0.566925
1,613
11,356
3.946063
0.146311
0.022624
0.009427
0.008798
0.697251
0.657659
0.646661
0.624666
0.614611
0.614611
0
0.032461
0.256693
11,356
324
83
35.049383
0.721597
0
0
0.525974
0
0.113636
0.638165
0.069831
0
0
0
0
0.035714
1
0.022727
false
0
0.00974
0
0.035714
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
dade5fb81eac6f03d7cd440bbd9b3041f81946b2
554
py
Python
worker_zeromq/addresses.py
espang/projects
3a4d93592bc3427a6abd8d2170081155862754a8
[ "MIT" ]
null
null
null
worker_zeromq/addresses.py
espang/projects
3a4d93592bc3427a6abd8d2170081155862754a8
[ "MIT" ]
null
null
null
worker_zeromq/addresses.py
espang/projects
3a4d93592bc3427a6abd8d2170081155862754a8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Feb 26 09:11:06 2016 @author: eikes """ ADD_VENTI_WORKER = 'tcp://*:4555' ADD_VENTI_LH_WORKER = 'tcp://localhost:4555' ADD_SINK_RECEIVE = 'tcp://*:4556' ADD_SINK_LH_RECEIVE = 'tcp://localhost:4556' ADD_SINK_PUBLISH = 'tcp://*:4557' ADD_SINK_LH_PUBLISH = 'tcp://localhost:4557' ADD_VENTI_RECEIVE = 'tcp://*:4558' ADD_VENTI_LH_RECEIVE = 'tcp://localhost:4558' ADD_LOG_CONTROLLER = 'tcp://*:4560' ADD_LOG_LH_CONTROLLER = 'tcp://localhost:4560' ADD_LOGGING = 'tcp://*:4559' ADD_LH_LOGGING = 'tcp://localhost:4559'
29.157895
46
0.707581
84
554
4.333333
0.380952
0.197802
0.054945
0.115385
0
0
0
0
0
0
0
0.121756
0.095668
554
18
47
30.777778
0.60479
0.133574
0
0
0
0
0.40678
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dade9648a2fc13cba868380ebc0181b3e2615f68
593
py
Python
ancli/__main__.py
devforfu/ancli
44ea37e756fb12221bfafc1ecae3b8ef46d84cce
[ "MIT" ]
1
2020-10-24T16:08:14.000Z
2020-10-24T16:08:14.000Z
ancli/__main__.py
devforfu/ancli
44ea37e756fb12221bfafc1ecae3b8ef46d84cce
[ "MIT" ]
null
null
null
ancli/__main__.py
devforfu/ancli
44ea37e756fb12221bfafc1ecae3b8ef46d84cce
[ "MIT" ]
null
null
null
import importlib import sys from ancli import make_cli try: entry_point = sys.argv[1] except IndexError: print('Error: no entry point name provided!') sys.exit(1) try: module_path, function_name = entry_point.split(':') except ValueError: print('Error: entry point name should have format a.b.c:function') sys.exit(1) mod = importlib.import_module(module_path) try: func = getattr(mod, function_name) except AttributeError: print(f'Error: function \'{function_name}\' is not found') sys.exit(1) sys.argv = [sys.argv[0]] + sys.argv[2:] make_cli(func)
20.448276
70
0.701518
89
593
4.561798
0.460674
0.098522
0.059113
0
0
0
0
0
0
0
0
0.012245
0.173693
593
28
71
21.178571
0.816327
0
0
0.285714
0
0
0.209106
0
0
0
0
0
0
1
0
false
0
0.190476
0
0.190476
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dadeeda64957f5eeac274eff704cc39ef9a9cf4d
31,856
py
Python
venv/Lib/site-packages/pyramid/traversal.py
mandamg/Exercicios-de-Python-do-Curso-em-Video
3f818c11c3c10213bebc1dfb6a740adee468ea3a
[ "MIT" ]
null
null
null
venv/Lib/site-packages/pyramid/traversal.py
mandamg/Exercicios-de-Python-do-Curso-em-Video
3f818c11c3c10213bebc1dfb6a740adee468ea3a
[ "MIT" ]
null
null
null
venv/Lib/site-packages/pyramid/traversal.py
mandamg/Exercicios-de-Python-do-Curso-em-Video
3f818c11c3c10213bebc1dfb6a740adee468ea3a
[ "MIT" ]
null
null
null
from functools import lru_cache from urllib.parse import unquote_to_bytes from zope.interface import implementer from zope.interface.interfaces import IInterface from pyramid.encode import url_quote from pyramid.exceptions import URLDecodeError from pyramid.interfaces import ( VH_ROOT_KEY, IRequestFactory, IResourceURL, ITraverser, ) from pyramid.location import lineage from pyramid.threadlocal import get_current_registry from pyramid.util import ascii_, is_nonstr_iter, text_ PATH_SEGMENT_SAFE = "~!$&'()*+,;=:@" # from webob PATH_SAFE = PATH_SEGMENT_SAFE + "/" def find_root(resource): """Find the root node in the resource tree to which ``resource`` belongs. Note that ``resource`` should be :term:`location`-aware. Note that the root resource is available in the request object by accessing the ``request.root`` attribute. """ for location in lineage(resource): if location.__parent__ is None: resource = location break return resource def find_resource(resource, path): """Given a resource object and a string or tuple representing a path (such as the return value of :func:`pyramid.traversal.resource_path` or :func:`pyramid.traversal.resource_path_tuple`), return a resource in this application's resource tree at the specified path. The resource passed in *must* be :term:`location`-aware. If the path cannot be resolved (if the respective node in the resource tree does not exist), a :exc:`KeyError` will be raised. This function is the logical inverse of :func:`pyramid.traversal.resource_path` and :func:`pyramid.traversal.resource_path_tuple`; it can resolve any path string or tuple generated by either of those functions. Rules for passing a *string* as the ``path`` argument: if the first character in the path string is the ``/`` character, the path is considered absolute and the resource tree traversal will start at the root resource. 
If the first character of the path string is *not* the ``/`` character, the path is considered relative and resource tree traversal will begin at the resource object supplied to the function as the ``resource`` argument. If an empty string is passed as ``path``, the ``resource`` passed in will be returned. Resource path strings must be escaped in the following manner: each path segment must be UTF-8 encoded and escaped via Python's :mod:`urllib.quote`. For example, ``/path/to%20the/La%20Pe%C3%B1a`` (absolute) or ``to%20the/La%20Pe%C3%B1a`` (relative). The :func:`pyramid.traversal.resource_path` function generates strings which follow these rules (albeit only absolute ones). The text may not have any non-ASCII characters in it. Rules for passing a *tuple* as the ``path`` argument: if the first element in the path tuple is the empty string (for example ``('', 'a', 'b', 'c')``, the path is considered absolute and the resource tree traversal will start at the resource tree root object. If the first element in the path tuple is not the empty string (for example ``('a', 'b', 'c')``), the path is considered relative and resource tree traversal will begin at the resource object supplied to the function as the ``resource`` argument. If an empty sequence is passed as ``path``, the ``resource`` passed in itself will be returned. No URL-quoting of individual path segments within the tuple is required (each segment may be any string representing a resource name). Resource path tuples generated by :func:`pyramid.traversal.resource_path_tuple` can always be resolved by ``find_resource``. 
""" if isinstance(path, str): path = ascii_(path) D = traverse(resource, path) view_name = D['view_name'] context = D['context'] if view_name: raise KeyError('%r has no subelement %s' % (context, view_name)) return context find_model = find_resource # b/w compat (forever) def find_interface(resource, class_or_interface): """ Return the first resource found in the :term:`lineage` of ``resource`` which, a) if ``class_or_interface`` is a Python class object, is an instance of the class or any subclass of that class or b) if ``class_or_interface`` is a :term:`interface`, provides the specified interface. Return ``None`` if no resource providing ``interface_or_class`` can be found in the lineage. The ``resource`` passed in *must* be :term:`location`-aware. """ if IInterface.providedBy(class_or_interface): test = class_or_interface.providedBy else: test = lambda arg: isinstance(arg, class_or_interface) for location in lineage(resource): if test(location): return location def resource_path(resource, *elements): """Return a string object representing the absolute physical path of the resource object based on its position in the resource tree, e.g ``/foo/bar``. Any positional arguments passed in as ``elements`` will be appended as path segments to the end of the resource path. For instance, if the resource's path is ``/foo/bar`` and ``elements`` equals ``('a', 'b')``, the returned string will be ``/foo/bar/a/b``. The first character in the string will always be the ``/`` character (a leading ``/`` character in a path string represents that the path is absolute). Resource path strings returned will be escaped in the following manner: each path segment will be encoded as UTF-8 and escaped via Python's :mod:`urllib.quote`. For example, ``/path/to%20the/La%20Pe%C3%B1a``. This function is a logical inverse of :mod:`pyramid.traversal.find_resource`: it can be used to generate path references that can later be resolved via that function. 
The ``resource`` passed in *must* be :term:`location`-aware. .. note:: Each segment in the path string returned will use the ``__name__`` attribute of the resource it represents within the resource tree. Each of these segments *should* be a string (as per the contract of :term:`location`-awareness). However, no conversion or safety checking of resource names is performed. For instance, if one of the resources in your tree has a ``__name__`` which (by error) is a dictionary, the :func:`pyramid.traversal.resource_path` function will attempt to append it to a string and it will cause a :exc:`pyramid.exceptions.URLDecodeError`. .. note:: The :term:`root` resource *must* have a ``__name__`` attribute with a value of either ``None`` or the empty string for paths to be generated properly. If the root resource has a non-null ``__name__`` attribute, its name will be prepended to the generated path rather than a single leading '/' character. """ # joining strings is a bit expensive so we delegate to a function # which caches the joined result for us return _join_path_tuple(resource_path_tuple(resource, *elements)) model_path = resource_path # b/w compat (forever) def traverse(resource, path): """Given a resource object as ``resource`` and a string or tuple representing a path as ``path`` (such as the return value of :func:`pyramid.traversal.resource_path` or :func:`pyramid.traversal.resource_path_tuple` or the value of ``request.environ['PATH_INFO']``), return a dictionary with the keys ``context``, ``root``, ``view_name``, ``subpath``, ``traversed``, ``virtual_root``, and ``virtual_root_path``. A definition of each value in the returned dictionary: - ``context``: The :term:`context` (a :term:`resource` object) found via traversal or URL dispatch. If the ``path`` passed in is the empty string, the value of the ``resource`` argument passed to this function is returned. - ``root``: The resource object at which :term:`traversal` begins. 
If the ``resource`` passed in was found via URL dispatch or if the ``path`` passed in was relative (non-absolute), the value of the ``resource`` argument passed to this function is returned. - ``view_name``: The :term:`view name` found during :term:`traversal` or :term:`URL dispatch`; if the ``resource`` was found via traversal, this is usually a representation of the path segment which directly follows the path to the ``context`` in the ``path``. The ``view_name`` will be a string. The ``view_name`` will be the empty string if there is no element which follows the ``context`` path. An example: if the path passed is ``/foo/bar``, and a resource object is found at ``/foo`` (but not at ``/foo/bar``), the 'view name' will be ``'bar'``. If the ``resource`` was found via URL dispatch, the ``view_name`` will be the empty string unless the ``traverse`` predicate was specified or the ``*traverse`` route pattern was used, at which point normal traversal rules dictate the result. - ``subpath``: For a ``resource`` found via :term:`traversal`, this is a sequence of path segments found in the ``path`` that follow the ``view_name`` (if any). Each of these items is a string. If no path segments follow the ``view_name``, the subpath will be the empty sequence. An example: if the path passed is ``/foo/bar/baz/buz``, and a resource object is found at ``/foo`` (but not ``/foo/bar``), the 'view name' will be ``'bar'`` and the :term:`subpath` will be ``['baz', 'buz']``. For a ``resource`` found via URL dispatch, the subpath will be a sequence of values discerned from ``*subpath`` in the route pattern matched or the empty sequence. - ``traversed``: The sequence of path elements traversed from the root to find the ``context`` object during :term:`traversal`. Each of these items is a string. If no path segments were traversed to find the ``context`` object (e.g. if the ``path`` provided is the empty string), the ``traversed`` value will be the empty sequence. 
If the ``resource`` is a resource found via :term:`URL dispatch`, traversed will be None. - ``virtual_root``: A resource object representing the 'virtual' root of the resource tree being traversed during :term:`traversal`. See :ref:`vhosting_chapter` for a definition of the virtual root object. If no virtual hosting is in effect, and the ``path`` passed in was absolute, the ``virtual_root`` will be the *physical* root resource object (the object at which :term:`traversal` begins). If the ``resource`` passed in was found via :term:`URL dispatch` or if the ``path`` passed in was relative, the ``virtual_root`` will always equal the ``root`` object (the resource passed in). - ``virtual_root_path`` -- If :term:`traversal` was used to find the ``resource``, this will be the sequence of path elements traversed to find the ``virtual_root`` resource. Each of these items is a string. If no path segments were traversed to find the ``virtual_root`` resource (e.g. if virtual hosting is not in effect), the ``traversed`` value will be the empty list. If URL dispatch was used to find the ``resource``, this will be ``None``. If the path cannot be resolved, a :exc:`KeyError` will be raised. Rules for passing a *string* as the ``path`` argument: if the first character in the path string is the with the ``/`` character, the path will considered absolute and the resource tree traversal will start at the root resource. If the first character of the path string is *not* the ``/`` character, the path is considered relative and resource tree traversal will begin at the resource object supplied to the function as the ``resource`` argument. If an empty string is passed as ``path``, the ``resource`` passed in will be returned. Resource path strings must be escaped in the following manner: each path segment must be encoded as UTF-8 and escaped via Python's :mod:`urllib.quote`. For example, ``/path/to%20the/La%20Pe%C3%B1a`` (absolute) or ``to%20the/La%20Pe%C3%B1a`` (relative). 
The :func:`pyramid.traversal.resource_path` function generates strings which follow these rules (albeit only absolute ones). Rules for passing a *tuple* as the ``path`` argument: if the first element in the path tuple is the empty string (for example ``('', 'a', 'b', 'c')``, the path is considered absolute and the resource tree traversal will start at the resource tree root object. If the first element in the path tuple is not the empty string (for example ``('a', 'b', 'c')``), the path is considered relative and resource tree traversal will begin at the resource object supplied to the function as the ``resource`` argument. If an empty sequence is passed as ``path``, the ``resource`` passed in itself will be returned. No URL-quoting or UTF-8-encoding of individual path segments within the tuple is required (each segment may be any string representing a resource name). Explanation of the decoding of ``path`` segment values during traversal: Each segment is URL-unquoted, and UTF-8 decoded. Each segment is assumed to be encoded using the UTF-8 encoding (or a subset, such as ASCII); a :exc:`pyramid.exceptions.URLDecodeError` is raised if a segment cannot be decoded. If a segment name is empty or if it is ``.``, it is ignored. If a segment name is ``..``, the previous segment is deleted, and the ``..`` is ignored. As a result of this process, the return values ``view_name``, each element in the ``subpath``, each element in ``traversed``, and each element in the ``virtual_root_path`` will be decoded strings. """ if is_nonstr_iter(path): # the traverser factory expects PATH_INFO to be a string and it # expects path segments to be utf-8 and # urlencoded (it's the same traverser which accepts PATH_INFO # from user agents; user agents always send strings). if path: path = _join_path_tuple(tuple(path)) else: path = '' # The user is supposed to pass us a string object, never Unicode. In # practice, however, users indeed pass Unicode to this API. 
If they do # pass a Unicode object, its data *must* be entirely encodeable to ASCII, # so we encode it here as a convenience to the user and to prevent # second-order failures from cropping up (all failures will occur at this # step rather than later down the line as the result of calling # ``traversal_path``). path = ascii_(path) if path and path[0] == '/': resource = find_root(resource) reg = get_current_registry() request_factory = reg.queryUtility(IRequestFactory) if request_factory is None: from pyramid.request import Request # avoid circdep request_factory = Request request = request_factory.blank(path) request.registry = reg traverser = reg.queryAdapter(resource, ITraverser) if traverser is None: traverser = ResourceTreeTraverser(resource) return traverser(request) def resource_path_tuple(resource, *elements): """ Return a tuple representing the absolute physical path of the ``resource`` object based on its position in a resource tree, e.g ``('', 'foo', 'bar')``. Any positional arguments passed in as ``elements`` will be appended as elements in the tuple representing the resource path. For instance, if the resource's path is ``('', 'foo', 'bar')`` and elements equals ``('a', 'b')``, the returned tuple will be ``('', 'foo', 'bar', 'a', 'b')``. The first element of this tuple will always be the empty string (a leading empty string element in a path tuple represents that the path is absolute). This function is a logical inverse of :func:`pyramid.traversal.find_resource`: it can be used to generate path references that can later be resolved by that function. The ``resource`` passed in *must* be :term:`location`-aware. .. note:: Each segment in the path tuple returned will equal the ``__name__`` attribute of the resource it represents within the resource tree. Each of these segments *should* be a string (as per the contract of :term:`location`-awareness). However, no conversion or safety checking of resource names is performed. 
For instance, if one of the resources in your tree has a ``__name__`` which (by error) is a dictionary, that dictionary will be placed in the path tuple; no warning or error will be given. .. note:: The :term:`root` resource *must* have a ``__name__`` attribute with a value of either ``None`` or the empty string for path tuples to be generated properly. If the root resource has a non-null ``__name__`` attribute, its name will be the first element in the generated path tuple rather than the empty string. """ return tuple(_resource_path_list(resource, *elements)) model_path_tuple = resource_path_tuple # b/w compat (forever) def _resource_path_list(resource, *elements): """Implementation detail shared by resource_path and resource_path_tuple""" path = [loc.__name__ or '' for loc in lineage(resource)] path.reverse() path.extend(elements) return path _model_path_list = _resource_path_list # b/w compat, not an API def virtual_root(resource, request): """ Provided any :term:`resource` and a :term:`request` object, return the resource object representing the :term:`virtual root` of the current :term:`request`. Using a virtual root in a :term:`traversal` -based :app:`Pyramid` application permits rooting. For example, the resource at the traversal path ``/cms`` will be found at ``http://example.com/`` instead of rooting it at ``http://example.com/cms/``. If the ``resource`` passed in is a context obtained via :term:`traversal`, and if the ``HTTP_X_VHM_ROOT`` key is in the WSGI environment, the value of this key will be treated as a 'virtual root path': the :func:`pyramid.traversal.find_resource` API will be used to find the virtual root resource using this path; if the resource is found, it will be returned. If the ``HTTP_X_VHM_ROOT`` key is not present in the WSGI environment, the physical :term:`root` of the resource tree will be returned instead. Virtual roots are not useful at all in applications that use :term:`URL dispatch`. 
Contexts obtained via URL dispatch don't really support being virtually rooted (each URL dispatch context is both its own physical and virtual root). However if this API is called with a ``resource`` argument which is a context obtained via URL dispatch, the resource passed in will be returned unconditionally.""" try: reg = request.registry except AttributeError: reg = get_current_registry() url_adapter = reg.queryMultiAdapter((resource, request), IResourceURL) if url_adapter is None: url_adapter = ResourceURL(resource, request) vpath, rpath = url_adapter.virtual_path, url_adapter.physical_path if rpath != vpath and rpath.endswith(vpath): vroot_path = rpath[: -len(vpath)] return find_resource(resource, vroot_path) try: return request.root except AttributeError: return find_root(resource) def traversal_path(path): """Variant of :func:`pyramid.traversal.traversal_path_info` suitable for decoding paths that are URL-encoded. If this function is passed a string, it *must* be directly encodeable to ASCII. For example, '/foo' will work but '/<unprintable unicode>' (a string object with characters that cannot be encoded to ASCII) will not. A :exc:`UnicodeEncodeError` will be raised if the string cannot be encoded directly to ASCII. """ if isinstance(path, str): # must not possess characters outside ascii path = path.encode('ascii') # we unquote this path exactly like a PEP 3333 server would path = unquote_bytes_to_wsgi(path) # result will be a native string return traversal_path_info(path) # result will be a tuple of unicode @lru_cache(1000) def traversal_path_info(path): """Given``path``, return a tuple representing that path which can be used to traverse a resource tree. ``path`` is assumed to be an already-URL-decoded ``str`` type as if it had come to us from an upstream WSGI server as the ``PATH_INFO`` environ variable. The ``path`` is first decoded from its WSGI representation to text. 
Per the :pep:`3333` spec, ``path`` is first encoded to bytes using the Latin-1 encoding; the resulting set of bytes is subsequently decoded to text using the UTF-8 encoding; a :exc:`pyramid.exc.URLDecodeError` is raised if the URL cannot be decoded. The ``path`` is split on slashes, creating a list of segments. If a segment name is empty or if it is ``.``, it is ignored. If a segment name is ``..``, the previous segment is deleted, and the ``..`` is ignored. Examples: ``/`` () ``/foo/bar/baz`` ('foo', 'bar', 'baz') ``foo/bar/baz`` ('foo', 'bar', 'baz') ``/foo/bar/baz/`` ('foo', 'bar', 'baz') ``/foo//bar//baz/`` ('foo', 'bar', 'baz') ``/foo/bar/baz/..`` ('foo', 'bar') ``/my%20archives/hello`` ('my archives', 'hello') ``/archives/La%20Pe%C3%B1a`` ('archives', '<unprintable unicode>') .. note:: This function does not generate the same type of tuples that :func:`pyramid.traversal.resource_path_tuple` does. In particular, the leading empty string is not present in the tuple it returns, unlike tuples returned by :func:`pyramid.traversal.resource_path_tuple`. As a result, tuples generated by ``traversal_path`` are not resolveable by the :func:`pyramid.traversal.find_resource` API. ``traversal_path`` is a function mostly used by the internals of :app:`Pyramid` and by people writing their own traversal machinery, as opposed to users writing applications in :app:`Pyramid`. 
""" try: path = decode_path_info(path) # result will be Unicode except UnicodeDecodeError as e: raise URLDecodeError(e.encoding, e.object, e.start, e.end, e.reason) return split_path_info(path) # result will be tuple of Unicode @lru_cache(1000) def split_path_info(path): # suitable for splitting an already-unquoted-already-decoded (unicode) # path value path = path.strip('/') clean = [] for segment in path.split('/'): if not segment or segment == '.': continue elif segment == '..': if clean: del clean[-1] else: clean.append(segment) return tuple(clean) # see PEP 3333 for why we encode to latin-1 then decode to utf-8 def decode_path_info(path): return path.encode('latin-1').decode('utf-8') # see PEP 3333 for why we decode the path to latin-1 def unquote_bytes_to_wsgi(bytestring): return unquote_to_bytes(bytestring).decode('latin-1') _segment_cache = {} def quote_path_segment(segment, safe=PATH_SEGMENT_SAFE): """ Return a quoted representation of a 'path segment' (such as the string ``__name__`` attribute of a resource) as a string. If the ``segment`` passed in is a bytes object, it is decoded as a UTF-8 string. The result is then URL-quoted using Python's ``urllib.quote``. If the segment passed in is not bytes nor a string, an error will be raised. The return value of ``quote_path_segment`` is always a string. You may pass a string of characters that need not be encoded as the ``safe`` argument to this function. This corresponds to the ``safe`` argument to :mod:`urllib.quote`. .. note:: The return value for each segment passed to this function is cached in a module-scope dictionary for speed: the cached version is returned when possible rather than recomputing the quoted version. No cache emptying is ever done for the lifetime of an application, however. If you pass arbitrary user-supplied strings to this function (as opposed to some bounded set of values from a 'working set' known to your application), it may become a memory leak. 
""" # The bit of this code that deals with ``_segment_cache`` is an # optimization: we cache all the computation of URL path segments # in this module-scope dictionary with the original string as the # key, so we can look it up later without needing to reencode # or re-url-quote it try: if segment.__class__ not in (str, bytes): segment = str(segment) return _segment_cache[(segment, safe)] except KeyError: result = url_quote(text_(segment, 'utf-8'), safe) # we don't need a lock to mutate _segment_cache, as the below # will generate exactly one Python bytecode (STORE_SUBSCR) _segment_cache[(segment, safe)] = result return result @implementer(ITraverser) class ResourceTreeTraverser: """A resource tree traverser that should be used (for speed) when every resource in the tree supplies a ``__name__`` and ``__parent__`` attribute (ie. every resource in the tree is :term:`location` aware) .""" VH_ROOT_KEY = VH_ROOT_KEY VIEW_SELECTOR = '@@' def __init__(self, root): self.root = root def __call__(self, request): environ = request.environ matchdict = request.matchdict if matchdict is not None: path = matchdict.get('traverse', '/') or '/' if is_nonstr_iter(path): # this is a *traverse stararg (not a {traverse}) # routing has already decoded these elements, so we just # need to join them path = '/' + '/'.join(path) or '/' subpath = matchdict.get('subpath', ()) if not is_nonstr_iter(subpath): # this is not a *subpath stararg (just a {subpath}) # routing has already decoded this string, so we just need # to split it subpath = split_path_info(subpath) else: # this request did not match a route subpath = () try: # empty if mounted under a path in mod_wsgi, for example path = request.path_info or '/' except KeyError: # if environ['PATH_INFO'] is just not there path = '/' except UnicodeDecodeError as e: raise URLDecodeError( e.encoding, e.object, e.start, e.end, e.reason ) if self.VH_ROOT_KEY in environ: # HTTP_X_VHM_ROOT vroot_path = decode_path_info(environ[self.VH_ROOT_KEY]) 
vroot_tuple = split_path_info(vroot_path) vpath = ( vroot_path + path ) # both will (must) be unicode or asciistr vroot_idx = len(vroot_tuple) - 1 else: vroot_tuple = () vpath = path vroot_idx = -1 root = self.root ob = vroot = root if vpath == '/': # invariant: vpath must not be empty # prevent a call to traversal_path if we know it's going # to return the empty tuple vpath_tuple = () else: # we do dead reckoning here via tuple slicing instead of # pushing and popping temporary lists for speed purposes # and this hurts readability; apologies i = 0 view_selector = self.VIEW_SELECTOR vpath_tuple = split_path_info(vpath) for segment in vpath_tuple: if segment[:2] == view_selector: return { 'context': ob, 'view_name': segment[2:], 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: getitem = ob.__getitem__ except AttributeError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: next = getitem(segment) except KeyError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } if i == vroot_idx: vroot = next ob = next i += 1 return { 'context': ob, 'view_name': '', 'subpath': subpath, 'traversed': vpath_tuple, 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } ModelGraphTraverser = ( ResourceTreeTraverser # b/w compat, not API, used in wild ) @implementer(IResourceURL) class ResourceURL: VH_ROOT_KEY = VH_ROOT_KEY def __init__(self, resource, request): physical_path_tuple = resource_path_tuple(resource) physical_path = _join_path_tuple(physical_path_tuple) if physical_path_tuple != ('',): physical_path_tuple = physical_path_tuple + ('',) physical_path = 
physical_path + '/' virtual_path = physical_path virtual_path_tuple = physical_path_tuple environ = request.environ vroot_path = environ.get(self.VH_ROOT_KEY) # if the physical path starts with the virtual root path, trim it out # of the virtual path if vroot_path is not None: vroot_path = vroot_path.rstrip('/') if vroot_path and physical_path.startswith(vroot_path): vroot_path_tuple = tuple(vroot_path.split('/')) numels = len(vroot_path_tuple) virtual_path_tuple = ('',) + physical_path_tuple[numels:] virtual_path = physical_path[len(vroot_path) :] self.virtual_path = virtual_path # IResourceURL attr self.physical_path = physical_path # IResourceURL attr self.virtual_path_tuple = virtual_path_tuple # IResourceURL attr (1.5) self.physical_path_tuple = ( physical_path_tuple # IResourceURL attr (1.5) ) @lru_cache(1000) def _join_path_tuple(tuple): return tuple and '/'.join([quote_path_segment(x) for x in tuple]) or '/' class DefaultRootFactory: __parent__ = None __name__ = None def __init__(self, request): pass
41.915789
79
0.651588
4,382
31,856
4.640575
0.12346
0.02867
0.015736
0.012147
0.440521
0.387067
0.336661
0.313646
0.297467
0.288812
0
0.0042
0.260014
31,856
759
80
41.971014
0.85844
0.645781
0
0.255814
0
0
0.038244
0
0.003876
0
0
0
0
1
0.073643
false
0.003876
0.042636
0.011628
0.232558
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dadf3ed9b04f6470f12f0113ed470f0b639c3816
291
py
Python
setup.py
WolfyLPDC/spanish2datetime
3c5db5b0e6e59567286c9f394890557d9d50e162
[ "MIT" ]
1
2021-11-22T17:40:27.000Z
2021-11-22T17:40:27.000Z
setup.py
msolefonte/spanish2datetime
3c5db5b0e6e59567286c9f394890557d9d50e162
[ "MIT" ]
null
null
null
setup.py
msolefonte/spanish2datetime
3c5db5b0e6e59567286c9f394890557d9d50e162
[ "MIT" ]
null
null
null
from setuptools import setup setup( name='spanish2datetime', version='1.0', description='Python3 script able to turn spanish text into datetime.', author="Marc Solé", author_email="marcsole@insomniacwolves.com", license='LICENSE', packages=['spanish2datetime'] )
26.454545
74
0.707904
32
291
6.40625
0.875
0
0
0
0
0
0
0
0
0
0
0.020747
0.171821
291
11
75
26.454545
0.829876
0
0
0
0
0
0.458904
0.09589
0
0
0
0
0
1
0
true
0
0.1
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
dadf717f7a0bf06444bc04239a755273afb5cf77
3,485
py
Python
Lib/site-packages/visual/examples/faces_heightfield.py
raychorn/svn_Python-2.5.1
425005b1b489ba44ec0bb989e077297e8953d9be
[ "PSF-2.0" ]
null
null
null
Lib/site-packages/visual/examples/faces_heightfield.py
raychorn/svn_Python-2.5.1
425005b1b489ba44ec0bb989e077297e8953d9be
[ "PSF-2.0" ]
null
null
null
Lib/site-packages/visual/examples/faces_heightfield.py
raychorn/svn_Python-2.5.1
425005b1b489ba44ec0bb989e077297e8953d9be
[ "PSF-2.0" ]
null
null
null
## Demonstrates some techniques for working with "faces", and ## shows how to build a height field (a common feature request) ## with it. ## David Scherer July 2001 from visual import * class Model: def __init__(self): self.frame = frame() self.model = faces(frame=self.frame) self.twoSided = true # add every face twice with opposite normals def FacetedTriangle(self, v1, v2, v3, color=color.white): """Add a triangle to the model, apply faceted shading automatically""" v1 = vector(v1) v2 = vector(v2) v3 = vector(v3) try: normal = norm( cross(v2-v1, v3-v1) ) except: normal = vector(0,0,0) for v in (v1,v2,v3): self.model.append( pos=v, color=color, normal=normal ) if self.twoSided: for v in (v1,v3,v2): self.model.append( pos=v, color=color, normal=-normal ) def FacetedPolygon(self, *v): """Appends a planar polygon of any number of vertices to the model, applying faceted shading automatically.""" for t in range(len(v)-2): self.FacetedTriangle( v[0], v[t+1], v[t+2] ) def DoSmoothShading(self): """Change a faceted model to smooth shaded, by averaging normals at coinciding vertices. This is a very slow and simple smooth shading implementation which has to figure out the connectivity of the model and does not attempt to detect sharp edges. It attempts to work even in two-sided mode where there are two opposite normals at each vertex. It may fail somehow in pathological cases. 
""" pos = self.model.pos normal = self.model.normal vertex_map = {} # vertex position -> vertex normal vertex_map_backface = {} for i in range( len(pos) ): tp = tuple(pos[i]) old_normal = vertex_map.get( tp, (0,0,0) ) if dot(old_normal, normal[i]) >= 0: vertex_map[tp] = normal[i] + old_normal else: vertex_map_backface[tp] = normal[i] + vertex_map_backface.get(tp, (0,0,0)) for i in range( len(pos) ): tp = tuple(pos[i]) if dot(vertex_map[tp], normal[i]) >= 0: normal[i] = vertex_map[tp] and norm( vertex_map[ tp ] ) else: normal[i] = vertex_map_backface[tp] and norm(vertex_map_backface[tp] ) def DrawNormal(self, scale): pos = self.model.pos normal = self.model.normal for i in range(len(pos)): arrow(pos=pos[i], axis=normal[i]*scale) class Mesh (Model): def __init__(self, xvalues, yvalues, zvalues): Model.__init__(self) points = zeros( xvalues.shape + (3,), Float ) points[...,0] = xvalues points[...,1] = yvalues points[...,2] = zvalues for i in range(zvalues.shape[0]-1): for j in range(zvalues.shape[1]-1): self.FacetedPolygon( points[i,j], points[i,j+1], points[i+1,j+1], points[i+1,j] ) ## Graph a function of two variables (a height field) x = arange(-1,1,2./20) y = arange(-1,1,2./20) z = zeros( (len(x),len(y)), Float ) x,y = x[:,NewAxis]+z, y+z m = Mesh( x, (sin(x*pi)+sin(y*pi))*0.2, y ) m.DoSmoothShading() ##m.DrawNormal(0.05)
35.561224
91
0.556385
479
3,485
3.983299
0.325679
0.051887
0.044549
0.023061
0.212788
0.129979
0.110063
0.110063
0.072327
0.02935
0
0.027554
0.323099
3,485
97
92
35.927835
0.781263
0.245624
0
0.177419
0
0
0
0
0
0
0
0
0
1
0.096774
false
0
0.016129
0
0.145161
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae1d10cf13014392041b42324790708bc97af7d
766
py
Python
tests/test_appcli_jwt.py
Carrene/restfulpy
21472af0415fffc23f8003b6074afc2de2e0b414
[ "MIT" ]
25
2017-06-19T10:01:36.000Z
2019-08-03T10:35:41.000Z
tests/test_appcli_jwt.py
Carrene/restfulpy
21472af0415fffc23f8003b6074afc2de2e0b414
[ "MIT" ]
209
2017-09-27T11:59:29.000Z
2019-08-10T11:10:48.000Z
tests/test_appcli_jwt.py
Carrene/restfulpy
21472af0415fffc23f8003b6074afc2de2e0b414
[ "MIT" ]
16
2017-11-22T15:44:31.000Z
2019-04-14T12:29:34.000Z
import base64 import json from bddcli import Given, given, when, stdout, stderr, Application, status from restfulpy import Application as RestfulpyApplication foo = RestfulpyApplication(name='jwt') app = Application('foo', 'tests.test_appcli_jwt:foo.cli_main') def test_jwt(): with Given(app, 'jwt create'): assert stderr == '' assert status == 0 assert len(stdout) > 10 when(given + '\'{"foo": 1}\'') assert stderr == '' assert status == 0 header, payload, signature = stdout.encode().split(b'.') payload = base64.urlsafe_b64decode(payload) assert json.loads(payload) == {'foo': 1} if __name__ == '__main__': # pragma: no cover foo.cli_main(['jwt', 'create', '{"foo": 1}'])
25.533333
74
0.627937
91
766
5.131868
0.483516
0.025696
0.042827
0.102784
0.107066
0
0
0
0
0
0
0.021922
0.225849
766
29
75
26.413793
0.765599
0.020888
0
0.210526
0
0
0.113788
0.045515
0
0
0
0
0.315789
1
0.052632
false
0
0.210526
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae25a006d1cb1117ed8b2fe4c8f61d85c1dfe71
476
py
Python
classes/inheritance.py
VictorPinas/python-basics
63358322166becf69bbd2d96c6bffa531456ac5d
[ "MIT" ]
null
null
null
classes/inheritance.py
VictorPinas/python-basics
63358322166becf69bbd2d96c6bffa531456ac5d
[ "MIT" ]
null
null
null
classes/inheritance.py
VictorPinas/python-basics
63358322166becf69bbd2d96c6bffa531456ac5d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Jan 22 18:36:34 2022 @author: victor """ class SARDog(Dog): """Represent a search dog.""" def __init__(self, name): """Initilize the sar dog.""" super().__init__(name) def search(self): """Simulate search.""" print(self.name + " is searching.") my_dog = SARDog("Willie") print(my_dog.name + " is a search dog.") my_dog.sit() my_dog.search()
19.833333
43
0.565126
64
476
4.015625
0.609375
0.077821
0.077821
0
0
0
0
0
0
0
0
0.039548
0.256303
476
24
44
19.833333
0.686441
0.336134
0
0
0
0
0.12628
0
0
0
0
0
0
1
0.222222
false
0
0
0
0.333333
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
dae369d14e4029c4e7a65aeb8f3575d751e3c7bb
2,277
py
Python
hw4/visualizations.py
zeynepCankara/NTU_DLCV2019
2dc44584ec7b9e1d84e688551eb8cef48d501b45
[ "MIT" ]
1
2022-01-17T14:28:46.000Z
2022-01-17T14:28:46.000Z
hw4/visualizations.py
zeynepCankara/NTU_DLCV2019
2dc44584ec7b9e1d84e688551eb8cef48d501b45
[ "MIT" ]
null
null
null
hw4/visualizations.py
zeynepCankara/NTU_DLCV2019
2dc44584ec7b9e1d84e688551eb8cef48d501b45
[ "MIT" ]
2
2021-11-08T19:05:57.000Z
2022-01-17T14:28:48.000Z
import os import numpy as np import matplotlib.pyplot as plt def plot_embedding(X, y, training_mode, save_name): """ Reference: https://github.com/NaJaeMin92/pytorch_DANN/ Gets the t-sne output and actions label encodings plot T-SNE """ x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure(figsize=(10, 10)) for i in range(len(y)): # X.shape[0] : 1024 # plot colored number if y[i] == 0: colors = (0.0, 0.0, 1.0, 1.0) elif y[i] == 1: colors = (1.0, 0.0, 0.0, 1.0) elif y[i] == 2: colors = (1.0, 1.0, 0.0, 1.0) elif y[i] == 3: colors = (1.0, 1.0, 1.0, 1.0) elif y[i] == 4: colors = (1.0, 0.5, 0.0, 1.0) elif y[i] == 5: colors = (1.0, 0.0, 0.5, 1.0) elif y[i] == 6: colors = (1.0, 1.0, 0.0, 0.0) elif y[i] == 7: colors = (1.0, 0.0, 1.0, 1.0) elif y[i] == 8: colors = (0.5, 0.5, 0.5, 0.5) elif y[i] == 9: colors = (0.5, 0.2, 0.2, 0.2) elif y[i] == 10: colors = (1.0, 0.5, 0.2, 1.0) else: colors = (1.0, 0.2, 0.5, 1.0) plt.text(X[i, 0], X[i, 1], str(y[i]), color=colors, fontdict={'weight': 'bold', 'size': 9}) plt.xticks([]), plt.yticks([]) if save_name is not None: plt.title(save_name) save_folder = 'saved_plot' if not os.path.exists(save_folder): os.makedirs(save_folder) fig_name = 'saved_plot/' + str(training_mode) + '_' + str(save_name) + '.png' plt.savefig(fig_name) print('{} is saved'.format(fig_name)) def plot_p1_train_info(training_loss, val_accuracy, save_dir = "./saved_plot/problem1_loss_acc.png"): """ Plots training Loss and Validation Acc """ plt.figure(figsize=(20,8)) plt.subplot(1,2,1) plt.plot(training_loss, color = 'red') plt.title("Training Loss vs # Epochs") plt.ylabel("Cross Entropy Loss") plt.xlabel("Number of Epochs") plt.subplot(1,2,2) plt.plot(val_accuracy, color = 'blue') plt.title("Validation Accuracy vs # Epochs") plt.ylabel("Accuracy") plt.xlabel("Number of Epochs") plt.savefig(save_dir) plt.show()
28.4625
101
0.527448
384
2,277
3.039063
0.278646
0.041131
0.030848
0.047986
0.186804
0.169666
0.077121
0.050557
0.022279
0.022279
0
0.083903
0.293368
2,277
79
102
28.822785
0.641392
0.084761
0
0.034483
0
0
0.101328
0.016724
0
0
0
0
0
1
0.034483
false
0
0.051724
0
0.086207
0.017241
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae48a50c2d41cee7a17a1643cdfb185e9a03591
3,274
py
Python
experiments_histogram.py
LaGuer/DeepSphere
5a02ac01b8fe3a5b9814ae9efd20d10e0c843209
[ "MIT" ]
71
2018-10-30T09:46:19.000Z
2020-02-15T12:02:00.000Z
experiments_histogram.py
aosheng1996/DeepSphere
ebcf162eaa6e23c1c92dbc84e0908695bb7245d7
[ "MIT" ]
9
2018-10-31T02:32:00.000Z
2020-02-12T08:55:43.000Z
experiments_histogram.py
aosheng1996/DeepSphere
ebcf162eaa6e23c1c92dbc84e0908695bb7245d7
[ "MIT" ]
16
2018-10-31T00:32:33.000Z
2020-01-19T21:24:46.000Z
#!/usr/bin/env python3 # coding: utf-8 """ Script to run the baseline experiment: SVM classification with histogram features. """ import os import sys import numpy as np from deepsphere import experiment_helper from grid import pgrid def single_experiment(sigma, order, sigma_noise, path): """Run as experiment. Check the notebook `part_sphere.ipynb` to get more insides about this code. """ Nside = 1024 print('Solve the histogram problem for sigma {}, order {}, noise {}'.format(sigma, order, sigma_noise), flush=True) EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma'.format( Nside, sigma_noise, order, sigma) x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order) x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std) if order==4: augmentation = 20 else: augmentation = 40 ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type='histogram', augmentation=augmentation) features_train, labels_train, features_validation, labels_validation, features_test = ret ntrain = len(features_train)//augmentation nsamples = list(ntrain // 12 * np.linspace(1, 6, num=6).astype(np.int)) nsamples += list(ntrain // 2 * np.linspace(1, augmentation*2, num=40).astype(np.int)) err_train = np.zeros(shape=[len(nsamples)]) err_validation = np.zeros(shape=[len(nsamples)]) err_train[:] = np.nan err_validation[:] = np.nan for i, n in enumerate(nsamples): print('{} Solve it for {} samples'.format(i, n), flush=True) err_train[i], err_validation[i], _ = experiment_helper.err_svc_linear( features_train[:n], labels_train[:n], features_validation, labels_validation) e_train, e_validation, C = experiment_helper.err_svc_linear( features_train, labels_train, features_validation, labels_validation) print('The validation error is {}%'.format(e_validation * 100), flush=True) # Cheating in favor of SVM e_train, e_test = experiment_helper.err_svc_linear_single(C, 
features_train, labels_train, features_test, labels_test) print('The test error is {}%'.format(e_test * 100), flush=True) np.savez(path + EXP_NAME, [nsamples, err_train, err_validation, e_test]) return e_test if __name__ == '__main__': if len(sys.argv) > 1: sigma = int(sys.argv[1]) order = int(sys.argv[2]) sigma_noise = float(sys.argv[3]) grid = [(sigma, order, sigma_noise)] else: grid = pgrid() path = 'results/histogram/' os.makedirs(path, exist_ok=True) for sigma, order, sigma_noise in grid: print('Launch experiment for sigma={}, order={}, noise={}'.format(sigma, order, sigma_noise)) res = single_experiment(sigma, order, sigma_noise, path) filepath = os.path.join(path, 'histogram_results_list_sigma{}'.format(sigma)) new_data = [order, sigma_noise, res] if os.path.isfile(filepath+'.npz'): results = np.load(filepath+'.npz')['data'].tolist() else: results = [] results.append(new_data) np.savez(filepath, data=results)
35.204301
153
0.678986
442
3,274
4.78733
0.29638
0.051985
0.056711
0.066163
0.245274
0.216919
0.192344
0.121928
0.041588
0
0
0.012576
0.198534
3,274
92
154
35.586957
0.793826
0.072999
0
0.050847
0
0
0.099003
0.022259
0
0
0
0
0
1
0.016949
false
0
0.084746
0
0.118644
0.084746
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae6cac23452498b7274b531bf02a845882db8e7
2,827
py
Python
tests/deepspeed/ds_pipe.py
drunkcoding/model-inference
02d2240bc7052fa32223a80fa63625fe681db102
[ "MIT" ]
1
2021-11-15T19:07:13.000Z
2021-11-15T19:07:13.000Z
tests/deepspeed/ds_pipe.py
drunkcoding/model-inference
02d2240bc7052fa32223a80fa63625fe681db102
[ "MIT" ]
null
null
null
tests/deepspeed/ds_pipe.py
drunkcoding/model-inference
02d2240bc7052fa32223a80fa63625fe681db102
[ "MIT" ]
null
null
null
import os import torch.cuda from hfutils.model_pipe import T5DeepSpeedPipe import deepspeed from tqdm import tqdm from transformers.models.t5.configuration_t5 import T5Config from transformers import DataCollatorForSeq2Seq, default_data_collator import argparse from deepspeed.utils import RepeatingLoader from hfutils.arg_parser import HfArguments from hfutils.loader import ModelLoader, DatasetLoader from torch.utils.data import DataLoader from torch.utils.data.dataset import Dataset from datasets import load_dataset, load_metric, concatenate_datasets os.environ['TOKENIZERS_PARALLELISM'] = 'false' args = HfArguments() data_args = args.data_args dataset_loader = DatasetLoader(args) tokenizer, _ = ModelLoader(args).load(load_model=False) eval_dataset = dataset_loader.load( tokenizer, partition="validation", create_dataloader=False ) batch_size = data_args.eval_bsz user_path = os.path.expanduser("~") # model_path = os.path.join(user_path, "HuggingFace", "google", "t5-xl-lm-adapt") # model_path = "/mnt/yavin/checkpoints/t5-xl-lm-adapt/sst2/checkpoint-1380/" # model_path = "google/t5-small-lm-adapt" model_path = args.model_args.model_name_or_path if data_args.pad_to_max_length: data_collator = default_data_collator else: data_collator = DataCollatorForSeq2Seq(tokenizer) class PipeDataset(Dataset): def __init__(self, dataset: Dataset): self.dataset = dataset def __len__(self): return len(self.dataset) def __getitem__(self, i): return { "encoder_input_ids": self.dataset[i]['input_ids'], "encoder_attention_mask": self.dataset[i]['attention_mask'], } eval_dataset = concatenate_datasets([eval_dataset]*70) eval_dataset = PipeDataset(eval_dataset) # print(eval_dataset[0]) def eval_generator(): eval_dataloader = DataLoader( eval_dataset, shuffle=True, collate_fn=data_collator, batch_size=batch_size, ) for batch in tqdm(eval_dataloader, desc="eval_generator"): shape = batch["encoder_input_ids"].shape yield (( batch["encoder_input_ids"], batch["encoder_attention_mask"], ), 
torch.zeros(shape[0])) # print(shape) # yield ( # batch["encoder_input_ids"], # batch["encoder_attention_mask"], # ) # exit() config = T5Config.from_pretrained( model_path ) deepspeed.init_distributed() model = T5DeepSpeedPipe(config, num_stages=torch.cuda.device_count()) engine, _, _, _ = deepspeed.initialize(args.ds_args, model=model) for step, batch in enumerate(RepeatingLoader(eval_generator())): if step > 500: break engine.eval_batch(iter([batch]*1), compute_loss=False) # engine.eval_batch(RepeatingLoader(eval_generator()), compute_loss=False)
30.074468
81
0.723382
345
2,827
5.649275
0.344928
0.039507
0.030785
0.030785
0.056439
0.056439
0.056439
0.056439
0.056439
0.056439
0
0.010261
0.172621
2,827
93
82
30.397849
0.823001
0.140432
0
0
0
0
0.070306
0.027295
0
0
0
0
0
1
0.063492
false
0
0.222222
0.031746
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae6f59082823ad960155ff941d486f9928ce8dd
2,694
py
Python
Polynomial_dict.py
chapman-phys227-2016s/hw-7-patti102
8688cbb3649ae623ec870ea380cd7470d69a2517
[ "MIT" ]
null
null
null
Polynomial_dict.py
chapman-phys227-2016s/hw-7-patti102
8688cbb3649ae623ec870ea380cd7470d69a2517
[ "MIT" ]
null
null
null
Polynomial_dict.py
chapman-phys227-2016s/hw-7-patti102
8688cbb3649ae623ec870ea380cd7470d69a2517
[ "MIT" ]
null
null
null
import numpy import copy class Polynomial(object): def __init__(self, coefficients): self.coeff = coefficients def __call__(self, x): """Evaluate the polynomial.""" s = 0 for degree in self.coeff: s += self.coeff[degree]*x**degree return s def __add__(self, other): """Return self + other as Polynomial object.""" result_coeff = copy.deepcopy(self.coeff) for key in other.coeff: if key in self.coeff: result_coeff[key] = result_coeff[key] + other.coeff[key] else: result_coeff[key] = other.coeff[key] return Polynomial(result_coeff) def __mul__(self, other): result_coeff = {} for keyself in self.coeff: for keyother in other.coeff: if keyself + keyother in result_coeff: result_coeff[keyself+keyother] = result_coeff[keyself+keyother] + self.coeff[keyself] * other.coeff[keyother] else: result_coeff[keyself+keyother] = self.coeff[keyself] * other.coeff[keyother] return Polynomial(result_coeff) def differentiate(self): """Differentiate this polynomial in-place.""" for i in range(1, len(self.coeff)): self.coeff[i-1] = i*self.coeff[i] del self.coeff[-1] def derivative(self): """Copy this polynomial and return its derivative.""" dpdx = Polynomial(self.coeff[:]) # make a copy dpdx.differentiate() return dpdx def __str__(self): s = '' for i in range(0, len(self.coeff)): if self.coeff[i] != 0: s += ' + %g*x^%d' % (self.coeff[i], i) # Fix layout s = s.replace('+ -', '- ') s = s.replace('x^0', '1') s = s.replace(' 1*', ' ') s = s.replace('x^1 ', 'x ') #s = s.replace('x^1', 'x') # will replace x^100 by x^00 if s[0:3] == ' + ': # remove initial + s = s[3:] if s[0:3] == ' - ': # fix spaces for initial - s = '-' + s[3:] return s def simplestr(self): s = '' for i in range(0, len(self.coeff)): s += ' + %g*x^%d' % (self.coeff[i], i) return s def test_Polynomial(): p1 = Polynomial({1:1, 100:-3}) p2 = Polynomial({20:1, 1:-1, 100:4}) assert (p1.__add__(p2)).coeff == {1:0, 20:1, 100:1}, 'Improper addition.' 
assert(p1.__mul__(p2)).coeff == {2:-1, 21:1, 101:7, 120:-3, 200:-12}, 'Improper multiplication.' if __name__ == '__main__': import sys if len(sys.argv) >= 2 and sys.argv[1] == 'verify': test_Polynomial()
32.853659
129
0.528582
349
2,694
3.939828
0.234957
0.117818
0.036364
0.056727
0.251636
0.208
0.151273
0.151273
0.129455
0.129455
0
0.039869
0.320342
2,694
82
130
32.853659
0.711087
0.101336
0
0.206349
0
0
0.042571
0
0
0
0
0
0.031746
1
0.142857
false
0
0.047619
0
0.301587
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
dae7ac093bb2806c2693266b9bf2b6b2ab7584bc
9,341
py
Python
whatthefood/tests/test_ops.py
lychanl/WhatTheFood
94b6eec2c306e7e55b19395cde207d6e6beec7fe
[ "MIT" ]
null
null
null
whatthefood/tests/test_ops.py
lychanl/WhatTheFood
94b6eec2c306e7e55b19395cde207d6e6beec7fe
[ "MIT" ]
null
null
null
whatthefood/tests/test_ops.py
lychanl/WhatTheFood
94b6eec2c306e7e55b19395cde207d6e6beec7fe
[ "MIT" ]
null
null
null
import unittest import numpy as np import whatthefood.graph as graph class TestOps(unittest.TestCase): def test_matmul(self): x_arr = np.array([[1, 2], [2, 3], [3, 4]]) y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]]) x = graph.Constant(x_arr) y = graph.Constant(y_arr) m = graph.Matmul(x, y) np.testing.assert_array_equal(graph.run(m), np.matmul(x_arr, y_arr)) def test_matmul_grad(self): x_arr = np.array([[1, 2], [2, 3], [3, 4]]) y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]]) x = graph.Constant(x_arr) y = graph.Constant(y_arr) m = graph.Matmul(x, y) g = graph.Grad(m, [x, y]) mv, (g_x, g_y) = graph.run((m, g)) self.assertSequenceEqual(g_x.shape, x.shape) self.assertSequenceEqual(g_y.shape, y.shape) np.testing.assert_array_equal(g_x, np.matmul(np.ones_like(mv), y_arr.T)) np.testing.assert_array_equal(g_y, np.matmul(x_arr.T, np.ones_like(mv))) def test_matmul_vec(self): x = graph.Constant([1, 2, 3]) y = graph.Constant([[1, 2], [1, 3], [2, 4]]) m = graph.Matmul(x, y) np.testing.assert_array_equal([9, 20], graph.run(m)) def test_matmul_vec_grad(self): x = graph.Constant([1, 2, 3]) y = graph.Constant([[1, 2], [1, 3], [2, 4]]) m = graph.Matmul(x, y) g = graph.Grad(m, [x, y]) g_x, g_y = graph.run(g) np.testing.assert_array_equal([3, 4, 6], g_x) np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3]], g_y) def test_reduce_sum(self): x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]]) y1 = graph.ReduceSum(x, axis=0) y2 = graph.ReduceSum(x, axis=(1, -1)) y3 = graph.ReduceSum(x) np.testing.assert_array_equal([[9], [12]], graph.run(y1)) np.testing.assert_array_equal([3, 7, 11], graph.run(y2)) self.assertEqual(21, graph.run(y3)) def test_reduce_sum_batched(self): x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]) y_arr = np.array([9, 12]) x = graph.Placeholder(shape=(3, 2, 1), batched=True) y1 = graph.ReduceSum(x, (0, 2), True) y2 = graph.ReduceSum(x, (0, 2), False) np.testing.assert_array_equal(y_arr * 3, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])})) 
np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])})) def test_reduce_sum_grad(self): x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]]) y1 = graph.ReduceSum(x, axis=0) y2 = graph.ReduceSum(x, axis=(1, -1)) y3 = graph.ReduceSum(x) g1 = graph.Grad(y1, x) g2 = graph.Grad(y2, x) g3 = graph.Grad(y3, x) np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g1)) np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g2)) np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g3)) def test_reduce_sum_grad_batched(self): x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]) x = graph.Placeholder(shape=(3, 2, 1), batched=True) y1 = graph.ReduceSum(x, (0, 2), True) y2 = graph.ReduceSum(x, (0, 2), False) g1 = graph.Grad(y1, x) g2 = graph.Grad(y2, x) np.testing.assert_array_equal( [np.ones_like(x_arr), np.ones_like(x_arr)], graph.run(g1, {x: np.array([x_arr, 2 * x_arr])})) np.testing.assert_array_equal( [np.ones_like(x_arr), np.ones_like(x_arr)], graph.run(g2, {x: np.array([x_arr, 2 * x_arr])})) def test_reduce_mean(self): x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]]) y1 = graph.ReduceMean(x, axis=0) y2 = graph.ReduceMean(x, axis=(1, -1)) y3 = graph.ReduceMean(x) np.testing.assert_array_equal([[3], [4]], graph.run(y1)) np.testing.assert_array_equal([1.5, 3.5, 5.5], graph.run(y2)) self.assertEqual(3.5, graph.run(y3)) def test_reduce_mean_batched(self): x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]) y_arr = np.array([3, 4]) x = graph.Placeholder(shape=(3, 2, 1), batched=True) y1 = graph.ReduceMean(x, (0, 2), True) y2 = graph.ReduceMean(x, (0, 2), False) np.testing.assert_array_equal(y_arr * 1.5, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])})) np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])})) def test_reduce_mean_grad(self): x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]]) y1 = graph.ReduceMean(x, axis=0) y2 = 
graph.ReduceMean(x, axis=(1, -1)) y3 = graph.ReduceMean(x) g1 = graph.Grad(y1, x) g2 = graph.Grad(y2, x) g3 = graph.Grad(y3, x) np.testing.assert_array_equal(np.ones_like(x.value) / 3, graph.run(g1)) np.testing.assert_array_equal(np.ones_like(x.value) / 2, graph.run(g2)) np.testing.assert_array_equal(np.ones_like(x.value) / 6, graph.run(g3)) def test_reduce_mean_grad_batched(self): x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]]) x = graph.Placeholder(shape=(3, 2, 1), batched=True) y1 = graph.ReduceMean(x, (0, 2), True) y2 = graph.ReduceMean(x, (0, 2), False) g1 = graph.Grad(y1, x) g2 = graph.Grad(y2, x) np.testing.assert_array_equal( [np.ones_like(x_arr) / 6, np.ones_like(x_arr) / 6], graph.run(g1, {x: np.array([x_arr, 2 * x_arr])})) np.testing.assert_array_equal( [np.ones_like(x_arr) / 3, np.ones_like(x_arr) / 3], graph.run(g2, {x: np.array([x_arr, 2 * x_arr])})) def test_slice(self): x_arr = [[1, 2, 3], [4, 5, 6]] x = graph.Constant(x_arr) y1 = graph.Slice(x, (0, 1), (2, 2)) np.testing.assert_array_equal([[2], [5]], graph.run(y1)) def test_slice_batched(self): x_arr = np.array([[1, 2, 3], [4, 5, 6]]) x = graph.Placeholder((2, 3), True) y1 = graph.Slice(x, (0, 1), (2, 2)) np.testing.assert_array_equal( [[[2], [5]], [[-2], [-5]]], graph.run(y1, {x: np.array([x_arr, -x_arr])})) def test_slice_grad(self): x_arr = [[1, 2, 3], [4, 5, 6]] x = graph.Constant(x_arr) y1 = graph.Slice(x, (0, 1), (2, 2)) g1 = graph.Grad(y1, x) np.testing.assert_array_equal([[0, 1, 0], [0, 1, 0]], graph.run(g1)) def test_slice_grad_batched(self): x_arr = np.array([[1, 2, 3], [4, 5, 6]]) x = graph.Placeholder((2, 3), True) y1 = graph.Slice(x, (0, 1), (2, 2)) g1 = graph.Grad(y1, x) np.testing.assert_array_equal( [[[0, 1, 0], [0, 1, 0]], [[0, 1, 0], [0, 1, 0]]], graph.run(g1, {x: np.array([x_arr, -x_arr])})) def test_concatenate(self): x1 = graph.Constant([[1, 2, 3], [4, 5, 6]]) x2 = graph.Constant([[7, 8], [9, 10]]) y = graph.Concatenate((x1, x2), axis=1) 
np.testing.assert_array_equal([[1, 2, 3, 7, 8], [4, 5, 6, 9, 10]], graph.run(y)) def test_concatenate_batched(self): x1_arr = np.array([[1, 2, 3], [4, 5, 6]]) x2_arr = np.array([[7, 8], [9, 10]]) x1 = graph.Placeholder(x1_arr.shape, batched=True) x2 = graph.Placeholder(x2_arr.shape, batched=True) y = graph.Concatenate((x1, x2), axis=-1) np.testing.assert_array_equal( [[[1, 2, 3, -7, -8], [4, 5, 6, -9, -10]], [[-1, -2, -3, 7, 8], [-4, -5, -6, 9, 10]]], graph.run(y, {x1: np.array([x1_arr, -x1_arr]), x2: np.array([-x2_arr, x2_arr])})) def test_concatenate_grad(self): x1 = graph.Constant([[1, 2, 3], [4, 5, 6]]) x2 = graph.Constant([[7, 8], [9, 10]]) y = graph.Concatenate((x1, x2), axis=1) g = graph.Grad(y, (x1, x2)) g1, g2 = graph.run(g) np.testing.assert_array_equal(np.ones_like(x1.value), g1) np.testing.assert_array_equal(np.ones_like(x2.value), g2) def test_multply(self): x1 = graph.Constant([[1], [2], [3], [4]]) x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]]) y = graph.Multiply(x1, x2) np.testing.assert_array_equal([[1, -1], [4, -4], [9, -9], [16, -16]], graph.run(y)) def test_multply_grad(self): x1 = graph.Constant([[1], [2], [3], [4]]) x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]]) y = graph.Multiply(x1, x2) g = graph.Grad(y, (x1, x2)) g1, g2 = graph.run(g) np.testing.assert_array_equal([[0], [0], [0], [0]], g1) np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3], [4, 4]], g2) def test_divide(self): x1 = graph.Constant([1, 2, 3, 4]) x2 = graph.Constant([4, 3, 2, 1]) y = graph.Divide(x1, x2) np.testing.assert_array_equal([1/4, 2/3, 3/2, 4], graph.run(y)) def test_divide_grad(self): x1 = graph.Constant([1, 2, 3, 4]) x2 = graph.Constant([4, 3, 2, 1]) y = graph.Divide(x1, x2) g = graph.Grad(y, (x1, x2)) g1, g2 = graph.run(g) np.testing.assert_array_equal([1/4, 1/3, 1/2, 1], g1) np.testing.assert_array_equal([-1/16, -2/9, -3/4, -4], g2)
36.065637
107
0.52853
1,533
9,341
3.076321
0.048271
0.03732
0.120865
0.161154
0.859415
0.833969
0.78838
0.774597
0.738126
0.708227
0
0.078281
0.260143
9,341
258
108
36.205426
0.604109
0
0
0.594737
0
0
0
0
0
0
0
0
0.221053
1
0.121053
false
0
0.015789
0
0.142105
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
dae800ffa6f6c81d8205d99c1a64fe8460eb79e8
21,339
py
Python
fastpapers/carion2020end.py
ababino/papers
31b6502d150160f697ce34ac888d37ac0e7ad9f2
[ "Apache-2.0" ]
2
2020-11-30T11:01:39.000Z
2021-05-21T00:07:28.000Z
fastpapers/carion2020end.py
ababino/fastpapers
31b6502d150160f697ce34ac888d37ac0e7ad9f2
[ "Apache-2.0" ]
null
null
null
fastpapers/carion2020end.py
ababino/fastpapers
31b6502d150160f697ce34ac888d37ac0e7ad9f2
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_carion2020end.ipynb (unless otherwise specified). __all__ = ['coco_vocab', 'bb_pad', 'ParentSplitter', 'box_cxcywh_to_xyxy', 'box_xyxy_to_cxcywh', 'TensorBBoxWH', 'TensorBBoxTL', 'ToWH', 'ToXYXY', 'ToTL', 'box_area', 'all_op', 'generalized_box_iou', 'DETRLoss', 'DETR', 'CocoEval', 'sorted_detr_trainable_params', 'GetAnnotatedImageFiles', 'GetBboxAnnotation', 'GetClassAnnotation', 'CocoDataLoaders', 'detr_learner'] # Cell import os import torch import numpy as np import seaborn as sns import io from contextlib import redirect_stdout from IPython.core.debugger import set_trace from torch import functional as F from scipy.optimize import linear_sum_assignment from fastprogress.fastprogress import master_bar, progress_bar from fastai.data.all import * from fastai.vision.all import * from .core import * from itertools import chain from pycocotools.cocoeval import COCOeval from pycocotools.coco import COCO from .core import _parent_idxs # Cell coco_vocab = [ 'N/A0', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A1', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A2', 'backpack', 'umbrella', 'N/A3', 'N/A4', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A5', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A6', 'dining table', 'N/A7', 'N/A8', 'toilet', 'N/A9', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A10', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] # Cell def bb_pad(samples, 
pad_idx=0): "Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`." if len(samples[0][1:])>0: samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples] max_len = max([len(s[2]) for s in samples]) def _f(img,bbox,lbl): bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)]) lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx]) return img,bbox,lbl return [_f(*s) for s in samples] else: return samples # Cell def ParentSplitter(train_name='train', valid_name='valid'): "Split `items` from the grand parent folder names (`train_name` and `valid_name`)." def _inner(o): tindex = _parent_idxs(o, train_name) vindex = _parent_idxs(o, valid_name) return tindex, vindex return _inner # Cell def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) # Cell def box_xyxy_to_cxcywh(x): x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) # Cell class TensorBBoxWH(TensorPoint): pass class TensorBBoxTL(TensorPoint): pass # Cell @Transform def ToWH(x:TensorBBox): return TensorBBoxWH(box_xyxy_to_cxcywh(x*0.5+0.5), img_size=x.img_size) # Cell @Transform def ToXYXY(x:TensorBBoxWH)->None: return TensorBBox(box_cxcywh_to_xyxy(x)*2-1, img_size=x.img_size) # Cell class ToTL(Transform): def encodes(self, x:TensorBBoxWH)->None: return TensorBBoxTL(box_cxcywh_to_xyxy(x), img_size=x.img_size) def encodes(self, x:TensorBBox)->None: return TensorBBoxTL((x+1)/2, img_size=x.img_size) # Cell def box_area(boxes): return (boxes[..., 2] - boxes[..., 0]) * (boxes[..., 3] - boxes[..., 1]) # Cell def all_op(cmp): "Compares all the elements of `a` and `b` using cmp." 
def _inner(a, b): if not is_iter(b): return False return all(cmp(a_,b_) for a_,b_ in itertools.zip_longest(a,b)) return _inner # Cell def generalized_box_iou(boxes1, boxes2, pairwise=False): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2). This implemenation expects bs as first dim. """ # degenerate boxes gives inf / nan results # so do an early check #pexpt((boxes1, boxes2)) #set_trace() boxes1, boxes2 = ToTL()((boxes1, boxes2)) #pexpt((boxes1, boxes2)) assert (boxes1[..., 2:] >= boxes1[..., :2]).all(), 'boxes1 are not in [left_x, top_y, right_x, bottom_y] coords' assert (boxes2[..., 2:] >= boxes2[..., :2]).all(), 'boxes2 are not in [left_x, top_y, right_x, bottom_y] coords' area1 = box_area(boxes1) area2 = box_area(boxes2) if pairwise: boxes1 = boxes1[:, :, None, :] boxes2 = boxes2[:, None, :, :] area1 = area1[:, :, None] area2 = area2[:, None, :] lt = torch.max(boxes1[..., :2], boxes2[..., :2]) # [N,M,2] rb = torch.min(boxes1[..., 2:], boxes2[..., 2:]) # [N,M,2] wh = (rb - lt).clamp(min=0) # [N,M,2] inter = wh[..., 0] * wh[..., 1] # [N,M] union = (area1 + area2) - inter iou = inter / union lt = torch.min(boxes1[..., :2], boxes2[..., :2]) # [N,M,2] rb = torch.max(boxes1[..., 2:], boxes2[..., 2:]) # [N,M,2] wh = (rb - lt).clamp(min=0) # [N,M,2] area = wh[..., 0] * wh[..., 1] #set_trace() return iou - (area - union) / area # Cell class DETRLoss(nn.Module): def __init__(self, classw=1, boxw=1, giouw=1, n_queries=100, th=0.7, eos_coef=0.1, n_classes=92): super().__init__() store_attr() self.emptyw = torch.ones(n_classes) self.emptyw[-1] = eos_coef self.entropy = nn.CrossEntropyLoss(weight=self.emptyw) def class_loss(self, output_classes, target_id, indices): bs, nq, nc = output_classes.shape target_id_full = torch.full((bs, nq), nc-1, dtype=torch.int64, device=target_id.device) for i, ind in enumerate(indices): target_id_full[i, ind[0]] = 
target_id[i, ind[1]] return self.entropy(output_classes.transpose(1,2), target_id_full) def box_loss(self, output_boxes, target_boxes, indices): output_boxes, target_boxes = ToWH((output_boxes, target_boxes)) output_boxes_ind = [] target_boxes_ind = [] for i, (src, dst) in enumerate(indices): output_boxes_ind.append(output_boxes[i, src, :]) target_boxes_ind.append(target_boxes[i, dst, :]) output_boxes_ind = torch.cat(output_boxes_ind) target_boxes_ind = torch.cat(target_boxes_ind) l1_loss = nn.L1Loss()(output_boxes_ind, target_boxes_ind) giou = 1 - generalized_box_iou(output_boxes_ind, target_boxes_ind) return self.boxw * l1_loss + self.giouw * giou.mean() def box_cost(self, output_boxes, target_boxes): output_boxes, target_boxes = ToWH((output_boxes, target_boxes)) return torch.cdist(output_boxes, target_boxes, p=1) def class_cost(self, output_class, target_ids): bs, nq, _ = output_class.shape _, mc = target_ids.shape p = output_class.flatten(0,1).softmax(-1) # [bs*nq, num_classes] ids = target_ids.flatten() # [bs*nq] loss = -p[:, ids].reshape(bs, nq, -1) # [bs, nq, bs*mc] return torch.cat([loss[i, :, i*mc:(i+1)*mc][None, ...] 
for i in range(bs)], 0) # [bs, nq, mc] @torch.no_grad() def matcher(self, output, target): output_boxes, output_class = output # [bs, nq, 4], [bs, nq, num_classes] target_boxes, target_ids = target # [bs, max(n in batch), 4], [bs, max(n in batch)] l_iou = -generalized_box_iou(output_boxes, target_boxes, pairwise=True) l_box = self.box_cost(output_boxes, target_boxes) l_class = self.class_cost(output_class, target_ids) C = self.classw*l_class + self.boxw*l_box + self.giouw*l_iou C = C.cpu() sizes = [(v<self.n_classes-1).type(torch.int).sum() for v in target[1]] Cs = [C[i, :, :s] for i, s in enumerate(sizes)] indices = [linear_sum_assignment(C[i, :, :s]) for i, s in enumerate(sizes)] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] def forward(self, output, target_boxes, target_ids): output_boxes, output_class, aux_outputs = output indices = self.matcher((output_boxes, output_class), (target_boxes, target_ids)) l_class = self.class_loss(output_class, target_ids, indices) l_box = self.box_loss(output_boxes, target_boxes, indices) loss = l_class * self.classw + l_box if aux_outputs: for output in aux_outputs: output_boxes, output_class = output['pred_boxes'], output['pred_logits'] indices = self.matcher((output_boxes, output_class), (target_boxes, target_ids)) l_class = self.class_loss(output_class, target_ids, indices) l_box = self.box_loss(output_boxes, target_boxes, indices) loss += l_class * self.classw + l_box return loss def activation(self, x): return (ToXYXY(x[0]), F.softmax(x[1], dim=-1)) def decodes(self, x, pad=True): pred_boxes, probs = x max_probs, pred_ids = probs.max(axis=-1) ind = (max_probs>self.th) & (pred_ids<probs.shape[-1]-1) & (box_area(pred_boxes)>0) max_probs = [max_probs[i, ind[i]] for i in range(ind.shape[0])] pred_ids = [pred_ids[i, ind[i]] for i in range(ind.shape[0])] #pred_boxes = L([pred_boxes[i, ind[i], :] for i in range(ind.shape[0])]).map(TensorBBox) pred_boxes = 
L(pred_boxes[i, ind[i], :] for i in range(ind.shape[0])) if pad: imgs = [None for i in range_of(pred_ids)] z_inp = zip(imgs ,pred_boxes, pred_ids) out = bb_pad(list(z_inp), pad_idx=self.n_classes-1) pred_boxes = torch.cat([x[1].unsqueeze(0) for x in out]) pred_ids = torch.cat([x[2].unsqueeze(0) for x in out]) pred_boxes, pred_ids = TensorBBox(pred_boxes), TensorMultiCategory(pred_ids) self.scores = max_probs return pred_boxes, pred_ids # Cell class DETR(nn.Module): def __init__(self, pretrained=True, n_classes=92, aux_loss=False): super().__init__() self.model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=pretrained, verbose=False) if self.model.class_embed.out_features!=n_classes: self.model.class_embed = nn.Linear(256, n_classes) self.model.aux_loss = aux_loss def forward(self, x): img_sz = x.shape[2:] x = self.model(x) pred_boxes, pred_logits = x['pred_boxes'], x['pred_logits'] aux_outputs = x.get('aux_outputs', None) if aux_outputs: for o in aux_outputs: o['pred_boxes'] = TensorBBoxWH(o['pred_boxes'], img_size=img_sz) return TensorBBoxWH(pred_boxes, img_size=img_sz), pred_logits, aux_outputs # Cell class CocoEval(Callback): run_before=Recorder run_train = False def __init__(self): metrics = 'AP AP50 AP75 AP_small AP_medium AP_large AR1 AR10 AR100 AR_small AR_medium AR_large'.split() self.metrics = L(metrics).map(partial(getattr, self)).map(ValueMetric) def before_validate(self): vocab = self.dls.vocab bs = self.learn.dls.bs self.gt_ds = {'annotations': [], 'images': [], 'categories': []} self.dt_ds = {'annotations': [], 'images': [], 'categories': []} self.gt_ds['categories'] = [{'id': i+1,'name':o} for i,o in enumerate(vocab)] self.dt_ds['categories'] = [{'id': i+1,'name':o} for i,o in enumerate(vocab)] self.reset_counters() self.bs = bs self.dec_bbox = compose(ToXYXY, to_cpu, self.learn.dls.after_item.decode)# self.dec_cls = compose(to_cpu, lambda x: x[x>0]) self.batch_to_samples = compose(partial(batch_to_samples, max_n=self.bs), L) 
def reset_counters(self): self.img_id = Inf.count self.gtann = Inf.count self.dtann = Inf.count def after_batch(self): pred_boxes, pred_ids = self.learn.loss_func.decodes(self.loss_func.activation(self.pred), pad=False) max_probs = self.learn.loss_func.scores _, _, w, h = self.xb[0].shape gt_cls = self.batch_to_samples(self.yb[1]).map(to_cpu) dt_cls = L(pred_ids).map(to_cpu) gt_boxes = self.batch_to_samples(self.yb[0]).map(self.dec_bbox) dt_boxes = L(pred_boxes).map(self.dec_bbox) for gtb, gtc, dtb, dtc, i, socres in zip(gt_boxes, gt_cls, dt_boxes, dt_cls, self.img_id, max_probs): self.gt_ds['images'].append({'id': i, 'height': h, 'width': w}) self.gt_ds['annotations'].extend([{'iscrowd': 0, 'bbox': o.tolist(), 'area': box_area(o), 'category_id': int(c), 'image_id': i, 'id': j} for o, c, j in zip(gtb, gtc, self.gtann)]) self.dt_ds['images'].append({'id': i, 'height': h, 'width': w}) self.dt_ds['annotations'].extend([{'iscrowd': 0, 'score': s, 'bbox': o.tolist(), 'area': box_area(o), 'category_id': int(c), 'image_id': i, 'id': j} for o, c, j, s in zip(dtb, dtc, self.dtann, socres)]) def after_validate(self): with redirect_stdout(io.StringIO()): gt = COCO() gt.dataset = self.gt_ds gt.createIndex() dt = COCO() dt.dataset = self.dt_ds dt.createIndex() coco_eval = COCOeval(gt, dt, iouType='bbox') coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() self.stats = coco_eval.stats self.reset_counters() def AP(self): return self.stats[0] def AP50(self): return self.stats[1] def AP75(self): return self.stats[2] def AP_small(self): return self.stats[3] def AP_medium(self): return self.stats[4] def AP_large(self): return self.stats[5] def AR1(self): return self.stats[6] def AR10(self): return self.stats[7] def AR100(self): return self.stats[8] def AR_small(self): return self.stats[9] def AR_medium(self): return self.stats[10] def AR_large(self): return self.stats[11] # Cell @typedispatch def show_results(x:TensorImage, y:tuple, samples, outs, ctxs=None, max_n=6, 
nrows=None, ncols=1, figsize=None, **kwargs): if ctxs is None: ctxs = get_grid(min(2*len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True, title='Target/Prediction') for i in [0, 2]: ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))] ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(0),ctxs[1::2],range(2*max_n))] ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(1),ctxs[1::2],range(2*max_n))] return ctxs # Cell def sorted_detr_trainable_params(m): named_params = (L(m.named_parameters())).map(L).sorted(itemgetter(0)) backbone_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.backbone')) input_proj_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.input_proj')) transformer_enc_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.transformer.encoder')) transformer_dec_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.transformer.decoder')) query_embed_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.query_embed')) bbox_head_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.bbox_embed')) class_head_mask = named_params.map(itemgetter(0)).map(Self.startswith('model.class_embed')) transformer_enc = named_params[transformer_enc_mask].itemgot(1) transformer_dec = named_params[transformer_dec_mask].itemgot(1) query_embed = named_params[query_embed_mask].itemgot(1) input_proj = named_params[input_proj_mask].itemgot(1) backbone = named_params[backbone_mask].itemgot(1) bbox_head = named_params[bbox_head_mask].itemgot(1) class_head = named_params[class_head_mask].itemgot(1) return L(backbone + input_proj, transformer_enc + transformer_dec + query_embed, bbox_head + class_head) # Cell class GetAnnotatedImageFiles: def __init__(self, img2bbox): self.img2bbox = img2bbox def __call__(self, x): return compose(get_image_files, partial(filter, compose(attrgetter('name'), 
self.img2bbox.__contains__)), L)(x) class GetBboxAnnotation: def __init__(self, img2bbox): self.img2bbox = img2bbox def __call__(self, x): return compose(attrgetter('name'), self.img2bbox.__getitem__, itemgetter(0))(x) class GetClassAnnotation: def __init__(self, img2bbox): self.img2bbox = img2bbox def __call__(self, x): return compose(attrgetter('name'), self.img2bbox.__getitem__, itemgetter(1))(x) # Cell class CocoDataLoaders(DataLoaders): @classmethod @delegates(DataLoaders.from_dblock) def from_path(cls, path, train='train', valid='val', vocab=None, bs=16, item_tfms=Resize(800), batch_tfms=None, **kwargs): source = Path(path) ann_files = source.ls(file_exts='.json') train_ann = ann_files.filter(lambda x: x.name.startswith(train)) assert len(train_ann)==1, 'More than one (or none) training annotation file' val_ann = ann_files.filter(lambda x: x.name.startswith(valid)) assert len(val_ann)<2, 'More than one validation annotation file' ann_files = [train_ann[0]] if val_ann: ann_files.append(val_ann[0]) img2bbox = {} for ann_file in ann_files: img2bbox = merge(img2bbox, dict(zip(*get_annotations(ann_file)))) if not vocab: vocab = L(chain(*L(img2bbox.values()).itemgot(1))).unique() if not '#na#' in vocab: vocab = L(vocab) + '#na#' elif '#na#'!=vocab[-1]: warn('Empty category #na# should be the last element of the vocab.') warn('Moving category #na# at the end of vocab.') vocab.pop(vocab.index('#na#')) vocab = L(vocab) + '#na#' img_folders = source.ls().filter(Self.is_dir()) train_name = img_folders.filter(Self.name.startswith(train)) val_name = img_folders.filter(Self.name.startswith(valid)) assert len(train_name)==1 train_name = train_name[0].name if len(ann_files)==2: assert len(val_name)==1 val_name = val_name[0].name splitter = ParentSplitter(train_name=train_name, valid_name=val_name) else: splitter = RandomSplitter() BBoxBlock.dls_kwargs = {'before_batch': partial(bb_pad, pad_idx=len(vocab)-1)} dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, 
BBoxLblBlock(vocab=list(vocab), add_na=False)), get_items=GetAnnotatedImageFiles(img2bbox), splitter=splitter, get_y=[GetBboxAnnotation(img2bbox), GetClassAnnotation(img2bbox)], item_tfms=item_tfms, batch_tfms=batch_tfms, n_inp=1) return cls.from_dblock(dblock, source, bs=bs, **kwargs) @classmethod @delegates(DataLoaders.from_dblock) def from_sources(cls, sources, vocab=None, bs=16, item_tfms=Resize(800), batch_tfms=None, **kwargs): ann_files = [sources['train_ann'], sources['val_ann']] img2bbox = {} for ann_file in ann_files: img2bbox = merge(img2bbox, dict(zip(*get_annotations(ann_file)))) if not vocab: vocab = L(chain(*L(img2bbox.values()).itemgot(1))).unique() if not '#na#' in vocab: vocab = L(vocab) + '#na#' elif '#na#'!=vocab[-1]: warn('Empty category #na# should be the last element of the vocab.') warn('Moving category #na# at the end of vocab.') vocab.pop(vocab.index('#na#')) vocab = L(vocab) + '#na#' splitter = ParentSplitter(train_name=sources['train'].name, valid_name=sources['val'].name) BBoxBlock.dls_kwargs = {'before_batch': partial(bb_pad, pad_idx=len(vocab)-1)} dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock(vocab=list(vocab), add_na=False)), get_items=GetAnnotatedImageFiles(img2bbox), splitter=splitter, get_y=[GetBboxAnnotation(img2bbox), GetClassAnnotation(img2bbox)], item_tfms=item_tfms, batch_tfms=batch_tfms, n_inp=1) return cls.from_dblock(dblock, sources['base'], bs=bs, **kwargs) # Cell def detr_learner(dls, pretrained=True, bs=16): model = DETR(pretrained=pretrained, n_classes=len(dls.vocab), aux_loss=True) loss = DETRLoss(classw=1, boxw=5, giouw=2).cuda() ce = CocoEval() learn = Learner(dls, model, loss, splitter=sorted_detr_trainable_params, cbs=[ce], metrics=ce.metrics, opt_func=partial(Adam, decouple_wd=True)) learn.coco_eval = ce return learn
45.69379
215
0.634753
3,069
21,339
4.215705
0.184751
0.019555
0.012985
0.017623
0.349127
0.298578
0.276859
0.254522
0.254522
0.231952
0
0.01994
0.212709
21,339
467
216
45.69379
0.750179
0.049018
0
0.181081
1
0
0.106126
0.00592
0
0
0
0
0.016216
1
0.143243
false
0.005405
0.045946
0.056757
0.286486
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daea62739f3ebdaefddcd10908b331885bbe8bcd
1,063
py
Python
tests/test_models/test_author.py
OliverHofkens/dephell
6303f416018910668f1635b70cd828a2fd2b2d9e
[ "MIT" ]
1,880
2019-03-21T10:08:25.000Z
2022-03-31T12:41:55.000Z
tests/test_models/test_author.py
rachmadaniHaryono/dephell
0ef500c8f2d5f05244bac191b1b1383f68464cd2
[ "MIT" ]
356
2019-03-21T19:08:56.000Z
2021-01-08T17:45:43.000Z
tests/test_models/test_author.py
rachmadaniHaryono/dephell
0ef500c8f2d5f05244bac191b1b1383f68464cd2
[ "MIT" ]
157
2019-04-23T01:13:37.000Z
2022-03-24T22:41:18.000Z
# external
import pytest

# project
from dephell.models.author import Author


# Shared (name, mail, formatted) triples used by both the formatting and the
# parsing tests. Hoisted to one constant so the two parametrize lists cannot
# drift apart (they were previously duplicated verbatim).
AUTHOR_CASES = [
    ('gram', 'example@mail.com', 'gram <example@mail.com>'),
    ('gram', None, 'gram'),
    ('Грам @orsinium', 'example_mail@mail.com', 'Грам @orsinium <example_mail@mail.com>'),
    ('Грам @orsinium', None, 'Грам @orsinium'),
]


@pytest.mark.parametrize('name, mail, formatted', AUTHOR_CASES)
def test_format(name, mail, formatted):
    """str(Author) renders 'name <mail>' or the bare name when mail is None."""
    author = Author(name=name, mail=mail)
    assert str(author) == formatted


@pytest.mark.parametrize('name, mail, formatted', AUTHOR_CASES)
def test_parse(name, mail, formatted):
    """Author.parse round-trips the formatted representation back to fields."""
    author = Author.parse(formatted)
    assert author.name == name
    assert author.mail == mail
34.290323
94
0.557855
112
1,063
5.241071
0.223214
0.149915
0.115843
0.122658
0.715503
0.616695
0.616695
0.616695
0.616695
0.616695
0
0
0.28222
1,063
30
95
35.433333
0.769332
0.015052
0
0.571429
0
0
0.331418
0.084291
0
0
0
0
0.142857
1
0.095238
false
0
0.095238
0
0.190476
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daeae031a92bdd332e807b1e1b225a48f958d13f
4,283
py
Python
pymultimediacompression/videomedia/compressor.py
AbdullrhmanAljasser/PyMultiMediaCompression
0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705
[ "MIT" ]
null
null
null
pymultimediacompression/videomedia/compressor.py
AbdullrhmanAljasser/PyMultiMediaCompression
0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705
[ "MIT" ]
null
null
null
pymultimediacompression/videomedia/compressor.py
AbdullrhmanAljasser/PyMultiMediaCompression
0ed5a1ac43050b1c9fe84f277a18beaf4bbdc705
[ "MIT" ]
null
null
null
import os
import ffmpeg
import subprocess
import shutil
from pymultimediacompression.utilities.utils import bitrate_size_based, gb_to_bit, mb_to_bit, kb_to_bit, b_to_bit

# Message raised when the ffmpeg/ffprobe binaries cannot be found on PATH.
MISSING_REQUIREMENTS = "FFmpeg required to be installed to use PyMultiMediaCompression \n Check https://github.com/AbdullrhmanAljasser/PyMultiMediaCompression"

'''
Check if required installs are satisfied
Raise an error if not
'''
def check_required():
    # `which` exits non-zero when the binary is absent from PATH.
    # NOTE(review): `which` is POSIX-only; this check will always fail on
    # Windows even when ffmpeg is installed — confirm supported platforms.
    check = subprocess.call(['which', 'ffmpeg'])
    if check != 0:
        raise Exception(MISSING_REQUIREMENTS)
    check = subprocess.call(['which', 'ffprobe'])
    if check != 0:
        raise Exception(MISSING_REQUIREMENTS)

'''
Video Compression Based on given Size

Description: A function to allow users to compressa given video to their expected video size

Input Paramters:
    filepath (Required): Path of the video file being compressed
    finalsize (Required): expected final size of video
    size_type (Optional): Specify final size type gb, mb, kb, or b (Default mb)
    output (Optional): To keep original video specify output path to stop overwriting
    codec (Optional): Specify the codec used to compress (Default x264)

#TODO: More parameters to give more freedom to user
'''
def video_compress_size_based(filepath, finalsize, size_type='mb',
                              output=None, codec='libx264'):
    # Check if the required installs are satisfied
    check_required()
    # Check if filepath is a file
    if not os.path.isfile(filepath):
        raise Exception("File path is not a valid file")
    # Check if filepath is absolute or not
    # NOTE(review): this concatenation has no path separator between cwd and
    # filepath (os.path.join would be expected) — confirm intended behavior.
    if not os.path.isabs(filepath):
        filepath = os.getcwd() + filepath
    # Check if asked size is a correct number ==>
    try:
        float(finalsize)
    except Exception as e:
        raise (e)
    if finalsize <= 0:
        raise Exception("Unable to compress to 0 or below size")
    # END <==
    # Retrieve file extension to ensure it applicable ==>
    # NOTE(review): paths are split on '\\', so this module assumes Windows
    # path separators throughout — confirm intended platform.
    ext = os.path.splitext(filepath)[-1].lower()
    file_name_w_ext = filepath.split('\\')[-1]
    splitter = filepath.split('\\')
    path_to_file = ''
    for x in range(len(splitter) - 1):
        path_to_file = path_to_file + '\\' + splitter[x]
    # NOTE(review): valid_video_ext (and valid_size_type / final_bit_size
    # below) are neither defined nor imported in the visible module — confirm
    # they exist elsewhere in the package.
    if not valid_video_ext(ext):
        raise Exception("Input file is not of valid video type")
    # END <==
    # Setup output (Overwrite/None)
    if output is None:
        # No explicit output: work inside a ./compressed subdirectory.
        if not os.path.isdir('compressed'):
            os.mkdir('compressed')
            os.chdir(os.getcwd() + '\\compressed')
        else:
            os.chdir(os.getcwd() + '\\compressed')
    else:
        if os.path.isdir(output):
            if not os.path.isfile(output):
                if not os.path.isabs(output):
                    output = os.getcwd() + output
                    os.chdir(output)
                else:
                    os.chdir(output)
            else:
                raise Exception("Output path is a file not a directory")
        else:
            raise Exception("Output path is not a valid directory, maybe file doesn't exists?")
    # Probe the source file for size/duration to derive the target bitrate.
    file_info = ffmpeg.probe(filepath)
    file_info_size = file_info['format']['size']
    file_info_duration = file_info['format']['duration']
    # NOTE(review): file_info_bitrate is computed but never used below.
    file_info_bitrate = (float(file_info_size)) / float(file_info_duration)
    if not valid_size_type(size_type):
        raise Exception("Size type is not correct, must be gb, mb, kb, or b")
    finalsize = final_bit_size(finalsize, size_type=size_type)
    bitrate_for_compression = bitrate_size_based(finalsize, file_info_duration)
    try:
        # Re-encode into the current working directory at the computed bitrate.
        ffmpeg.input(filepath)\
            .output(file_name_w_ext, **{'vcodec': codec, 'video_bitrate': bitrate_for_compression})\
            .overwrite_output()\
            .run()
    except Exception as e:
        raise (e)
    path_to_compressed = os.getcwd() + '\\' + file_name_w_ext
    if output is None:
        # Moving to overwrite file
        shutil.move(path_to_compressed, filepath)
    else:
        # Moving to the specified output
        # NOTE(review): this branch is a no-op (`True` is a bare statement and
        # the move is commented out) — the compressed file stays in `output`'s
        # cwd under its original name. Confirm this is intended.
        True
        # shutil.move(path_to_compressed, output)
    return True


__all__ = [
    'video_compress_size_based',
    'check_required'
]
32.694656
160
0.632267
541
4,283
4.855823
0.273567
0.027408
0.013323
0.020936
0.159878
0.094404
0.031214
0
0
0
0
0.004177
0.273406
4,283
131
161
32.694656
0.839974
0.081018
0
0.268293
0
0
0.176433
0.015287
0
0
0
0.007634
0
1
0.02439
false
0
0.060976
0
0.097561
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daed2dcd02c1f4f68af01100da13ec4d4ceacfa7
7,548
py
Python
kubernetes/test/test_v1_pod_status.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
kubernetes/test/test_v1_pod_status.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
kubernetes/test/test_v1_pod_status.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v1.20.7 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import datetime import kubernetes.client from kubernetes.client.models.v1_pod_status import V1PodStatus # noqa: E501 from kubernetes.client.rest import ApiException class TestV1PodStatus(unittest.TestCase): """V1PodStatus unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test V1PodStatus include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = kubernetes.client.models.v1_pod_status.V1PodStatus() # noqa: E501 if include_optional : return V1PodStatus( conditions = [ kubernetes.client.models.v1/pod_condition.v1.PodCondition( last_probe_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), message = '0', reason = '0', status = '0', type = '0', ) ], container_statuses = [ kubernetes.client.models.v1/container_status.v1.ContainerStatus( container_id = '0', image = '0', image_id = '0', last_state = kubernetes.client.models.v1/container_state.v1.ContainerState( running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning( started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated( container_id = '0', exit_code = 56, finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), message = '0', reason = '0', signal = 56, started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), waiting 
= kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting( message = '0', reason = '0', ), ), name = '0', ready = True, restart_count = 56, started = True, state = kubernetes.client.models.v1/container_state.v1.ContainerState(), ) ], ephemeral_container_statuses = [ kubernetes.client.models.v1/container_status.v1.ContainerStatus( container_id = '0', image = '0', image_id = '0', last_state = kubernetes.client.models.v1/container_state.v1.ContainerState( running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning( started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated( container_id = '0', exit_code = 56, finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), message = '0', reason = '0', signal = 56, started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), waiting = kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting( message = '0', reason = '0', ), ), name = '0', ready = True, restart_count = 56, started = True, state = kubernetes.client.models.v1/container_state.v1.ContainerState(), ) ], host_ip = '0', init_container_statuses = [ kubernetes.client.models.v1/container_status.v1.ContainerStatus( container_id = '0', image = '0', image_id = '0', last_state = kubernetes.client.models.v1/container_state.v1.ContainerState( running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning( started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated( container_id = '0', exit_code = 56, finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), message = '0', reason = '0', signal = 56, started_at = 
datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ), waiting = kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting( message = '0', reason = '0', ), ), name = '0', ready = True, restart_count = 56, started = True, state = kubernetes.client.models.v1/container_state.v1.ContainerState(), ) ], message = '0', nominated_node_name = '0', phase = '0', pod_ip = '0', pod_i_ps = [ kubernetes.client.models.v1/pod_ip.v1.PodIP( ip = '0', ) ], qos_class = '0', reason = '0', start_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') ) else : return V1PodStatus( ) def testV1PodStatus(self): """Test V1PodStatus""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main()
51
126
0.471118
712
7,548
4.849719
0.186798
0.111208
0.140168
0.152911
0.729511
0.695917
0.676803
0.676803
0.676803
0.676803
0
0.071445
0.417727
7,548
147
127
51.346939
0.714221
0.068627
0
0.689076
1
0
0.079258
0
0
0
0
0
0
1
0.033613
false
0.016807
0.05042
0
0.109244
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
daed5c9ed5fbf2bcdac59913e3386fad0938a9cb
831
py
Python
scripts/warm_ex.py
cp4cds/cmip6_range_check_2
6fca2632029a2adb9736bfc1382b39f82d8a27e1
[ "Apache-2.0" ]
null
null
null
scripts/warm_ex.py
cp4cds/cmip6_range_check_2
6fca2632029a2adb9736bfc1382b39f82d8a27e1
[ "Apache-2.0" ]
1
2021-09-27T15:18:39.000Z
2021-09-27T15:18:39.000Z
scripts/warm_ex.py
cp4cds/cmip6_range_check_2
6fca2632029a2adb9736bfc1382b39f82d8a27e1
[ "Apache-2.0" ]
null
null
null
import warnings


def get_user_warning(data):
    """Build a UserWarning subclass carrying `data` as a class attribute.

    Each call returns a fresh class, so distinct payloads never share state.
    """
    class HERE(UserWarning):
        pass
    setattr(HERE, 'data', data)
    return HERE


class Message(object):
    """Simple two-field payload used as a warning message object."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


def fxn():
    ##warnings.warn(Message('sample text','b'), UserWarning )
    # BUG FIX: get_user_warning takes a single `data` argument; the original
    # call passed two positional arguments ('other sample text', 45), which
    # raised TypeError before any warning was issued. Bundle the payload into
    # one tuple instead.
    warnings.warn(Message('sample text', 'b'),
                  get_user_warning(('other sample text', 45)))
    ##warnings.warn(('sample text','b'), UserWarning )


with warnings.catch_warnings(record=True) as w:
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    # Trigger a warning.
    fxn()
    # Verify some things
    assert len(w) == 1
    assert issubclass(w[-1].category, UserWarning)
    ##assert "deprecated" in str(w[-1].message)
    print(w[-1].filename, w[-1].lineno)
    print(w[-1].message)
25.96875
88
0.637786
112
831
4.651786
0.473214
0.023033
0.06334
0.095969
0.115163
0.115163
0
0
0
0
0
0.012232
0.212996
831
31
89
26.806452
0.784404
0.270758
0
0
0
0
0.065436
0
0
0
0
0
0.105263
1
0.157895
false
0.052632
0.052632
0
0.368421
0.105263
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
daed91c325a084d2dc6fae9cb74a954c288fb74c
979
py
Python
dreamy-dingos/chat/models.py
Vthechamp22/summer-code-jam-2021
0a8bf1f22f6c73300891fd779da36efd8e1304c1
[ "MIT" ]
40
2020-08-02T07:38:22.000Z
2021-07-26T01:46:50.000Z
dreamy-dingos/chat/models.py
Vthechamp22/summer-code-jam-2021
0a8bf1f22f6c73300891fd779da36efd8e1304c1
[ "MIT" ]
134
2020-07-31T12:15:45.000Z
2020-12-13T04:42:19.000Z
dreamy-dingos/chat/models.py
Vthechamp22/summer-code-jam-2021
0a8bf1f22f6c73300891fd779da36efd8e1304c1
[ "MIT" ]
101
2020-07-31T12:00:47.000Z
2021-11-01T09:06:58.000Z
from django.db import models
from django.utils import timezone


class Room(models.Model):
    # Chat room identified by a short display name.
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name


class SimpleUser(models.Model):
    objects = models.Manager()
    # Username doubles as the primary key, so usernames are globally unique.
    username = models.CharField(max_length=50, primary_key=True)
    # Nullable: a user may not be in any room; deleting a Room cascades.
    room = models.ForeignKey(Room, null=True, blank=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.username


class Message(models.Model):
    objects = models.Manager()
    text = models.CharField(max_length=500)
    # Callable default, so the timestamp is taken at save time, not import time.
    created_at = models.DateTimeField(default=timezone.now)
    room = models.ForeignKey(Room, on_delete=models.CASCADE)

    def __str__(self):
        # Admin/display preview: shorten long messages to a word boundary.
        return self._format_short_text(self.text)

    @staticmethod
    def _format_short_text(text: str) -> str:
        # Short enough: show as-is.
        if len(text) <= 30:
            return text
        # NOTE(review): when the 31st character is a space this returns the
        # FULL text rather than a truncated preview — looks like it was meant
        # to return text[:30] + "..."; confirm intent before changing.
        if text[30] == " ":
            return text
        # Cut at 30 chars, drop the (possibly partial) last word, add ellipsis.
        return " ".join(text[:30].split(" ")[:-1]) + "..."
26.459459
83
0.658836
123
979
5.04065
0.398374
0.048387
0.087097
0.116129
0.348387
0.132258
0.132258
0.132258
0.132258
0
0
0.018301
0.21859
979
36
84
27.194444
0.792157
0
0
0.269231
0
0
0.006129
0
0
0
0
0
0
1
0.153846
false
0
0.076923
0.115385
0.884615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
daef2527ac6d9f73a92aca7b8ece3f66a8db4875
198
py
Python
aulaszeus/modules/buscaCep.py
zerossB/aulas_zeus
002fc29578aed3aa36a9b400882e638b68c123ea
[ "BSD-3-Clause" ]
null
null
null
aulaszeus/modules/buscaCep.py
zerossB/aulas_zeus
002fc29578aed3aa36a9b400882e638b68c123ea
[ "BSD-3-Clause" ]
null
null
null
aulaszeus/modules/buscaCep.py
zerossB/aulas_zeus
002fc29578aed3aa36a9b400882e638b68c123ea
[ "BSD-3-Clause" ]
null
null
null
import re
import urllib


def get_cep(cep):
    """Normalize a Brazilian CEP (postal code) and validate its length.

    Strips every non-digit character from `cep` and returns False when the
    result is not exactly 8 digits. For a valid CEP the lookup request is not
    implemented yet, so the function currently returns None.

    :param cep: postal code, possibly formatted (e.g. "12345-678")
    :return: False for an invalid CEP; None otherwise (lookup TODO)
    """
    # Raw string for the regex: '[^\d]' as a plain string is an invalid
    # escape sequence (DeprecationWarning / SyntaxWarning in modern Python).
    cep_corrigido = re.sub(r'[^\d]', '', cep)
    if len(cep_corrigido) != 8:
        return False
    # TODO: build and send the actual lookup request with this payload.
    payload = {'relaxation'}
13.2
43
0.520202
22
198
4.636364
0.772727
0
0
0
0
0
0
0
0
0
0
0.007092
0.287879
198
14
44
14.142857
0.716312
0.005051
0
0
0
0
0.078534
0
0
0
0
0
0
1
0.125
false
0
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daf17f6b45920374fa2e9b1df4b56aef139d6271
5,822
py
Python
python/interpreter/environment.py
yehzhang/x9
2d06767ecd3521e9255748d14ab43938d1693bbc
[ "MIT" ]
1
2017-11-19T02:04:44.000Z
2017-11-19T02:04:44.000Z
python/interpreter/environment.py
yehzhang/x9
2d06767ecd3521e9255748d14ab43938d1693bbc
[ "MIT" ]
null
null
null
python/interpreter/environment.py
yehzhang/x9
2d06767ecd3521e9255748d14ab43938d1693bbc
[ "MIT" ]
null
null
null
from collections import defaultdict


class Environment:
    """Holds the full machine state for the interpreter: registers, memory,
    labels, lookup tables and the program counter."""

    def __init__(self, config):
        self.aliases = {}
        self.registers = Registers(self.aliases, config)
        self.memory = Memory(config)
        # :type Dict[str, Label]: {label_name: Label}
        self.labels = {}
        self.execution_count = 0
        # :type Dict[str, Dict[str, int]]: populated by TokenMappers in asm module
        # e.g. instruction_id = luts[BranchEqual.mnemonic][immediate]
        # e.g. addr = luts[LoadWord.mnemonic][immediate]
        self.luts = defaultdict(dict)
        self._acc_luts = defaultdict(dict)
        # :type int: not a genuine register
        self.pc = 0
        self.cout = 0

    def __repr__(self):
        items = [
            ('Memory', repr(self.memory)),
            ('Registers', self.registers.as_str(self.aliases)),
            ('Dynamic instruction count', str(self.execution_count)),
        ]
        str_items = []
        for k, v in items:
            lns = v.splitlines()
            # Indent multi-line values under their heading for readability.
            if len(lns) > 1:
                v = ''.join('\n\t' + ln for ln in lns)
            s = '{}: {}'.format(k, v)
            str_items.append(s)
        return '\n'.join(str_items)

    def unalias(self, op):
        # Resolve an alias to its canonical name; unknown names pass through.
        return self.aliases.get(op, op)


class Registers:
    """Register file exposing each register as an attribute and by index.

    Attribute access is routed through __getattr__/__setattr__, which resolve
    aliases and delegate to the underlying Byte objects; real instance
    attributes are therefore installed via super().__setattr__ in __init__.
    """

    def __init__(self, aliases, config):
        assert len(config['reg_names']) <= 16
        super().__setattr__('names', config['reg_names'])
        regs = make_bytes(config['reg_default'], len(self.names))
        regs = dict(zip(self.names, regs))
        super().__setattr__('registers', regs)
        super().__setattr__('aliases', aliases)

    def __repr__(self):
        return self.as_str()

    def as_str(self, aliases=None):
        # NOTE(review): the `aliases` parameter is accepted but unused; the
        # instance's own alias table is consulted instead — confirm intent.
        inv_aliases = {v: k for k, v in self.aliases.items()}
        return '\n'.join('{}: {}'.format(
            inv_aliases.get(n, n), self.registers[n]) for n in self.names)

    def __getattr__(self, name):
        # Alias-aware read; returns the register's integer value.
        name = self.aliases.get(name, name)
        return self.registers[name].get()

    def __setattr__(self, name, value):
        # Alias-aware write; stores into the register's Byte.
        name = self.aliases.get(name, name)
        self.registers[name].set(value)

    def __getitem__(self, key):
        """
        :param int key:
        """
        return self.__getattr__(self.names[key])

    def __setitem__(self, key, value):
        """
        :param int key:
        """
        return self.__setattr__(self.names[key], value)


class Memory:
    """
    Memory is of big-endian format.
    """

    def __init__(self, config):
        assert 0 < config['mem_size']
        self.memory = make_bytes(config['mem_default'], config['mem_size'])

    def __repr__(self):
        # Hex dump, eight bytes per row.
        return '\n'.join(' '.join(map(str, self.memory[i:i + 8]))
                         for i in range(0, len(self.memory), 8))

    def __getitem__(self, key):
        """
        Load an unsigned byte at address 0xff:
            memory[0xff]
        Load an unsigned word at address 0xff:
            memory[0xff, 4]
        """
        if not isinstance(key, tuple):
            key = key,
        return self.load(*key)

    def __setitem__(self, key, value):
        """
        Store a byte at address 0xff:
            memory[0xff] = 0xab
        Store a word at address 0xff:
            memory[0xff, 4] = 0xab
        """
        if isinstance(key, tuple):
            addr, size = key
            key = addr, value, size
        else:
            key = key, value
        self.store(*key)

    def load(self, addr, size=1, signed=False):
        # Big-endian read: most significant byte at the lowest address.
        assert 1 <= size
        assert 0 <= addr
        assert addr + size <= len(self.memory)
        value = 0
        for i in range(addr, addr + size):
            byte = self.memory[i].get()
            value = (value << 8) | byte
        if signed:
            value = convert_to_signed_integer(value, size * 8)
        return value

    def store(self, addr, value, size=1):
        # Big-endian write: least significant byte placed last.
        assert 1 <= size
        assert 0 <= addr
        assert addr + size <= len(self.memory)
        value = convert_to_unsigned_integer(value, size * 8)
        for i in range(size - 1, -1, -1):
            self.memory[addr + i].set(value & 0xff)
            value >>= 8


class Byte:
    """A single memory/register cell holding one unsigned byte."""

    def __init__(self):
        self.value = 0

    def __repr__(self):
        return '0x{:02x}'.format(self.value)

    def set(self, value):
        # Signed minimum and unsigned maximum
        self.value = convert_to_unsigned_integer(value, 8)
        return self

    def get(self):
        return self.value


def convert_to_unsigned_integer(value, size):
    """
    :param int size: number of bits containing this integer
    """
    upper_bound = 2 ** size
    # Accept anything representable in `size` bits, signed or unsigned.
    if not (-upper_bound // 2 <= value < upper_bound):
        msg = '{} is out of range of {} bits'.format(value, size)
        raise ValueError(msg)
    all_f_mask = upper_bound - 1
    # Masking maps negative values to their two's-complement encoding.
    return value & all_f_mask


def convert_to_signed_integer(value, size):
    """
    :param int size: number of bits containing this integer
    """
    upper_bound = 2 ** size
    if not (-upper_bound // 2 <= value < upper_bound):
        msg = '{} is out of range of {} bits'.format(value, size)
        raise ValueError(msg)
    if value >= 0:
        # Set MSB means the two's-complement encoding is negative.
        msb_mask = 1 << (size - 1)
        if value & msb_mask:
            value -= upper_bound
    return value


def make_bytes(default, size=None):
    """
    :param int|List[int] default:
    :param int size: number of bytes in the list, if default is int
    :return List[Byte]:
    """
    if isinstance(default, int):
        # One shared default value replicated into `size` fresh Byte cells.
        if size is None:
            raise ValueError("'size' is not specified when default is int")
        return [Byte().set(default) for _ in range(size)]
    bytes = [Byte().set(d) for d in default]
    if size is not None and len(bytes) != size:
        raise ValueError("'default' and 'size' are not of the same length")
    return bytes
29.256281
82
0.567159
748
5,822
4.241979
0.195187
0.031201
0.013867
0.023952
0.270407
0.243303
0.153167
0.135518
0.135518
0.135518
0
0.012389
0.306767
5,822
198
83
29.40404
0.773786
0.137238
0
0.224
0
0
0.061031
0
0
0
0.000825
0
0.064
1
0.184
false
0
0.008
0.04
0.36
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daf21d401b40f3f6a6807cf4ef69459e7b2f3d68
731
py
Python
pynaries/tests.py
marshall/pynaries
52c0376b95de43ad9b910b1cab08c17d3600e070
[ "Apache-2.0" ]
1
2016-05-09T10:08:26.000Z
2016-05-09T10:08:26.000Z
pynaries/tests.py
marshall/pynaries
52c0376b95de43ad9b910b1cab08c17d3600e070
[ "Apache-2.0" ]
null
null
null
pynaries/tests.py
marshall/pynaries
52c0376b95de43ad9b910b1cab08c17d3600e070
[ "Apache-2.0" ]
null
null
null
import unittest

from version import Version


class VersionTestCase(unittest.TestCase):
    """Unit tests for Version string parsing and ordering."""

    def testFromString(self):
        """Parsing '1.3.3sp1' splits major/minor/micro and the annotation."""
        v = Version.fromObject("1.3.3sp1")
        # FIX: assertEquals is a long-deprecated alias of assertEqual and was
        # removed outright in Python 3.12 — use the canonical name.
        self.assertEqual(v.major, 1)
        self.assertEqual(v.minor, '3')
        self.assertEqual(v.micro, '3sp1')
        self.assertEqual(Version.getNumericPiece(v.micro), '3')
        self.assertEqual(Version.getAnnotationPiece(v.micro), 'sp1')
        self.assertEqual(str(v), "1.3.3sp1")

    def testComparisons(self):
        """An annotated micro sorts after its plain version, below next minor."""
        self.assertTrue(Version.fromObject('1.3.3') < Version.fromObject('1.3.3sp1'))
        self.assertTrue(Version.fromObject('1.4.0') > Version.fromObject('1.3.3sp1'))
        self.assertTrue(Version.fromObject('1.3.3p1') < Version.fromObject('1.4.0'))


if __name__ == '__main__':
    unittest.main()
34.809524
79
0.730506
99
731
5.313131
0.313131
0.226236
0.239544
0.180608
0.36692
0.325095
0.209125
0.209125
0.209125
0.209125
0
0.05279
0.093023
731
21
80
34.809524
0.740573
0
0
0
0
0
0.096995
0
0
0
0
0
0.529412
1
0.117647
false
0
0.117647
0
0.294118
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
daf3263299ef088c986b08d37248e5a9199e71b6
250
py
Python
python/resistor-color-duo/resistor_color_duo.py
Victor-Chinewubeze/algorithms-exercism
34669348762eef69b68a2f43260ab10ac1c4eb2a
[ "MIT" ]
1
2020-04-16T23:06:33.000Z
2020-04-16T23:06:33.000Z
python/resistor-color-duo/resistor_color_duo.py
Victor-Chinewubeze/algorithms-exercism
34669348762eef69b68a2f43260ab10ac1c4eb2a
[ "MIT" ]
7
2021-05-08T11:46:15.000Z
2021-05-10T19:31:11.000Z
python/resistor-color-duo/resistor_color_duo.py
Victor-Chinewubeze/algorithms-exercism
34669348762eef69b68a2f43260ab10ac1c4eb2a
[ "MIT" ]
1
2020-01-09T16:33:39.000Z
2020-01-09T16:33:39.000Z
def value(colors): colorList = ["black","brown","red","orange","yellow","green","blue","violet","grey","white"] code = "" for color in colors: if len(code) < 2: code += str(colorList.index(color)) return int(code)
31.25
96
0.564
31
250
4.548387
0.806452
0
0
0
0
0
0
0
0
0
0
0.005181
0.228
250
7
97
35.714286
0.725389
0
0
0
0
0
0.196
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
daf44bfec449975c869de1732b37b6845adb00a5
4,225
py
Python
WebUtils/Funcs.py
PeaceWorksTechnologySolutions/w4py
74f5a03a63f1a93563502b908474aefaae2abda2
[ "MIT" ]
18
2016-08-01T20:15:59.000Z
2019-12-24T16:00:03.000Z
WebUtils/Funcs.py
WebwareForPython/w4py
bba08f5974d49f5da7e88abe3eeda1037d0824a3
[ "MIT" ]
6
2016-09-13T05:48:45.000Z
2020-01-09T18:29:12.000Z
WebUtils/Funcs.py
WebwareForPython/w4py
bba08f5974d49f5da7e88abe3eeda1037d0824a3
[ "MIT" ]
6
2016-09-16T14:32:29.000Z
2020-01-03T18:52:16.000Z
"""WebUtils.Funcs This module provides some basic functions that are useful in HTML and web development. You can safely import * from WebUtils.Funcs if you like. """ __all__ = [ 'htmlEncode', 'htmlEncodeStr', 'htmlDecode', 'urlEncode', 'urlDecode', 'htmlForDict', 'requestURI', 'normURL'] htmlForNone = '-' # used by htmlEncode htmlCodes = ( ('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;'), # ['\n', '<br>'], ) htmlCodesReversed = tuple(reversed(htmlCodes)) def htmlEncode(what, codes=htmlCodes): """Return the HTML encoded version of the given object. The optional 'codes' parameter allows passing custom translations. """ if what is None: return htmlForNone if hasattr(what, 'html'): # allow objects to specify their own translation to html # via a method, property or attribute ht = what.html if callable(ht): ht = ht() return ht what = str(what) return htmlEncodeStr(what, codes) def htmlEncodeStr(s, codes=htmlCodes): """Return the HTML encoded version of the given string. This is useful to display a plain ASCII text string on a web page. The optional 'codes' parameter allows passing custom translations. """ for c, e in codes: s = s.replace(c, e) return s def htmlDecode(s, codes=htmlCodesReversed): """Return the ASCII decoded version of the given HTML string. This does NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode(). The optional 'codes' parameter allows passing custom translations. """ for c, e in codes: s = s.replace(e, c) return s # Aliases for URL encoding and decoding functions: from urllib import quote_plus as urlEncode, unquote_plus as urlDecode def htmlForDict(d, addSpace=None, filterValueCallBack=None, maxValueLength=None, topHeading=None, isEncoded=None): """Return an HTML string with a table where each row is a key-value pair.""" if not d: return '' # A really great (er, bad) example of hardcoding. 
:-) html = ['<table class="NiceTable">\n'] if topHeading: html.append('<tr class="TopHeading"><th') html.append(('>%s</th><th>%s' if isinstance(topHeading, tuple) else ' colspan="2">%s') % topHeading) html.append('</th></tr>\n') for key in sorted(d): value = d[key] if addSpace and key in addSpace: target = addSpace[key] value = (target + ' ').join(value.split(target)) if filterValueCallBack: value = filterValueCallBack(value, key, d) if maxValueLength and not isEncoded: value = str(value) if len(value) > maxValueLength: value = value[:maxValueLength-3] + '...' key = htmlEncode(key) if not isEncoded: value = htmlEncode(value) html.append('<tr><th style="text-align:left">%s</th><td>%s</td></tr>\n' % (key, value)) html.append('</table>') return ''.join(html) def requestURI(env): """Return the request URI for a given CGI-style dictionary. Uses REQUEST_URI if available, otherwise constructs and returns it from SCRIPT_URL, SCRIPT_NAME, PATH_INFO and QUERY_STRING. """ uri = env.get('REQUEST_URI') if uri is None: uri = env.get('SCRIPT_URL') if uri is None: uri = env.get('SCRIPT_NAME', '') + env.get('PATH_INFO', '') query = env.get('QUERY_STRING', '') if query != '': uri += '?' + query return uri def normURL(path): """Normalizes a URL path, like os.path.normpath. Acts on a URL independent of operating system environment. """ if not path: return initialslash = path[0] == '/' lastslash = path[-1] == '/' comps = path.split('/') newcomps = [] for comp in comps: if comp in ('', '.'): continue if comp != '..': newcomps.append(comp) elif newcomps: newcomps.pop() path = '/'.join(newcomps) if path and lastslash: path += '/' if initialslash: path = '/' + path return path
28.741497
80
0.59574
517
4,225
4.837524
0.355899
0.019992
0.014394
0.020392
0.145542
0.145542
0.145542
0.145542
0.102359
0.102359
0
0.001307
0.27574
4,225
146
81
28.938356
0.816013
0.300592
0
0.068182
0
0.011364
0.11676
0.024895
0
0
0
0
0
1
0.068182
false
0
0.011364
0
0.193182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daf646afbefc8a29534d759d8a3042f9a4ff070a
8,996
py
Python
src/slizzy/main.py
matheushsouza/slizzy
f224b8e4621d11031315da9178202781b4a2dcef
[ "BSD-3-Clause" ]
1
2019-12-24T03:08:12.000Z
2019-12-24T03:08:12.000Z
src/slizzy/main.py
matheushsouza/slizzy
f224b8e4621d11031315da9178202781b4a2dcef
[ "BSD-3-Clause" ]
null
null
null
src/slizzy/main.py
matheushsouza/slizzy
f224b8e4621d11031315da9178202781b4a2dcef
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 import argparse import enum import sys import slizzy.config as config import slizzy.version as version from slizzy.track import Track from slizzy.util import color, logging, time __all__ = [ "module", "all_modules", "slizzy" ] module = enum.Enum("modules", "slider mp3co zippy") all_modules = { module.slider, module.mp3co, module.zippy } def picky_selection(available_downloads, logger): logger.log("\"picky\" flag: Select which entries to download by providing their " + "(comma-separated) indexes in the list below. Alternatively, enter 'A'" + "to download (a)ll entries.", level = logging.level.info) print("\nA. download all entries") template = "{0}. {1:80} | {2:4} | {3:4.2f} MB | {4:3.2f} Kbps" for i, entry in enumerate(available_downloads) : print(template.format( i, entry.name, time.to_str(entry.duration), entry.size, entry.bitrate )) tries = 3 for i in range(tries) : print("\nYour selection: ", end='') try: selection = input().strip() if selection in ['a', 'A', 'all', 'All'] : return available_downloads else : selection = list(map(int, selection.split(','))) tracks_to_download = list(map(available_downloads.__getitem__, selection)) logger.br() return tracks_to_download except Exception as e: if i < tries - 1 : print("Error: " + str(e) + ". Was that a typo?", file = sys.stderr) else : print("Error: " + str(e) + ".", file = sys.stderr) sys.exit(1) def slizzy(track, modules, download_tracks, picky = False, fetch_limit = False): logger = logging.Logger("slizzy") logger.log("Slizzy magic for track '" + track.title + "'", logging.level.info) logger.log("Query string: " + track.query_string, logging.level.info) logger.br() if fetch_limit : logger.log("\"fetch_limit\" flag: a maximum of {} {} will be fetched from each provider.".format( fetch_limit, "files" if fetch_limit > 1 else "file" ), logging.level.info) logger.br() if not track.duration: # Duration not supplied from command line. 
try: from slizzy.google import google from slizzy.beatport import beatport track.duration = next( # Extract duration from the first matching page. duration for page in google(track, beatport.domain) for duration in [ beatport.get_metadata(track, page) ] if duration ) except StopIteration: logger.log("Track duration unavailable", logging.level.error) return if module.slider in modules: from slizzy.slider import slider slider_downloads = slider(track, fetch_limit) else: slider_downloads = [] if module.mp3co in modules: from slizzy.mp3co import mp3co mp3co_downloads = mp3co(track, fetch_limit) else: mp3co_downloads = [] if module.zippy in modules: from slizzy.google import google from slizzy.zippy import zippy zippy_downloads = [ dl for page in google(track, zippy.domain, fetch_limit) for dl in [ zippy.get_download(track, page) ] if dl ] else: zippy_downloads = [] if module.slider in modules: logger.log( "Selected " + color.result(len(slider_downloads)) + " slider entries.", logging.level.info ) if module.mp3co in modules: logger.log( "Selected " + color.result(len(mp3co_downloads)) + " mp3co entries.", logging.level.info ) if module.zippy in modules: logger.log( "Selected " + color.result(len(zippy_downloads)) + " zippy entries.", logging.level.info ) available_downloads = slider_downloads + mp3co_downloads + zippy_downloads if not available_downloads: logger.log("No entries to download.") return if picky : tracks_to_download = picky_selection(available_downloads, logger) else: tracks_to_download = available_downloads if download_tracks: from slizzy.downloader import download download(tracks_to_download) else: logger.log("Selected urls:\n " + "\n ".join( download.name + " | " + download.link for download in tracks_to_download )) logger.br() logger.finish("Slizzied " + str(len(tracks_to_download)) + " files.") def parse_args(argv): parser = argparse.ArgumentParser( description = "Slizzy is a tool to search for and " "download slider.kz, mp3co.biz and zippyshare 
objects.", formatter_class = argparse.RawTextHelpFormatter ) parser.add_argument( "--version", "-v", action = "version", version = "\n".join([ "%(prog)s " + version.__version__, "Copyright (c) 2018, gahag.", "All rights reserved." ]) ) commands = parser.add_subparsers(dest = "command", help = "commands") dl = commands.add_parser("dl", help="download tracks") lns = commands.add_parser("lns", help="get download links") cfg = commands.add_parser("cfg", help="config") for command in [ dl, lns ]: command.add_argument( "tracks", help = "one or more tracks to seach, in the format: " "A & B ft. C - ID (D vs. E Remix)", nargs = "+" ) command.add_argument( "-d", "--duration", help = "manually specify the track duration, eliding the beatport search" ) command.add_argument( "--fetch_limit", help = "limits the number of entries fetched from each provider" ) command.add_argument( "--slider", action = "store_true", help = "search in slider.kz instead of all resources" ) command.add_argument( "--mp3co", action = "store_true", help = "search in mp3co.biz instead of all resources" ) command.add_argument( "--zippy", action = "store_true", help = "search only in zippyshare instead of all resources" ) command.add_argument( "--picky", action = "store_true", help = "pick which files to download instead of downloading all eligible files" ) cfg.add_argument("--google-key", help = "set the google API key") cfg.add_argument("--beatport-cx", help = "set the cx API key for the beatport search") cfg.add_argument("--zippyshare-cx", help = "set the cx API key for the zippyshare search") # add arguments for other settings, specially thresholds. 
if not argv: parser.print_usage() sys.exit(1) args = parser.parse_args(argv) if args.command in [ "dl", "lns" ]: if args.duration: if len(args.tracks) > 1: print( "Error: with the duration parameter, only one track may be specified.", file = sys.stderr ) sys.exit(1) try: args.duration = time.from_str(args.duration) except Exception as e: print("Error: " + str(e) + ".", file = sys.stderr) sys.exit(1) if args.fetch_limit : try: args.fetch_limit = int(args.fetch_limit) except Exception as e: print("Error: " + str(e) + ".", file = sys.stderr) sys.exit(1) if args.fetch_limit <= 0: print( "Error: fetch limit must be an integer greater than zero.", file = sys.stderr ) sys.exit(1) if args.command == "cfg": pass # validate args return args def main(argv): args = parse_args(argv) if args.command in [ "dl", "lns" ]: for i, track in enumerate(args.tracks): try: args.tracks[i] = Track(track, args.duration) except: print("Error: invalid track format '" + track + "'.", file = sys.stderr) sys.exit(1) modules = { m for m, arg in [ (module.slider, args.slider), (module.mp3co, args.mp3co), (module.zippy, args.zippy) ] if arg } or all_modules download_tracks = args.command == "dl" tracks = iter(args.tracks) try: slizzy( next(tracks), modules, download_tracks, fetch_limit = args.fetch_limit, picky = args.picky ) for track in tracks: print(color.yellow(70 * "-")) slizzy(track, modules, download_tracks) except config.ConfigError as e: print("Error (config): " + str(e), file = sys.stderr) sys.exit(2) if args.command == "cfg": if args.google_key: config.cfg["google"]["key"] = args.google_key if args.beatport_cx: config.cfg["beatport"]["cx"] = args.beatport_cx if args.zippyshare_cx: config.cfg["zippyshare"]["cx"] = args.zippyshare_cx try: config.update(config.cfg) except config.ConfigError as e: print("Error (config): " + str(e), file = sys.stderr) sys.exit(2) def cli(): import signal def sigint(sig, frame): print() # Exit progress logging print("Slizzy: interrupted.", file = sys.stderr) 
sys.exit(130) signal.signal(signal.SIGINT, sigint) main(sys.argv[1:])
25.925072
101
0.619942
1,130
8,996
4.841593
0.215044
0.029245
0.023762
0.026321
0.235971
0.178212
0.139828
0.104551
0.077682
0.056114
0
0.008097
0.258671
8,996
346
102
26
0.812266
0.022343
0
0.277154
0
0.003745
0.200933
0
0
0
0
0
0
1
0.022472
false
0.003745
0.05618
0
0.097378
0.059925
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
daf74249a6fb322cbc113ac8d72e67fa77ee0be4
95
py
Python
tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_f0861370.py
liuxiaomiao123/NeuroMathAcademy
16a7969604a300bf9fbb86f8a5b26050ebd14c65
[ "CC-BY-4.0" ]
2
2020-07-03T04:39:09.000Z
2020-07-12T02:08:31.000Z
tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_f0861370.py
NinaHKivanani/course-content
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
[ "CC-BY-4.0" ]
1
2020-06-22T22:57:03.000Z
2020-06-22T22:57:03.000Z
tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_f0861370.py
NinaHKivanani/course-content
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
[ "CC-BY-4.0" ]
1
2021-08-06T08:05:01.000Z
2021-08-06T08:05:01.000Z
score, evectors, evals = pca(X) with plt.xkcd(): plot_eigenvalues(evals) plt.xlim([0,100])
19
31
0.684211
15
95
4.266667
0.866667
0
0
0
0
0
0
0
0
0
0
0.04878
0.136842
95
5
32
19
0.731707
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
daf767e70655a7695277ba68df583907a9b0cda7
319
py
Python
research_user_interest/model/config.py
sinchir0/research_user_interest
eb073aaae0e1dba0abb7064c1921c9c96671e2b0
[ "MIT" ]
null
null
null
research_user_interest/model/config.py
sinchir0/research_user_interest
eb073aaae0e1dba0abb7064c1921c9c96671e2b0
[ "MIT" ]
null
null
null
research_user_interest/model/config.py
sinchir0/research_user_interest
eb073aaae0e1dba0abb7064c1921c9c96671e2b0
[ "MIT" ]
null
null
null
import luigi class MasterConfig(luigi.Config): model_path: str = luigi.Parameter() embedding_dim: int = luigi.IntParameter() target_title1: str = luigi.Parameter() target_title2: str = luigi.Parameter() target_title1_label: str = luigi.Parameter() target_title2_label: str = luigi.Parameter()
29
48
0.724138
38
319
5.868421
0.447368
0.179372
0.381166
0.309417
0.26009
0
0
0
0
0
0
0.015094
0.169279
319
10
49
31.9
0.826415
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.125
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
dafaac4260b03a784e5a2bbeabdd300085af3fb8
74
py
Python
diehard/e_vars.py
skyshy0707/diehard
a6aaf47c728939606158f8d124d6edd7e12ef761
[ "BSL-1.0" ]
null
null
null
diehard/e_vars.py
skyshy0707/diehard
a6aaf47c728939606158f8d124d6edd7e12ef761
[ "BSL-1.0" ]
null
null
null
diehard/e_vars.py
skyshy0707/diehard
a6aaf47c728939606158f8d124d6edd7e12ef761
[ "BSL-1.0" ]
null
null
null
SECRET_KEY='0nk#k)hck^)6y6*+%4(&r$ew+mhubb!trj-cz_0oturh48p*+y' DEBUG=True
37
63
0.72973
16
74
3.25
1
0
0
0
0
0
0
0
0
0
0
0.09589
0.013514
74
2
64
37
0.616438
0
0
0
0
0.5
0.666667
0.666667
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dafc15205b01c79014e37b7285dde88b163902fd
383
py
Python
hack_chat_related/cynthiabot/word_sources/scratch_4.py
Cynthia7979/tools-programs
0bf0b8bf33205c11d584b216d35c1ca3d23f04f2
[ "MIT" ]
2
2021-02-09T15:58:29.000Z
2021-10-08T04:34:51.000Z
hack_chat_related/cynthiabot/word_sources/scratch_4.py
Cynthia7979/tools-programs
0bf0b8bf33205c11d584b216d35c1ca3d23f04f2
[ "MIT" ]
null
null
null
hack_chat_related/cynthiabot/word_sources/scratch_4.py
Cynthia7979/tools-programs
0bf0b8bf33205c11d584b216d35c1ca3d23f04f2
[ "MIT" ]
null
null
null
f = open('essay_material_word_sources.txt', encoding='utf-16-le') fw = open('essay_material_word_sources_.txt', 'w', encoding='utf-8') output = [] for l in f.readlines(): print(l) if l and (l != ' ') and (l != '\n') and ('【' not in l) and ('】' not in l): fw.write(l.strip(' ')) # print(output[-1]) else: print('no') # print(''.join(output))
29.461538
79
0.545692
58
383
3.482759
0.517241
0.059406
0.168317
0.207921
0.306931
0.306931
0
0
0
0
0
0.013559
0.229765
383
12
80
31.916667
0.671186
0.104439
0
0
0
0
0.262195
0.192073
0
0
0
0
0
1
0
false
0
0
0
0
0.25
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
dafce01a1c13646ea816c979ed62d0361b09fc3a
330
py
Python
simplejson_test/testdata/allennlp/data/tokenizers/tmp.py
rahman-mahmudur/PyART
36591cd10b2b7a560bbcb47a6cf744b72466f92a
[ "Apache-2.0" ]
7
2021-03-20T21:12:51.000Z
2022-01-30T09:26:52.000Z
DataFlowEvaluation/data/allennlp/data/tokenizers/tmp.py
PYART0/PyART-demo
9a889662fb2610b7be2687a8304620855e7c76de
[ "Apache-2.0" ]
null
null
null
DataFlowEvaluation/data/allennlp/data/tokenizers/tmp.py
PYART0/PyART-demo
9a889662fb2610b7be2687a8304620855e7c76de
[ "Apache-2.0" ]
4
2021-03-25T14:10:09.000Z
2022-03-09T09:23:30.000Z
from typing import List from overrides import overrides from allennlp.data.tokenizers.token import Token from allennlp.data.tokenizers.tokenizer import Tokenizer @Tokenizer.register("just_spaces") class WhitespaceTokenizer(Tokenizer): @overrides def tokenize(self, text: str) -> List[Token]: reveal_type(text)
23.571429
56
0.775758
40
330
6.35
0.55
0.094488
0.125984
0.204724
0
0
0
0
0
0
0
0
0.142424
330
14
57
23.571429
0.897527
0
0
0
0
0
0.033233
0
0
0
0
0
0
1
0.111111
false
0
0.444444
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
daff7eb461dc9d52057318faa8c976ebd75932d5
2,056
py
Python
finitio/types/set_type.py
llambeau/finitio.py
27c2799709993c6edb9d9038290792ed90a97346
[ "0BSD" ]
1
2016-02-06T17:16:22.000Z
2016-02-06T17:16:22.000Z
finitio/types/set_type.py
llambeau/finitio.py
27c2799709993c6edb9d9038290792ed90a97346
[ "0BSD" ]
null
null
null
finitio/types/set_type.py
llambeau/finitio.py
27c2799709993c6edb9d9038290792ed90a97346
[ "0BSD" ]
null
null
null
from ..support.decorators import TypeType from .collection_type import CollectionType from ..exceptions import UndressError @TypeType("set", ['element_type', 'metadata']) class SetType(CollectionType): def _include(self, value, world): if not isinstance(value, set) and not isinstance(value, list): return False if not all(self.element_type.include(v, world) for v in value): return False return True def _m_dress(self, value, monad): if not isinstance(value, set) and not isinstance(value, list): msg = 'set/list expected, got `{0}`'.format(value) return monad.failure(self, [msg, [value]]) def mapper(elm, _idx): return self.element_type.m_dress(elm, monad) def on_failure(causes): msg = "Invalid ${typeName}" return monad.failure(self, [msg, ["Set"]], causes) m = monad.map(list(value), mapper, on_failure) def finalise(_set): duplicates = set([x for x in _set if _set.count(x) > 1]) if len(duplicates): msg = "Duplicate values: `${duplicates}`" dupstr = [str(dup) for dup in duplicates] err = monad.failure(self, [msg, [', '.join(dupstr)]]) def cause_failure(cause): return monad.failure(self, "Invalid Set", [cause]) return err.on_failure(cause_failure) else: return m return m.on_success(finalise) def _undress(self, value, as_type): if not isinstance(as_type, CollectionType): raise UndressError('Unable to undress `{0}` to `{1}' .format(value, as_type)) return super(self, SetType).undress(value, as_type) def low(self): return SetType(self.element_type.low()) def resolve_proxies(self, system): return self.element_type.resolve_proxies(system) def __repr__(self): return '{' + '{0}'.format(self.element_type) + '}'
33.16129
71
0.587549
243
2,056
4.839506
0.312757
0.056122
0.063776
0.056122
0.12415
0.081633
0.081633
0.081633
0.081633
0.081633
0
0.003444
0.293774
2,056
61
72
33.704918
0.806474
0
0
0.090909
0
0
0.075389
0
0
0
0
0
0
1
0.227273
false
0
0.068182
0.113636
0.636364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
97008b753f686abd12cd11cf56dd253fd134810a
2,120
py
Python
hugs/manager.py
Bogdanp/hugs
e7f16f15369fbe3da11d89882d76c7ef432f3709
[ "BSD-3-Clause" ]
22
2017-07-20T18:02:27.000Z
2021-06-10T13:06:22.000Z
hugs/manager.py
Bogdanp/hugs
e7f16f15369fbe3da11d89882d76c7ef432f3709
[ "BSD-3-Clause" ]
null
null
null
hugs/manager.py
Bogdanp/hugs
e7f16f15369fbe3da11d89882d76c7ef432f3709
[ "BSD-3-Clause" ]
2
2019-12-11T20:44:08.000Z
2021-02-02T04:37:04.000Z
import functools import sqlite3 from .repository import Repository try: from psycopg2.extras import DictRow, RealDictRow ROW_CLASSES = (dict, sqlite3.Row, DictRow, RealDictRow) except ImportError: # pragma: no cover ROW_CLASSES = (dict, sqlite3.Row) class Manager: """Managers extend repositories with the ability to iterate over queries and convert result rows to concrete data types. """ def __init__(self, value_factory=dict, *, repository=None): self.repository = repository or Repository() self.load_queries = self.repository.load_queries self.value_factory = value_factory def __getattr__(self, name): fn = getattr(self.repository, name) if getattr(fn, "is_command", False): return command_runner(fn) return query_iterator(fn, self.value_factory) def command_runner(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): fn(*args, **kwargs) # Grab the cursor late so that if the user forgets to provide # it the TypeError they get will refer to the query function, # not wrapper() itself. cursor = args[0] if cursor.description: result = cursor.fetchone() if isinstance(result, ROW_CLASSES): return result return {col[0]: val for col, val in zip(cursor.description, result)} return None return wrapper def query_iterator(fn, value_factory): @functools.wraps(fn) def wrapper(*args, **kwargs): fn(*args, **kwargs) # Grab the cursor late so that if the user forgets to provide # it the TypeError they get will refer to the query function, # not wrapper() itself. cursor = args[0] while True: results = cursor.fetchmany() if not results: break for result in results: if not isinstance(result, ROW_CLASSES): result = {col[0]: val for col, val in zip(cursor.description, result)} yield value_factory(**result) return wrapper
29.859155
90
0.628774
257
2,120
5.089494
0.357977
0.055046
0.036697
0.03211
0.365443
0.328746
0.328746
0.328746
0.328746
0.328746
0
0.005305
0.288679
2,120
70
91
30.285714
0.862069
0.197642
0
0.227273
0
0
0.005952
0
0
0
0
0
0
1
0.136364
false
0
0.113636
0
0.431818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
970230cccc075c9262639a742440eb46e3401e42
1,922
py
Python
src/Application/PythonScriptModule/pymodules_old/apitest/thread_test.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
1
2018-04-02T15:38:10.000Z
2018-04-02T15:38:10.000Z
src/Application/PythonScriptModule/pymodules_old/apitest/thread_test.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
null
null
null
src/Application/PythonScriptModule/pymodules_old/apitest/thread_test.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
1
2021-09-04T12:37:34.000Z
2021-09-04T12:37:34.000Z
import time from collections import deque try: import rexviewer as r except ImportError: #not running under rex import mockviewer as r import circuits from circuits.core import handler class TestThread(circuits.Thread): """ A threaded circuits component inside Naali, now testing how this behaves, as can be needed for e.g. webdav inv """ def __init__(self): circuits.Thread.__init__(self) self.commands = deque() self.start() #gets events from the manager in the main thread @handler("on_input") #why is this needed for Thread, but not for Component? def on_input(self, evid): print "Test Thread received input event:", evid #this is executed in a separate thread def run(self): while 1: time.sleep(1) print "I'm a lazy sleeper. And even a blocking one." if len(self.commands) > 0: print self.commands c = self.commands.popleft() if c == "stop": self.stop() return #when this worker thread stops itself def stop(self): circuits.Thread.stop(self) print "stopped" #when the manager is stopped @handler("stopped") def stopped(self, caller): self.commands.append("stop") if __name__ == '__main__': from circuits import Event, Manager, Debugger class Input(Event): pass m = Manager() c = TestThread() m += c + Debugger() now = time.time() interval = 0.5 prev_time = now try: while 1: now = time.time() if prev_time + interval < now: m.push(Input(1), "on_input") prev_time = now while m: m.flush() except KeyboardInterrupt: print "stopping" m.stop() while m: m.flush() c.join()
24.961039
79
0.570239
237
1,922
4.531646
0.421941
0.055866
0.03352
0.022346
0
0
0
0
0
0
0
0.005529
0.341311
1,922
76
80
25.289474
0.842812
0.114984
0
0.218182
0
0
0.083706
0
0
0
0
0
0
0
null
null
0.018182
0.145455
null
null
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
970296f462844f00061a21409ca5bc6695b20b6a
5,113
py
Python
PlotlyandPython/Lessons/(02) Lineplots/Notebooks/Python Scripts/Lineplots (07) - Making dashed and dotted lines.py
peternewman22/Python_Courses
07a798b6f264fc6069eb1205c9d429f00fb54bc5
[ "MIT" ]
null
null
null
PlotlyandPython/Lessons/(02) Lineplots/Notebooks/Python Scripts/Lineplots (07) - Making dashed and dotted lines.py
peternewman22/Python_Courses
07a798b6f264fc6069eb1205c9d429f00fb54bc5
[ "MIT" ]
null
null
null
PlotlyandPython/Lessons/(02) Lineplots/Notebooks/Python Scripts/Lineplots (07) - Making dashed and dotted lines.py
peternewman22/Python_Courses
07a798b6f264fc6069eb1205c9d429f00fb54bc5
[ "MIT" ]
null
null
null
# coding: utf-8 # # Lineplots (07) - Making dashed and dotted lines # In the last lesson we saw how to change the colour and thickness of the lines in our plot. # # In this lesson we'll learn how to displayed a dashed or dotted line instead of a solid line. In the next lesson we'll find out how to set the marker symbol for each trace. Changing these two properties in addition to manipulating the colour and line width gives us the ability to distinguish between a large number of different data items. # ## Module Imports # In[1]: #plotly.offline doesn't push your charts to the clouds import plotly.offline as pyo #allows us to create the Data and Figure objects from plotly.graph_objs import * #plotly.plotly pushes your charts to the cloud import plotly.plotly as py #pandas is a data analysis library import pandas as pd from pandas import DataFrame # In[2]: #lets us see the charts in an iPython Notebook pyo.offline.init_notebook_mode() # run at the start of every ipython # ## Getting the data # We'll get the data from the same source as the previous lesson. 
# In[3]: expenseData = pd.read_csv("http://richard-muir.com/data/public/csv/NumberOfMPsExpenseClaims_2010-2015.csv") # In[4]: expenseData.head(5) # ## Plotting the data # # We'll plot the data for the different years using a For loop: # In[5]: traces = [] for i in range(2010, 2016): traces.append({'type' : 'scatter', 'x' : expenseData['month'], 'y' : expenseData['NumberOfClaims' + str(i)], 'name' : i, 'mode' : 'lines'}) # In[6]: data = Data(traces) # In[7]: layout = {'title' : 'Number of expenses by month for 2010 - 2015', 'xaxis' : {'title' : 'Month'}, 'yaxis' : {'title' : 'Yaxis title1'}} # In[8]: fig = Figure(data = data, layout = layout) pyo.iplot(fig) py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\pyo.iplot-0.png") # # ## Changing the solidity of a line # We can change the solidity of a line by using the <code>'dash'</code> option in the <code>'line'</code> dictionary in the trace: # ````python # trace = {'type' : 'scatter', # 'line' : {'dash' : <Dash string/Dash length in pixels/Variable>} # ```` # # Some valid dash strings are: # - <code>'solid'</code> # - <code>'dash'</code> # - <code>'dot'</code> # - <code>'dashdot'</code> # # Here's what the dash strings look like. # # I'm using a list of dash strings to contain the possible values. # # I'm using the enumerate function and floor division to give me the number of the index to pass to the list. The <code>enumerate()</code> creates a variable which holds the value of each item's index in the list in addition to the variable which holds the actual value of the variable. This value starts at 0 for the first item and increments by one for each subsequent item. 
# In[9]: dashes = ['dash', 'dot', 'dashdot'] for i, yr in enumerate(range(2010, 2016)): print(i, yr) # In[10]: for i, yr in enumerate(range(2010, 2016)): print(dashes[(i//2)]) # Now I'll apply this to our loop which creates the traces: # In[11]: traces = [] for i, yr in enumerate(range(2010, 2016)): traces.append({'type' : 'scatter', 'x' : expenseData['month'], 'y' : expenseData['NumberOfClaims' + str(yr)], 'line' : {'dash' : dashes[i//2]}, 'name' : yr, 'mode' : 'lines'}) # Refreshing the Data and Figure objects to see the effect on the chart: # In[12]: data = Data(traces) fig = Figure(data = data, layout = layout) pyo.iplot(fig) py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\pyo.iplot-1.png") # # Let's push this chart to the cloud. It's not quite production-ready, but it's good to record our progress! # In[13]: py.plot(fig, filename = "MP expenses by month 2010-2015 (Line solidity)", filopt="overwrite") py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\py.plot-0.png") # # ### Changing line solidity - what have we learnt? # # We've seen how to manipulate the value of the <code>'dash'</code> key in the <code>'line'</code> dictionary to change the solidity of the line. We can use a string such as <code>'dashdot'</code> or set the dash length to be an integer number of pixels. We also utilised the <code>enumerate()</code> function to help us choose an item from a list by index and value. # # In the next lesson we'll find out how to specify that Plotly shows the marker in addition to the line. We'll also see how to change the marker symbol. 
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
32.157233
376
0.687463
819
5,113
4.282051
0.332112
0.011406
0.01939
0.026233
0.305389
0.282007
0.26661
0.256059
0.251212
0.231252
0
0.023239
0.192059
5,113
158
377
32.360759
0.825708
0.557207
0
0.414634
0
0.073171
0.414155
0.157534
0
0
0
0
0
1
0
false
0.073171
0.121951
0
0.121951
0.04878
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
970361b69af566e027101b9098d4da56f00db470
3,344
py
Python
spydrnet/__init__.py
ganeshgore/spydrnet
22672b8fc7d63461a71077bd20f29df6d38e96f4
[ "BSD-3-Clause" ]
null
null
null
spydrnet/__init__.py
ganeshgore/spydrnet
22672b8fc7d63461a71077bd20f29df6d38e96f4
[ "BSD-3-Clause" ]
null
null
null
spydrnet/__init__.py
ganeshgore/spydrnet
22672b8fc7d63461a71077bd20f29df6d38e96f4
[ "BSD-3-Clause" ]
null
null
null
""" SpyDrNet ======== SpyDrNet is an EDA tool for analyzing and transforming netlists. See https://byuccl.github.io/spydrnet for more details. """ import importlib import pkgutil import pathlib import sys import os discovered_plugins = { name: importlib.import_module(name) for finder, name, ispkg in pkgutil.iter_modules() if name.startswith('spydrnet_') } print("Installed Plugins", discovered_plugins.keys()) def get_active_plugins(): active_plugins = {} config_file = os.path.join(pathlib.Path.home(), ".spydrnet") if os.path.isfile(config_file): for plugin in open(config_file, "r").read().split(): if discovered_plugins.get(plugin, None): active_plugins.update({plugin: discovered_plugins[plugin]}) else: print("Plugin %s is not installed " % plugin) else: with open(config_file, "w") as fp: fp.write("\n".join(discovered_plugins.keys())) active_plugins.update(discovered_plugins) return active_plugins print("Active Plugins", get_active_plugins().keys()) # Release data from spydrnet import release __author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \ (release.authors['Keller'] + release.authors['Skouson'] + release.authors['Wirthlin']) __license__ = release.license __date__ = release.date __version__ = release.version __release__ = release.release from spydrnet.ir import * from spydrnet.util.hierarchical_reference import HRef OUT = Port.Direction.OUT IN = Port.Direction.IN INOUT = Port.Direction.INOUT UNDEFINED = Port.Direction.UNDEFINED from spydrnet.util.selection import INSIDE, OUTSIDE, BOTH, ALL from spydrnet.testing.test import run as test from spydrnet.parsers import parse from spydrnet.composers import compose from spydrnet.plugins import namespace_manager from spydrnet.util import get_netlists, get_libraries, get_definitions, get_ports, get_cables, get_instances,\ get_wires, get_pins from spydrnet.util import get_hinstances, get_hports, get_hpins, get_hcables, get_hwires import os base_dir = os.path.dirname(os.path.abspath(__file__)) import glob example_netlist_names = 
list() for filename in glob.glob(os.path.join(base_dir, 'support_files', 'EDIF_netlists', "*")): basename = os.path.basename(filename) example_netlist_names.append(basename[:basename.index('.')]) example_netlist_names.sort() # logger for the module import logging import sys LOG_FORMAT = "%(levelname)5s %(filename)s:%(lineno)s (%(threadName)10s) - %(message)s" logger = logging.getLogger('spydrnet_logs') logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setLevel(logging.INFO) stream_handler.setFormatter(logging.Formatter(LOG_FORMAT)) logger.addHandler(stream_handler) def enable_file_logging(LOG_LEVEL=None, filename=""): LOG_LEVEL = LOG_LEVEL or "INFO" file_handler = logging.FileHandler("_" + filename + "_spydrnet.log", mode='w') file_handler.setFormatter(logging.Formatter(LOG_FORMAT)) file_handler.setLevel(eval(f"logging.{LOG_LEVEL}")) logger.addHandler(file_handler) return file_handler def load_example_netlist_by_name(name): assert name in example_netlist_names, "Example netlist not found" return parse(os.path.join(base_dir, 'support_files', 'EDIF_netlists', name + ".edf.zip"))
30.962963
110
0.739833
440
3,344
5.395455
0.370455
0.050548
0.026959
0.00337
0.09604
0.071609
0.034541
0.034541
0.034541
0
0
0.001047
0.142943
3,344
107
111
31.252336
0.827285
0.052632
0
0.078947
0
0.013158
0.106046
0.007281
0
0
0
0
0.013158
1
0.039474
false
0
0.263158
0
0.342105
0.039474
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9703d55b61dbe9671b606d116415a69bd78ce48a
620
py
Python
tools/sparse_dense_size_comparison.py
3rror/aca_pathfinding_project
d7a91b1491e8eca08849e526634bdaa9a3833fa2
[ "MIT" ]
null
null
null
tools/sparse_dense_size_comparison.py
3rror/aca_pathfinding_project
d7a91b1491e8eca08849e526634bdaa9a3833fa2
[ "MIT" ]
null
null
null
tools/sparse_dense_size_comparison.py
3rror/aca_pathfinding_project
d7a91b1491e8eca08849e526634bdaa9a3833fa2
[ "MIT" ]
null
null
null
# Compare memory usage of a dense and a sparse adjacency matrix.
#
# Requires numpy and scipy. Install with `pip3 install --user numpy scipy`
# Authors: Gianluca Andreotti, Aurora Lucrezia Castro

import numpy as np
from scipy.sparse import csr_matrix
import sys


def load_matrix(file):
    """Load an adjacency matrix and print dense vs. sparse memory usage.

    Args:
        file: path or file-like object readable by ``np.loadtxt`` containing
            a whitespace-separated integer adjacency matrix, one row per line.
    """
    matrix = np.loadtxt(file, dtype=int, ndmin=2)
    print("Nodes: " + str(len(matrix)))
    # nbytes is in bytes; divide by 1e6 for megabytes.  The original divided
    # by 1000 (kilobytes) while still labelling the value "mb".
    print(f"Dense matrix: {matrix.nbytes / 1e6}mb")
    sparse_csr_mat = csr_matrix(matrix)
    # A CSR matrix stores three arrays (values plus the two index arrays);
    # counting only .data underestimates its real footprint.
    sparse_bytes = (sparse_csr_mat.data.nbytes
                    + sparse_csr_mat.indptr.nbytes
                    + sparse_csr_mat.indices.nbytes)
    print(f"Sparse matrix: {sparse_bytes / 1e6}mb")
    print("")


if __name__ == "__main__":
    for f in sys.argv[1:]:
        load_matrix(f)
25.833333
66
0.696774
93
620
4.473118
0.591398
0.043269
0.057692
0
0
0
0
0
0
0
0
0.021825
0.187097
620
23
67
26.956522
0.803571
0.282258
0
0
0
0
0.238636
0.061364
0
0
0
0
0
1
0.076923
false
0
0.230769
0
0.307692
0.307692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
97062759c3659ac5db7690deb35f3812cc14b685
5,298
py
Python
app/crypto_algo.py
paul3bin/cryptography-api
13145ef897dcd3e5d4850bbf9f3572556a055e22
[ "MIT" ]
null
null
null
app/crypto_algo.py
paul3bin/cryptography-api
13145ef897dcd3e5d4850bbf9f3572556a055e22
[ "MIT" ]
null
null
null
app/crypto_algo.py
paul3bin/cryptography-api
13145ef897dcd3e5d4850bbf9f3572556a055e22
[ "MIT" ]
null
null
null
from pandas import DataFrame

# International Morse table used by MorseCode; built once at module scope
# instead of on every getDictItems() call.  Bug fix: the original encoded
# '2' as '..--'; the correct International Morse code for 2 is '..---'.
_MORSE_DICT = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.',
               'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---',
               'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---',
               'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-',
               'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--',
               'Z': '--..', '0': '-----', '1': '.----', '2': '..---',
               '3': '...--', '4': '....-', '5': '.....', '6': '-....',
               '7': '--...', '8': '---..', '9': '----.', '.': '.-.-.-',
               ',': '--..--', '?': '..--..', '!': '-.-.--', '/': '-..-.',
               '(': '-.--.', ')': '-.--.-', '&': '.-...', ':': '---...',
               ';': '-.-.-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
               '_': '..--.-', '$': '...-..-', '@': '.--.-.'}


class CaesarCipher:
    """Caesar shift cipher over upper-case letters.

    Non-alphabetic characters are silently dropped from the output.
    The shift is reduced mod 26, so any integer key works (the original
    wrapped only once, i.e. required 0 <= key <= 26).
    """

    def __init__(self, text, key) -> None:
        self.text = text  # input text, any case
        self.key = key    # integer shift

    def decrypt(self):
        """Shift every letter back by ``key`` positions (mod 26)."""
        decipheredText = ""
        for ch in self.text.upper():
            if ch.isalpha():
                decipheredText += chr(ord('A') + (ord(ch) - ord('A') - self.key) % 26)
        return decipheredText

    def encrypt(self):
        """Shift every letter forward by ``key`` positions (mod 26)."""
        cipherText = ""
        for ch in self.text.upper():
            if ch.isalpha():
                cipherText += chr(ord('A') + (ord(ch) - ord('A') + self.key) % 26)
        return cipherText


class VigenereCipher:
    """Vigenere cipher; text and key are upper-cased with spaces removed.

    NOTE(review): assumes A-Z input only — other characters produce garbage
    because the arithmetic is done on raw code points mod 26.
    """

    def __init__(self, text, key):
        # converting key and text to uppercase and removing spaces from them.
        self.text = "".join(text.upper().split(' '))
        self.key = "".join(key.upper().split(' '))

    def encrypt(self):
        """Encrypt self.text with the repeating key."""
        cipher_text, j = [], 0
        for i in range(len(self.text)):
            if j > len(self.key) - 1:
                j = 0  # wrap the key index
            character = ((ord(self.text[i]) + ord(self.key[j])) % 26) + ord('A')
            j += 1
            cipher_text.append(chr(character))
        return ''.join(cipher_text)

    def decrypt(self):
        """Inverse of encrypt() for the same key."""
        plain_text, j = [], 0
        for i in range(len(self.text)):
            if j > len(self.key) - 1:
                j = 0
            character = ((ord(self.text[i]) - ord(self.key[j]) + 26) % 26) + ord('A')
            plain_text.append(chr(character))
            j += 1
        return ''.join(plain_text)


class MorseCode:
    """Morse encoder/decoder; letters separated by spaces, words by '/'."""

    def __init__(self, message: str) -> None:
        self.message = message.upper()

    def getDictItems(self, val, option):
        """Translate one symbol.

        option == 1: plain character -> morse string.
        option == 2: morse string -> plain character.
        """
        operation = {1: _MORSE_DICT, 2: list(_MORSE_DICT.keys())}
        if option == 1:
            return operation[option][val]
        else:
            return operation[option][list(_MORSE_DICT.values()).index(val)]

    def encrypt(self):
        """Encode the message; each symbol is followed by a space."""
        return "".join(['/ ' if character == ' ' else f'{self.getDictItems(character, 1)} '
                        for character in self.message])

    def decrypt(self):
        """Decode a morse string whose tokens are separated by single spaces."""
        return "".join([' ' if character == '/' else f'{self.getDictItems(character, 2)}'
                        for character in self.message.split(' ')])


class RunningKeyCipher:
    """Running key cipher implemented via a 26x26 tabula recta DataFrame.

    NOTE(review): the key is not repeated, so it must be at least as long as
    the plain text or indexing raises IndexError — confirm with callers.
    """

    def __init__(self, plainText, key):
        # converting the plain text and key to upper case and removing spaces
        self.pt = "".join(plainText.upper().split(' '))
        self.ky = "".join(key.upper().split(' '))
        # creating a DataFrame of size 26x26 (row i is the alphabet rotated by i)
        tab, tableau = [chr(a) for a in range(65, 91)], []
        for i in range(26):
            row = tab[i:] + tab[:i]
            tableau.append(row)
        self.tabulaRecta = DataFrame(tableau, index=tab, columns=tab)

    def encrypt(self):
        """Look up tabulaRecta[plain_char_row][key_char_col] per position."""
        encryptedText = ''
        for i in range(len(self.pt)):
            encryptedText += self.tabulaRecta.values[ord(self.pt[i]) - 65][ord(self.ky[i]) - 65]
        return encryptedText

    def decrypt(self):
        """Invert encrypt(): find the row whose key column holds the cipher char."""
        decryptedText = ''
        for i in range(len(self.pt)):
            decryptedText += ''.join(
                self.tabulaRecta[self.tabulaRecta[self.ky[i]] == self.pt[i]].index.values)
        return decryptedText


class ROT13:
    """ROT13 over upper-case A-Z (spaces removed; self-inverse)."""

    def __init__(self, text: str):
        # removing spaces from the text
        self.text = "".join(text.upper().split(' '))

    def _rot(self):
        # A..M shift forward 13, N..Z shift back 13.
        return "".join([chr(ord(alphabet) - 13) if ord(alphabet) > ord('A') + 12
                        else chr(ord(alphabet) + 13) for alphabet in self.text])

    def encrypt(self):
        return self._rot()

    def decrypt(self):
        # ROT13 is its own inverse, so decryption is the same transform.
        return self._rot()
35.32
118
0.466214
554
5,298
4.400722
0.236462
0.045939
0.022559
0.022559
0.337982
0.283019
0.251846
0.235439
0.208368
0.182937
0
0.017001
0.322763
5,298
149
119
35.557047
0.662486
0.090411
0
0.291262
0
0
0.074667
0.012063
0.009709
0
0
0
0
1
0.15534
false
0
0.009709
0.038835
0.330097
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
97072840b3c078e668d6a47321880a4ea8713698
2,638
py
Python
MultiPManager/brokermq.py
sebastiantrianac/SoftTLON
3b798393efbd49b4a22acf19a056d64bef1d0ddf
[ "MIT" ]
null
null
null
MultiPManager/brokermq.py
sebastiantrianac/SoftTLON
3b798393efbd49b4a22acf19a056d64bef1d0ddf
[ "MIT" ]
null
null
null
MultiPManager/brokermq.py
sebastiantrianac/SoftTLON
3b798393efbd49b4a22acf19a056d64bef1d0ddf
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# coding=utf-8
#
# A module for create a multi-agent system over Ad-hoc networks
# Copyright (C) 2017-2018
# Juan Sebastian Triana Correa <justrianaco@unal.edu.co>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program.  If not, see [http://www.gnu.org/licenses/].
__author__ = "Juan Sebastian Triana Correa"
__copyright__ = "Copyright 2015, TLON group"
__license__ = "LGPL"
__version__ = "1.2"
__email__ = "justrianaco@unal.edu.co"
__status__ = "Development"

import sys
import stomp
import dill as pickle
import MultiPManager.managerImp as managerImp
import MultiPManager.multiProc as multiProc

AUTHKEY = ''
# Registry of resources (callables) announced on the broker, keyed by
# the resource's __name__; populated by __resourceTopicListener__.
tlon_resources = {}

def BrokerConnect(ip, port):
    """Open a STOMP connection to the message broker at (ip, port) and return it."""
    conn = stomp.Connection([(ip, port)])
    conn.start()
    conn.connect(wait=True)
    return conn

class __resourceTopicListener__(stomp.ConnectionListener):
    """Listener that registers announced resources into tlon_resources."""

    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        # NOTE(review): pickle/dill-deserializing data received from the
        # network executes arbitrary code if the broker is untrusted.
        global tlon_resources
        #print('Received a message {}'.format(message))
        #print('Request for resource {}'.format(message))
        # On Python 3 the STOMP frame body arrives as str and must be
        # re-encoded to bytes before unpickling.
        if sys.version_info[0]<3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        tlon_resources[tmp.__name__] = tmp

class __ordersTopicListener__(stomp.ConnectionListener):
    """Listener that executes work orders against a registered resource."""

    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        # NOTE(review): same pickle-on-network-data caveat as above.
        global tlon_resources
        if sys.version_info[0]<3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        print("{},{},{}".format(tmp['ip'], tmp['portnum'], tmp['authkey']))
        print(tmp)
        # Only act on orders for resources we have already registered;
        # connect to the remote job/result queues and run the resource.
        if tmp['resourceName'] in tlon_resources:
            manager = managerImp.make_client_manager(tmp['ip'], tmp['portnum'], tmp['authkey'])
            job_q = manager.get_job_q()
            result_q = manager.get_result_q()
            multiProc.tlon_multiprocessing(job_q, result_q, tlon_resources[tmp['resourceName']])
33.820513
96
0.689917
344
2,638
5.119186
0.453488
0.044293
0.040886
0.0477
0.303805
0.289608
0.230551
0.230551
0.230551
0.230551
0
0.009497
0.201668
2,638
77
97
34.25974
0.826686
0.331691
0
0.363636
0
0
0.114237
0.013203
0
0
0
0
0
1
0.113636
false
0
0.113636
0
0.295455
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9707987edbffdd07707988c15199cd2d6df39cc8
1,289
py
Python
web/models.py
naturaln0va/flask-ackermann-io
85916aaf93b0635f18f164da3e2c6ce86edadfc0
[ "MIT" ]
1
2019-12-09T07:26:43.000Z
2019-12-09T07:26:43.000Z
web/models.py
naturaln0va/flask-ackermann-io
85916aaf93b0635f18f164da3e2c6ce86edadfc0
[ "MIT" ]
4
2019-06-17T15:21:28.000Z
2021-03-20T05:05:42.000Z
web/models.py
naturaln0va/flask-ackermann-io
85916aaf93b0635f18f164da3e2c6ce86edadfc0
[ "MIT" ]
null
null
null
from app import db
from datetime import datetime as dt
from slugify import slugify


class Post(db.Model):
    """A blog post; slug and timestamp are derived at construction time."""

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String, nullable=False)
    description = db.Column(db.String, nullable=False)
    content = db.Column(db.String, nullable=False)
    slug = db.Column(db.String, nullable=False)
    timestamp = db.Column(db.DateTime, nullable=False)
    draft = db.Column(db.Boolean, default=True)
    category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
    category = db.relationship('Category', backref=db.backref('posts', lazy='dynamic'))

    def __init__(self, title, description, content, category=None):
        self.title = title
        self.description = description
        self.content = content
        self.timestamp = dt.utcnow()
        self.slug = slugify(title)
        self.category = category

    def __repr__(self):
        # Bug fix: the original chained `%` three times
        # ('...' % self.slug % self.title % self.category), which raises
        # TypeError because the first substitution consumes all three
        # format specifiers.  Supply all values in one tuple instead.
        return '<Post [%r] %r - %r>' % (self.slug, self.title, self.category)

    def url(self):
        """Return the site-relative URL for this post."""
        return '/posts/' + self.slug


class Category(db.Model):
    """A post category, referenced by Post.category."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<Category %r>' % self.name
32.225
87
0.657099
170
1,289
4.870588
0.270588
0.096618
0.120773
0.096618
0.259662
0.236715
0.096618
0.096618
0.096618
0.096618
0
0.001969
0.211792
1,289
40
88
32.225
0.812992
0
0
0.129032
0
0
0.054306
0
0
0
0
0
0
1
0.16129
false
0
0.096774
0.096774
0.774194
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9707bc0df9d0b4b62a5f4981d1500fdead7c395d
672
py
Python
tests/span/span_helpers.py
dmytroxshevchuk/sonic-mgmt
b784e598a063aba71fdf9ca23fe142840926e4cf
[ "Apache-2.0" ]
132
2016-10-19T12:34:44.000Z
2022-03-16T09:00:39.000Z
tests/span/span_helpers.py
dmytroxshevchuk/sonic-mgmt
b784e598a063aba71fdf9ca23fe142840926e4cf
[ "Apache-2.0" ]
3,152
2016-09-21T23:05:58.000Z
2022-03-31T23:29:08.000Z
tests/span/span_helpers.py
dmytroxshevchuk/sonic-mgmt
b784e598a063aba71fdf9ca23fe142840926e4cf
[ "Apache-2.0" ]
563
2016-09-20T01:00:15.000Z
2022-03-31T22:43:54.000Z
'''
Helper functions for span tests
'''
import ptf.testutils as testutils


def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor):
    '''
    Send packet from ptf and verify it on monitor port

    Args:
        ptfadapter: ptfadapter fixture
        src_port: ptf port index, from which packet will be sent
        monitor: ptf port index, where packet will be verified on
    '''
    # Use the real MAC of the sending port as the Ethernet source.
    src_mac = ptfadapter.dataplane.get_mac(0, src_port)
    # Broadcast-destination ICMP probe; the payload itself is irrelevant —
    # presumably only the mirrored copy on `monitor` matters (SPAN test).
    pkt = testutils.simple_icmp_packet(eth_src=src_mac, eth_dst='ff:ff:ff:ff:ff:ff')
    # Discard any previously captured frames so verification only sees
    # traffic resulting from this send.
    ptfadapter.dataplane.flush()
    testutils.send(ptfadapter, src_port, pkt)
    # Fails the surrounding test if the packet does not appear on `monitor`.
    testutils.verify_packet(ptfadapter, pkt, monitor)
29.217391
84
0.71875
96
672
4.864583
0.4375
0.042827
0.051392
0.051392
0.025696
0
0
0
0
0
0
0.001848
0.19494
672
22
85
30.545455
0.861368
0.36756
0
0
0
0
0.044041
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
97089962ed3ea0a8a6e34e92416d3e39210a2bda
9,039
py
Python
bonobo/nodes/basics.py
a-musing-moose/bonobo
b3e9ddd028aed4a8a4df8e4b89334951a343e6fa
[ "Apache-2.0" ]
null
null
null
bonobo/nodes/basics.py
a-musing-moose/bonobo
b3e9ddd028aed4a8a4df8e4b89334951a343e6fa
[ "Apache-2.0" ]
null
null
null
bonobo/nodes/basics.py
a-musing-moose/bonobo
b3e9ddd028aed4a8a4df8e4b89334951a343e6fa
[ "Apache-2.0" ]
null
null
null
import functools
import html
import itertools
import pprint

from bonobo import settings
from bonobo.config import Configurable, Option, Method, use_raw_input, use_context, use_no_input
from bonobo.config.functools import transformation_factory
from bonobo.config.processors import ContextProcessor, use_context_processor
from bonobo.constants import NOT_MODIFIED
from bonobo.util.objects import ValueHolder
from bonobo.util.term import CLEAR_EOL
from mondrian import term

__all__ = [
    'FixedWindow',
    'Format',
    'Limit',
    'OrderFields',
    'PrettyPrinter',
    'Rename',
    'SetFields',
    'Tee',
    'UnpackItems',
    'count',
    'identity',
    'noop',
]


def identity(x):
    """Return the input unchanged (pass-through transformation)."""
    return x


class Limit(Configurable):
    """
    Creates a Limit() node, that will only let go through the first n rows (defined by the `limit` option), unmodified.

    .. attribute:: limit

        Number of rows to let go through.

    TODO: simplify into a closure building factory?
    """
    limit = Option(positional=True, default=10)

    @ContextProcessor
    def counter(self, context):
        # Per-execution-context mutable counter, injected as first call arg.
        yield ValueHolder(0)

    def __call__(self, counter, *args, **kwargs):
        counter += 1
        # Rows past the limit are silently dropped (nothing is yielded).
        if counter <= self.limit:
            yield NOT_MODIFIED


def Tee(f):
    """Wrap *f* so it is called for its side effects while the row passes through unmodified."""
    from bonobo.constants import NOT_MODIFIED

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        nonlocal f
        f(*args, **kwargs)
        return NOT_MODIFIED

    return wrapped


def _shorten(s, w):
    """Truncate *s* to width *w* with a '...' suffix; no-op if w is falsy or s fits."""
    if w and len(s) > w:
        s = s[0:w - 3] + '...'
    return s


class PrettyPrinter(Configurable):
    """Print each row, choosing jupyter/console/plain rendering from the environment."""

    max_width = Option(
        int,
        default=term.get_size()[0],
        required=False,
        __doc__='''
        If set, truncates the output values longer than this to this width.
        '''
    )

    filter = Method(
        default=(lambda self, index, key, value:
                 (value is not None) and (not isinstance(key, str) or not key.startswith('_'))),
        __doc__='''
        A filter that determine what to print.

        Default is to ignore any key starting with an underscore and none values.
        '''
    )

    @ContextProcessor
    def context(self, context):
        # Accumulates table rows when running in jupyter; flushed on teardown.
        context.setdefault('_jupyter_html', None)
        yield context
        if context._jupyter_html is not None:
            from IPython.display import display, HTML
            display(HTML('\n'.join(['<table>'] + context._jupyter_html + ['</table>'])))

    def __call__(self, context, *args, **kwargs):
        # Pick the richest renderer available; always pass the row through.
        if not settings.QUIET:
            if term.isjupyter:
                self.print_jupyter(context, *args, **kwargs)
                return NOT_MODIFIED
            if term.istty:
                self.print_console(context, *args, **kwargs)
                return NOT_MODIFIED
        self.print_quiet(context, *args, **kwargs)
        return NOT_MODIFIED

    def print_quiet(self, context, *args, **kwargs):
        # One plain line per (key, value) that survives the filter.
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_quiet(index, key, value, fields=context.get_input_fields()))

    def format_quiet(self, index, key, value, *, fields=None):
        # XXX should we implement argnames here ?
        return ' '.join(((' ' if index else '-'), str(key), ':', str(value).strip()))

    def print_console(self, context, *args, **kwargs):
        # Rows are framed between box-drawing corner characters.
        print('\u250c')
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_console(index, key, value, fields=context.get_input_fields()))
        print('\u2514')

    def format_console(self, index, key, value, *, fields=None):
        fields = fields or []
        # For positional values, show the field name (with the index in dim
        # brackets) when one is known and differs from the bare index.
        if not isinstance(key, str):
            if len(fields) > key and str(key) != str(fields[key]):
                key = '{}{}'.format(fields[key], term.lightblack('[{}]'.format(key)))
            else:
                key = str(index)
        prefix = '\u2502 {} = '.format(key)
        prefix_length = len(prefix)

        def indent(text, prefix):
            # Continuation lines get the box-drawing gutter instead of the key.
            for i, line in enumerate(text.splitlines()):
                yield (prefix if i else '') + line + CLEAR_EOL + '\n'

        repr_of_value = ''.join(
            indent(pprint.pformat(value, width=self.max_width - prefix_length), '\u2502' + ' ' * (len(prefix) - 1))
        ).strip()
        return '{}{}{}'.format(prefix, repr_of_value.replace('\n', CLEAR_EOL + '\n'), CLEAR_EOL)

    def print_jupyter(self, context, *args):
        # Lazily emit the header row on first call, then append one <tr> per row.
        if not context._jupyter_html:
            context._jupyter_html = [
                '<thead><tr>',
                *map('<th>{}</th>'.format, map(html.escape, map(str, context.get_input_fields() or range(len(args))))),
                '</tr></thead>',
            ]
        context._jupyter_html += [
            '<tr>',
            *map('<td>{}</td>'.format, map(html.escape, map(repr, args))),
            '</tr>',
        ]


@use_no_input
def noop(*args, **kwargs):
    """Do nothing; pass the stream through unmodified."""
    return NOT_MODIFIED


class FixedWindow(Configurable):
    """
    Transformation factory to create fixed windows of inputs, as lists.

    For example, if the input is successively 1, 2, 3, 4, etc. and you pass it through a ``FixedWindow(2)``, you'll get
    lists of elements 2 by 2: [1, 2], [3, 4], ...
    """
    length = Option(int, positional=True)  # type: int

    @ContextProcessor
    def buffer(self, context):
        buffer = yield ValueHolder([])
        # On teardown, flush a final partial window padded with None.
        if len(buffer):
            last_value = buffer.get()
            last_value += [None] * (self.length - len(last_value))
            context.send(*last_value)

    @use_raw_input
    def __call__(self, buffer, bag):
        buffer.append(bag)
        if len(buffer) >= self.length:
            yield tuple(buffer.get())
            buffer.set([])


@transformation_factory
def OrderFields(fields):
    """
    Transformation factory to reorder fields in a data stream.

    :param fields: field names to put first, in this order; remaining input
        fields follow, sorted.
    :return: callable
    """
    fields = list(fields)

    @use_context
    @use_raw_input
    def _OrderFields(context, row):
        nonlocal fields
        context.setdefault('remaining', None)
        # Output fields are computed once, on the first row seen.
        if not context.output_type:
            context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
            context.set_output_fields(fields + context.remaining)
        yield tuple(row.get(field) for field in context.get_output_fields())

    return _OrderFields


@transformation_factory
def SetFields(fields):
    """
    Transformation factory that sets the field names on first iteration, without touching the values.

    :param fields: the field names to apply to the stream.
    :return: callable
    """

    @use_context
    @use_no_input
    def _SetFields(context):
        nonlocal fields
        if not context.output_type:
            context.set_output_fields(fields)
        return NOT_MODIFIED

    return _SetFields


@transformation_factory
def UnpackItems(*items, fields=None, defaults=None):
    """
    >>> UnpackItems(0)

    :param items: positions of dict-valued items to unpack.
    :param fields: output field names; inferred from the first row's keys if None.
    :param defaults: per-field fallback values for missing keys.
    :return: callable
    """
    defaults = defaults or {}

    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        # Field names are derived from the first bag when not given.
        if fields is None:
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        for item in items:
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values

    return _UnpackItems


@transformation_factory
def Rename(**translations):
    """Transformation factory that renames fields (new_name=old_name) without touching values."""
    # XXX todo handle duplicated
    fields = None
    # Invert to {old_name: new_name} for lookup by input field.
    translations = {v: k for k, v in translations.items()}

    @use_context
    @use_raw_input
    def _Rename(context, bag):
        nonlocal fields, translations
        if not fields:
            fields = tuple(translations.get(field, field) for field in context.get_input_fields())
            context.set_output_fields(fields)
        return NOT_MODIFIED

    return _Rename


@transformation_factory
def Format(**formats):
    """Transformation factory that adds/overwrites fields from str.format templates over the row's values."""
    fields, newfields = None, None

    @use_context
    @use_raw_input
    def _Format(context, bag):
        nonlocal fields, newfields, formats
        if not context.output_type:
            fields = context.input_type._fields
            # Template-only names become new output fields, appended at the end.
            newfields = tuple(field for field in formats if not field in fields)
            context.set_output_fields(fields + newfields)
        return tuple(
            formats[field].format(**bag._asdict()) if field in formats else bag.get(field)
            for field in fields + newfields
        )

    return _Format


def _count(self, context):
    """Context processor backing count(): send the final tally on teardown."""
    counter = yield ValueHolder(0)
    context.send(counter.get())


@use_no_input
@use_context_processor(_count)
def count(counter):
    """Count input rows; the total is emitted once when the context closes."""
    counter += 1
27.557927
120
0.605819
1,056
9,039
5.042614
0.214962
0.022535
0.021972
0.01784
0.212207
0.176338
0.073615
0.073615
0.058592
0.039437
0
0.005682
0.279566
9,039
327
121
27.642202
0.812039
0.093263
0
0.229358
0
0
0.060144
0
0
0
0
0.006116
0
1
0.133028
false
0
0.06422
0.013761
0.316514
0.059633
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9709d0267ed5104b57450430822594c17f2e9707
1,093
py
Python
examplesimple.py
atxarib99/kNN
20c41a549f9f101478c1523b2ba683a8058fa6c8
[ "MIT" ]
null
null
null
examplesimple.py
atxarib99/kNN
20c41a549f9f101478c1523b2ba683a8058fa6c8
[ "MIT" ]
null
null
null
examplesimple.py
atxarib99/kNN
20c41a549f9f101478c1523b2ba683a8058fa6c8
[ "MIT" ]
null
null
null
'''
This file serves to be an example on how to use kNN.
This file is the simplified version with no graphs. For an example with graphs, check example.py
Change k, noise, and amount of data to see how accuracy is affected.
'''

import kNN as knn
import modeler

# setup the trainer. Tune your k parameter here.
trainer = knn.knn(k=5)
# using basic modeler provided, can define how many elements, and how much noise we want.
parameters, labels = modeler.generateData(100, noiseFactor=.25)
# get parameters to test on. These should have 0 noise so we can accurately test them
testParameters, testLabel = modeler.generateData(25, noiseFactor=0)
# load the train data into the trainer
trainer.loadData(parameters, labels)

# holds the number of incorrect predictions
error = 0
# for each test element
for i in range(len(testParameters)):
    # use trainer to get a guess (confidence is unused in this simplified example)
    confidence, guess = trainer.predict(testParameters[i], negativeValue=0)
    # check if we were incorrect
    if guess != testLabel[i]:
        error += 1

# calculate and print accuracy (fraction of test elements classified correctly)
print("Accuracy", 1 - error / len(testParameters))
34.15625
153
0.743824
169
1,093
4.810651
0.544379
0.01968
0
0
0
0
0
0
0
0
0
0.015625
0.180238
1,093
32
154
34.15625
0.891741
0.547118
0
0
0
0
0.016949
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
970a946d901c0d355d2502d9d1f07264bbcb2be3
424
py
Python
python/simpletest/test.py
hackin-zhao/interesting_code
25fecb716f5c8f9143ec2824bbf8996b347ce8a9
[ "MIT" ]
1
2021-09-22T10:56:13.000Z
2021-09-22T10:56:13.000Z
python/simpletest/test.py
hackin-zhao/interesting_code
25fecb716f5c8f9143ec2824bbf8996b347ce8a9
[ "MIT" ]
1
2022-02-15T03:53:47.000Z
2022-02-15T03:53:47.000Z
python/simpletest/test.py
hackin-zhao/interesting_code
25fecb716f5c8f9143ec2824bbf8996b347ce8a9
[ "MIT" ]
null
null
null
import unittest

from mathtest import add, minus


class TestMathFunc(unittest.TestCase):
    """Unit tests for the arithmetic helpers exported by mathtest."""

    def test_add(self):
        """add(a, b) returns the arithmetic sum of its two arguments."""
        self.assertEqual(add(1, 2), 3)
        self.assertNotEqual(add(2, 2), 3)

    def test_minus(self):
        """minus(a, b) returns the difference a - b."""
        self.assertEqual(minus(3, 2), 1)


if __name__ == '__main__':
    unittest.main()
21.2
43
0.601415
56
424
4.375
0.446429
0.057143
0.114286
0.138776
0
0
0
0
0
0
0
0.028213
0.247642
424
19
44
22.315789
0.739812
0.162736
0
0
0
0
0.023881
0
0
0
0
0
0.3
1
0.2
false
0
0.2
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
970ba5beb8c6e85ad0c97cba7c1ffb95b3ebed8b
174
py
Python
src/tests/test_config.py
RomanRuby/machine-learning-project-template
d9c26786b50345e5665f76c9e6976531432b58c9
[ "MIT" ]
null
null
null
src/tests/test_config.py
RomanRuby/machine-learning-project-template
d9c26786b50345e5665f76c9e6976531432b58c9
[ "MIT" ]
null
null
null
src/tests/test_config.py
RomanRuby/machine-learning-project-template
d9c26786b50345e5665f76c9e6976531432b58c9
[ "MIT" ]
null
null
null
import sys
import unittest

# Make the project root importable when the suite is launched from here.
sys.path.append('.')


class TestDataSet(unittest.TestCase):
    """Placeholder suite for dataset configuration checks."""

    def test_cfg(self):
        """No-op: configuration validation is not implemented yet."""
        pass


if __name__ == '__main__':
    unittest.main()
13.384615
37
0.672414
21
174
5.142857
0.761905
0
0
0
0
0
0
0
0
0
0
0
0.201149
174
12
38
14.5
0.776978
0
0
0
0
0
0.051724
0
0
0
0
0
0
1
0.125
false
0.125
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
970bb5ea492f11426c0a5b6357ef8e845eea6ad9
2,181
py
Python
terse/ParserFactory.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
2
2015-04-08T21:28:35.000Z
2015-04-08T23:25:04.000Z
terse/ParserFactory.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
null
null
null
terse/ParserFactory.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
1
2015-04-08T21:29:22.000Z
2015-04-08T21:29:22.000Z
import logging
import os

from Interface.Isosurface import Isosurface
from Interface.JVXL import JVXL
from Interface.NBOinteractions import NBOinteractions
from Top import Top

log = logging.getLogger(__name__)


class ParserFactory(Top):
    """Factory that maps task records to the parser class handling them."""

    @staticmethod
    def interface_by_command_line(frecord):
        """
        Assign a class to a task from the group_names dictionary.

        frecord: sequence whose first element is the record type and the
        rest are parameters.  Returns a parser instance, Top() for unknown
        record types, or None for 'file' records (handled elsewhere).
        """
        rectype, params = frecord[0], frecord[1:]
        if rectype == 'file':
            return
        type2class = {
            'inbo': NBOinteractions,
            'iso': Isosurface,
            'jvxl': JVXL,
            'top': Top
        }
        if rectype in type2class:
            parser = type2class[rectype]()
            log.debug('Assigned parser was successfully loaded')
            return parser
        else:
            # Bug fix: the original logged `parser`, a name that is undefined
            # on this branch, so the error path itself raised NameError.
            log.error("Parser '%s' cannot be loaded" % rectype)
            return Top()

    @staticmethod
    def interface_by_file_extension(frecord, exts):
        """
        Assign a class to a 'file' record based on its file extension.

        exts: mapping of extension (without the dot) to the parser class
        name inside the Interface package.  Returns a parser instance, or
        Top() when the extension/module/class cannot be resolved.
        """
        rectype, params = frecord[0], frecord[1:]
        if rectype != 'file':
            return
        top = Top()
        file = params[0]
        base, ext = os.path.splitext(file)
        ext = ext[1:]  # strip the leading dot
        if ext in exts:
            ParsingClass = exts[ext]
            log.info('%s parser is selected for %s' % (ParsingClass, file))
        else:
            log.error("Extension '%s' is not registered" % ext)
            return top
        ModName = 'Interface.' + ParsingClass
        try:
            GenericParser = __import__(ModName)
            module = getattr(GenericParser, ParsingClass)
        except (ImportError, AttributeError):
            # Narrowed from a bare `except:` so unrelated errors propagate.
            log.error("Module '%s' cannot be loaded" % ModName)
            return top
        try:
            # getattr replaces the original eval('module.' + ParsingClass),
            # which would execute arbitrary text taken from the exts mapping.
            cl = getattr(module, ParsingClass)()
            log.debug('Assigned parser was successfully loaded')
            return cl
        except AttributeError:
            log.error("Parser '%s' cannot be loaded" % ParsingClass)
            return top

    @staticmethod
    def typeByContent(frecord):
        # TODO To be implemented
        rectype, params = frecord[0], frecord[1:]
        return Top()
27.961538
75
0.56121
223
2,181
5.421525
0.336323
0.044665
0.044665
0.052109
0.280397
0.232423
0.208437
0.160463
0.079404
0.079404
0
0.007714
0.346171
2,181
77
76
28.324675
0.840112
0.048602
0
0.322034
0
0
0.128698
0
0
0
0
0.012987
0
1
0.050847
false
0
0.118644
0
0.338983
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
970bccc21f91dbbbb912e9ec8f46b93496941b85
585
py
Python
src/data/439.py
NULLCT/LOMC
79a16474a8f21310e0fb47e536d527dd5dc6d655
[ "MIT" ]
null
null
null
src/data/439.py
NULLCT/LOMC
79a16474a8f21310e0fb47e536d527dd5dc6d655
[ "MIT" ]
null
null
null
src/data/439.py
NULLCT/LOMC
79a16474a8f21310e0fb47e536d527dd5dc6d655
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from collections import deque N, Q = map(int, input().split()) G = [[] for _ in range(N)] for _ in range(N - 1): a, b = map(lambda x: int(x) - 1, input().split()) G[a].append(b) G[b].append(a) s = 0 dist = [-1] * N que = deque() que.append(s) dist[s] = 0 while que: i = que.popleft() for j in G[i]: if dist[j] == -1: dist[j] = 1 - dist[i] que.append(j) for _ in range(Q): c, d = map(lambda x: int(x) - 1, input().split()) if dist[c] == dist[d]: print("Town") else: print("Road")
20.172414
53
0.499145
101
585
2.861386
0.386139
0.103806
0.103806
0.076125
0.17301
0.17301
0.17301
0.17301
0
0
0
0.021792
0.294017
585
28
54
20.892857
0.677966
0.035897
0
0
0
0
0.01421
0
0
0
0
0
0
1
0
false
0
0.041667
0
0.041667
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
970ca64e46ad17397f8882ea7b2df30130763068
5,384
py
Python
axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py
Subramani830/testrepo
4568c628dc0731d08315ebc830dc813e0abfc60d
[ "MIT" ]
null
null
null
axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py
Subramani830/testrepo
4568c628dc0731d08315ebc830dc813e0abfc60d
[ "MIT" ]
null
null
null
axis_inspection/axis_inspection/report/project_status_summary/project_status_summary.py
Subramani830/testrepo
4568c628dc0731d08315ebc830dc813e0abfc60d
[ "MIT" ]
null
null
null
# Copyright (c) 2013, veena and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ def execute(filters=None): columns = get_columns() data = [] conditions=get_conditions(filters) data = get_data(filters,conditions) for project in data: project["total_tasks"] = frappe.db.count("Task", filters={"project": project['name']}) project["completed_tasks"] = frappe.db.count("Task", filters={"project": project['name'], "status": "Completed"}) project["overdue_tasks"] = frappe.db.count("Task", filters={"project": project['name'], "status": "Overdue"}) chart = get_chart_data(data) report_summary = get_report_summary(data) return columns, data, None, chart, report_summary def get_columns(): return [ { "fieldname": "name", "label": _("Project"), "fieldtype": "Link", "options": "Project", "width": 200 }, { "fieldname": "project_type", "label": _("Type"), "fieldtype": "Link", "options": "Project Type", "width": 120 }, { "fieldname": "status", "label": _("Status"), "fieldtype": "Data", "width": 120 }, { "fieldname": "total_tasks", "label": _("Total Tasks"), "fieldtype": "Data", "width": 120 }, { "fieldname": "completed_tasks", "label": _("Tasks Completed"), "fieldtype": "Data", "width": 120 }, { "fieldname": "overdue_tasks", "label": _("Tasks Overdue"), "fieldtype": "Data", "width": 120 }, { "fieldname": "percent_complete", "label": _("Completion"), "fieldtype": "Data", "width": 120 }, { "fieldname": "expected_start_date", "label": _("Start Date"), "fieldtype": "Date", "width": 120 }, { "fieldname": "expected_end_date", "label": _("End Date"), "fieldtype": "Date", "width": 120 }, { "fieldname": "date_percentage", "label": _("Duration Left%"), "fieldtype": "percent", "width": 200 }, { "fieldname": "total_sales_amount", "label": _("Total Sales Amount"), "fieldtype": "currency", "width": 150 }, { "fieldname": "total_billed_amount", "label": _("Total Billed Amount"), "fieldtype": "currency", "width": 
200 }, { "fieldname": "sales_percentage", "label": _("Sales Remaining%"), "fieldtype": "percent", "width": 150 } ] def get_data(filters,conditions): query="""select p.name,p.status,p.percent_complete,p.expected_start_date,p.expected_end_date,p.project_type,(CASE WHEN p.expected_start_date > NOW() THEN 100 ELSE ( (DATEDIFF(p.expected_end_date,Now()) / DATEDIFF(p.expected_end_date, p.expected_start_date))*100)END)as date_percentage,p.total_sales_amount,p.total_billed_amount,(((p.total_sales_amount-p.total_billed_amount)/p.total_sales_amount)*100) as sales_percentage from `tabProject` p WHERE {conditions} ORDER BY p.expected_end_date ASC""".format(conditions=conditions) proj=frappe.db.sql(query, as_dict=True) return proj def get_conditions(filters): conditions="" if filters.get('company'): conditions += " p.company = '{}'".format(filters.get('company')) conditions += " AND p.expected_end_date >= NOW()" if filters.get('is_active'): conditions += " AND p.is_active = '{}'".format(filters.get('is_active')) if filters.get('status'): conditions += " AND p.status = '{}'".format(filters.get('status')) if filters.get('project_type'): conditions += " AND p.project_type = '{}'".format(filters.get('project_type')) if filters.get('priority'): conditions += " AND p.priority = '{}'".format(filters.get('priority')) return conditions def get_chart_data(data): labels = [] total = [] completed = [] overdue = [] date_per = [] sales_per = [] for project in data: labels.append(project.name) total.append(project.total_tasks) completed.append(project.completed_tasks) overdue.append(project.overdue_tasks) date_per.append(project.date_percentage) sales_per.append(project.sales_percentage) return { "data": { 'labels': labels[:50], 'datasets': [ { "name": "Overdue", "values": overdue[:30] }, { "name": "Completed", "values": completed[:30] }, { "name": "Total Tasks", "values": total[:30] }, { "name": "Duration Left%", "values": date_per[:30] }, { "name": "Sales Remaining%", "values": 
sales_per[:50] }, ] }, "type": "bar", "colors": ["#fc4f51", "#ffd343","#00FF00", "#7575ff","#78d6ff"], "barOptions": { "stacked": False } } def get_report_summary(data): if not data: return None avg_completion = sum([project.percent_complete for project in data]) / len(data) total = sum([project.total_tasks for project in data]) total_overdue = sum([project.overdue_tasks for project in data]) completed = sum([project.completed_tasks for project in data]) return [ { "value": avg_completion, "indicator": "Green" if avg_completion > 50 else "Red", "label": "Average Completion", "datatype": "Percent", }, { "value": total, "indicator": "Blue", "label": "Total Tasks", "datatype": "Int", }, { "value": completed, "indicator": "Green", "label": "Completed Tasks", "datatype": "Int", }, { "value": total_overdue, "indicator": "Green" if total_overdue == 0 else "Red", "label": "Overdue Tasks", "datatype": "Int", } ]
25.158879
527
0.633172
617
5,384
5.345219
0.197731
0.030321
0.041237
0.029109
0.184657
0.09339
0.072771
0.072771
0.072771
0.05852
0
0.019035
0.180349
5,384
213
528
25.276995
0.728303
0.016716
0
0.169231
0
0.005128
0.381024
0.065961
0
0
0
0
0
1
0.030769
false
0
0.015385
0.005128
0.082051
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
970e676a6d9532be893c81dfaef34fee90098dda
267
py
Python
modelvshuman/__init__.py
TizianThieringer/model-vs-human
17729b8167520f682d93d55c340c27de07bb2681
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
158
2021-06-04T15:19:58.000Z
2022-03-30T00:31:28.000Z
modelvshuman/__init__.py
TizianThieringer/model-vs-human
17729b8167520f682d93d55c340c27de07bb2681
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
7
2021-07-20T03:57:34.000Z
2022-02-01T11:00:47.000Z
modelvshuman/__init__.py
TizianThieringer/model-vs-human
17729b8167520f682d93d55c340c27de07bb2681
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
14
2021-06-16T13:33:11.000Z
2022-03-29T15:04:09.000Z
from . import cli from . import datasets from . import evaluation from . import models from . import plotting from .model_evaluator import ModelEvaluator from .plotting.plot import plot from .version import __version__, VERSION Evaluate = ModelEvaluator Plot = plot
22.25
43
0.805243
34
267
6.176471
0.382353
0.238095
0
0
0
0
0
0
0
0
0
0
0.149813
267
11
44
24.272727
0.92511
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
970f7b10a07b98a1a06557eff5b9b16e7aa0fdb8
59,592
py
Python
sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
29
2021-09-30T19:32:07.000Z
2022-03-22T21:06:08.000Z
sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
232
2021-09-30T19:26:26.000Z
2022-03-31T23:22:06.000Z
sdk/python/pulumi_aws_native/elasticloadbalancingv2/outputs.py
AaronFriel/pulumi-aws-native
5621690373ac44accdbd20b11bae3be1baf022d1
[ "Apache-2.0" ]
4
2021-11-10T19:42:01.000Z
2022-02-05T10:15:49.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'ListenerAction', 'ListenerAuthenticateCognitoConfig', 'ListenerAuthenticateOidcConfig', 'ListenerCertificate', 'ListenerCertificateCertificate', 'ListenerFixedResponseConfig', 'ListenerForwardConfig', 'ListenerRedirectConfig', 'ListenerRuleAction', 'ListenerRuleAuthenticateCognitoConfig', 'ListenerRuleAuthenticateOidcConfig', 'ListenerRuleFixedResponseConfig', 'ListenerRuleForwardConfig', 'ListenerRuleHostHeaderConfig', 'ListenerRuleHttpHeaderConfig', 'ListenerRuleHttpRequestMethodConfig', 'ListenerRulePathPatternConfig', 'ListenerRuleQueryStringConfig', 'ListenerRuleQueryStringKeyValue', 'ListenerRuleRedirectConfig', 'ListenerRuleRuleCondition', 'ListenerRuleSourceIpConfig', 'ListenerRuleTargetGroupStickinessConfig', 'ListenerRuleTargetGroupTuple', 'ListenerTargetGroupStickinessConfig', 'ListenerTargetGroupTuple', 'LoadBalancerAttribute', 'LoadBalancerSubnetMapping', 'LoadBalancerTag', 'TargetGroupAttribute', 'TargetGroupMatcher', 'TargetGroupTag', 'TargetGroupTargetDescription', ] @pulumi.output_type class ListenerAction(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "authenticateCognitoConfig": suggest = "authenticate_cognito_config" elif key == "authenticateOidcConfig": suggest = "authenticate_oidc_config" elif key == "fixedResponseConfig": suggest = "fixed_response_config" elif key == "forwardConfig": suggest = "forward_config" elif key == "redirectConfig": suggest = "redirect_config" elif key == "targetGroupArn": suggest = "target_group_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerAction. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerAction.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerAction.__key_warning(key) return super().get(key, default) def __init__(__self__, *, type: str, authenticate_cognito_config: Optional['outputs.ListenerAuthenticateCognitoConfig'] = None, authenticate_oidc_config: Optional['outputs.ListenerAuthenticateOidcConfig'] = None, fixed_response_config: Optional['outputs.ListenerFixedResponseConfig'] = None, forward_config: Optional['outputs.ListenerForwardConfig'] = None, order: Optional[int] = None, redirect_config: Optional['outputs.ListenerRedirectConfig'] = None, target_group_arn: Optional[str] = None): pulumi.set(__self__, "type", type) if authenticate_cognito_config is not None: pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config) if authenticate_oidc_config is not None: pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config) if fixed_response_config is not None: pulumi.set(__self__, "fixed_response_config", fixed_response_config) if forward_config is not None: pulumi.set(__self__, "forward_config", forward_config) if order is not None: pulumi.set(__self__, "order", order) if redirect_config is not None: pulumi.set(__self__, "redirect_config", redirect_config) if target_group_arn is not None: pulumi.set(__self__, "target_group_arn", target_group_arn) @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="authenticateCognitoConfig") def authenticate_cognito_config(self) -> Optional['outputs.ListenerAuthenticateCognitoConfig']: return pulumi.get(self, "authenticate_cognito_config") @property @pulumi.getter(name="authenticateOidcConfig") def authenticate_oidc_config(self) -> Optional['outputs.ListenerAuthenticateOidcConfig']: return pulumi.get(self, "authenticate_oidc_config") @property 
@pulumi.getter(name="fixedResponseConfig") def fixed_response_config(self) -> Optional['outputs.ListenerFixedResponseConfig']: return pulumi.get(self, "fixed_response_config") @property @pulumi.getter(name="forwardConfig") def forward_config(self) -> Optional['outputs.ListenerForwardConfig']: return pulumi.get(self, "forward_config") @property @pulumi.getter def order(self) -> Optional[int]: return pulumi.get(self, "order") @property @pulumi.getter(name="redirectConfig") def redirect_config(self) -> Optional['outputs.ListenerRedirectConfig']: return pulumi.get(self, "redirect_config") @property @pulumi.getter(name="targetGroupArn") def target_group_arn(self) -> Optional[str]: return pulumi.get(self, "target_group_arn") @pulumi.output_type class ListenerAuthenticateCognitoConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "userPoolArn": suggest = "user_pool_arn" elif key == "userPoolClientId": suggest = "user_pool_client_id" elif key == "userPoolDomain": suggest = "user_pool_domain" elif key == "authenticationRequestExtraParams": suggest = "authentication_request_extra_params" elif key == "onUnauthenticatedRequest": suggest = "on_unauthenticated_request" elif key == "sessionCookieName": suggest = "session_cookie_name" elif key == "sessionTimeout": suggest = "session_timeout" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateCognitoConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerAuthenticateCognitoConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerAuthenticateCognitoConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, user_pool_arn: str, user_pool_client_id: str, user_pool_domain: str, authentication_request_extra_params: Optional[Any] = None, on_unauthenticated_request: Optional[str] = None, scope: Optional[str] = None, session_cookie_name: Optional[str] = None, session_timeout: Optional[str] = None): pulumi.set(__self__, "user_pool_arn", user_pool_arn) pulumi.set(__self__, "user_pool_client_id", user_pool_client_id) pulumi.set(__self__, "user_pool_domain", user_pool_domain) if authentication_request_extra_params is not None: pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params) if on_unauthenticated_request is not None: pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request) if scope is not None: pulumi.set(__self__, "scope", scope) if session_cookie_name is not None: pulumi.set(__self__, "session_cookie_name", session_cookie_name) if session_timeout is not None: pulumi.set(__self__, "session_timeout", session_timeout) @property @pulumi.getter(name="userPoolArn") def user_pool_arn(self) -> str: return pulumi.get(self, "user_pool_arn") @property @pulumi.getter(name="userPoolClientId") def user_pool_client_id(self) -> str: return pulumi.get(self, "user_pool_client_id") @property @pulumi.getter(name="userPoolDomain") def user_pool_domain(self) -> str: return pulumi.get(self, "user_pool_domain") @property @pulumi.getter(name="authenticationRequestExtraParams") def authentication_request_extra_params(self) -> Optional[Any]: return pulumi.get(self, "authentication_request_extra_params") @property @pulumi.getter(name="onUnauthenticatedRequest") def 
on_unauthenticated_request(self) -> Optional[str]: return pulumi.get(self, "on_unauthenticated_request") @property @pulumi.getter def scope(self) -> Optional[str]: return pulumi.get(self, "scope") @property @pulumi.getter(name="sessionCookieName") def session_cookie_name(self) -> Optional[str]: return pulumi.get(self, "session_cookie_name") @property @pulumi.getter(name="sessionTimeout") def session_timeout(self) -> Optional[str]: return pulumi.get(self, "session_timeout") @pulumi.output_type class ListenerAuthenticateOidcConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "authorizationEndpoint": suggest = "authorization_endpoint" elif key == "clientId": suggest = "client_id" elif key == "clientSecret": suggest = "client_secret" elif key == "tokenEndpoint": suggest = "token_endpoint" elif key == "userInfoEndpoint": suggest = "user_info_endpoint" elif key == "authenticationRequestExtraParams": suggest = "authentication_request_extra_params" elif key == "onUnauthenticatedRequest": suggest = "on_unauthenticated_request" elif key == "sessionCookieName": suggest = "session_cookie_name" elif key == "sessionTimeout": suggest = "session_timeout" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateOidcConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerAuthenticateOidcConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerAuthenticateOidcConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, authorization_endpoint: str, client_id: str, client_secret: str, issuer: str, token_endpoint: str, user_info_endpoint: str, authentication_request_extra_params: Optional[Any] = None, on_unauthenticated_request: Optional[str] = None, scope: Optional[str] = None, session_cookie_name: Optional[str] = None, session_timeout: Optional[str] = None): pulumi.set(__self__, "authorization_endpoint", authorization_endpoint) pulumi.set(__self__, "client_id", client_id) pulumi.set(__self__, "client_secret", client_secret) pulumi.set(__self__, "issuer", issuer) pulumi.set(__self__, "token_endpoint", token_endpoint) pulumi.set(__self__, "user_info_endpoint", user_info_endpoint) if authentication_request_extra_params is not None: pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params) if on_unauthenticated_request is not None: pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request) if scope is not None: pulumi.set(__self__, "scope", scope) if session_cookie_name is not None: pulumi.set(__self__, "session_cookie_name", session_cookie_name) if session_timeout is not None: pulumi.set(__self__, "session_timeout", session_timeout) @property @pulumi.getter(name="authorizationEndpoint") def authorization_endpoint(self) -> str: return pulumi.get(self, "authorization_endpoint") @property @pulumi.getter(name="clientId") def client_id(self) -> str: return pulumi.get(self, "client_id") @property @pulumi.getter(name="clientSecret") def client_secret(self) -> str: return pulumi.get(self, "client_secret") @property @pulumi.getter def issuer(self) -> str: return pulumi.get(self, "issuer") @property 
@pulumi.getter(name="tokenEndpoint") def token_endpoint(self) -> str: return pulumi.get(self, "token_endpoint") @property @pulumi.getter(name="userInfoEndpoint") def user_info_endpoint(self) -> str: return pulumi.get(self, "user_info_endpoint") @property @pulumi.getter(name="authenticationRequestExtraParams") def authentication_request_extra_params(self) -> Optional[Any]: return pulumi.get(self, "authentication_request_extra_params") @property @pulumi.getter(name="onUnauthenticatedRequest") def on_unauthenticated_request(self) -> Optional[str]: return pulumi.get(self, "on_unauthenticated_request") @property @pulumi.getter def scope(self) -> Optional[str]: return pulumi.get(self, "scope") @property @pulumi.getter(name="sessionCookieName") def session_cookie_name(self) -> Optional[str]: return pulumi.get(self, "session_cookie_name") @property @pulumi.getter(name="sessionTimeout") def session_timeout(self) -> Optional[str]: return pulumi.get(self, "session_timeout") @pulumi.output_type class ListenerCertificate(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "certificateArn": suggest = "certificate_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerCertificate. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerCertificate.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerCertificate.__key_warning(key) return super().get(key, default) def __init__(__self__, *, certificate_arn: Optional[str] = None): if certificate_arn is not None: pulumi.set(__self__, "certificate_arn", certificate_arn) @property @pulumi.getter(name="certificateArn") def certificate_arn(self) -> Optional[str]: return pulumi.get(self, "certificate_arn") @pulumi.output_type class ListenerCertificateCertificate(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "certificateArn": suggest = "certificate_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerCertificateCertificate. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerCertificateCertificate.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerCertificateCertificate.__key_warning(key) return super().get(key, default) def __init__(__self__, *, certificate_arn: Optional[str] = None): if certificate_arn is not None: pulumi.set(__self__, "certificate_arn", certificate_arn) @property @pulumi.getter(name="certificateArn") def certificate_arn(self) -> Optional[str]: return pulumi.get(self, "certificate_arn") @pulumi.output_type class ListenerFixedResponseConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" elif key == "contentType": suggest = "content_type" elif key == "messageBody": suggest = "message_body" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerFixedResponseConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerFixedResponseConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerFixedResponseConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, content_type: Optional[str] = None, message_body: Optional[str] = None): pulumi.set(__self__, "status_code", status_code) if content_type is not None: pulumi.set(__self__, "content_type", content_type) if message_body is not None: pulumi.set(__self__, "message_body", message_body) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: return pulumi.get(self, "status_code") @property @pulumi.getter(name="contentType") def content_type(self) -> Optional[str]: return pulumi.get(self, "content_type") @property @pulumi.getter(name="messageBody") def message_body(self) -> Optional[str]: return pulumi.get(self, "message_body") @pulumi.output_type class ListenerForwardConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "targetGroupStickinessConfig": suggest = "target_group_stickiness_config" elif key == "targetGroups": suggest = "target_groups" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerForwardConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerForwardConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerForwardConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, target_group_stickiness_config: Optional['outputs.ListenerTargetGroupStickinessConfig'] = None, target_groups: Optional[Sequence['outputs.ListenerTargetGroupTuple']] = None): if target_group_stickiness_config is not None: pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config) if target_groups is not None: pulumi.set(__self__, "target_groups", target_groups) @property @pulumi.getter(name="targetGroupStickinessConfig") def target_group_stickiness_config(self) -> Optional['outputs.ListenerTargetGroupStickinessConfig']: return pulumi.get(self, "target_group_stickiness_config") @property @pulumi.getter(name="targetGroups") def target_groups(self) -> Optional[Sequence['outputs.ListenerTargetGroupTuple']]: return pulumi.get(self, "target_groups") @pulumi.output_type class ListenerRedirectConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRedirectConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRedirectConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRedirectConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, host: Optional[str] = None, path: Optional[str] = None, port: Optional[str] = None, protocol: Optional[str] = None, query: Optional[str] = None): pulumi.set(__self__, "status_code", status_code) if host is not None: pulumi.set(__self__, "host", host) if path is not None: pulumi.set(__self__, "path", path) if port is not None: pulumi.set(__self__, "port", port) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if query is not None: pulumi.set(__self__, "query", query) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: return pulumi.get(self, "status_code") @property @pulumi.getter def host(self) -> Optional[str]: return pulumi.get(self, "host") @property @pulumi.getter def path(self) -> Optional[str]: return pulumi.get(self, "path") @property @pulumi.getter def port(self) -> Optional[str]: return pulumi.get(self, "port") @property @pulumi.getter def protocol(self) -> Optional[str]: return pulumi.get(self, "protocol") @property @pulumi.getter def query(self) -> Optional[str]: return pulumi.get(self, "query") @pulumi.output_type class ListenerRuleAction(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "authenticateCognitoConfig": suggest = "authenticate_cognito_config" elif key == "authenticateOidcConfig": suggest = "authenticate_oidc_config" elif key == "fixedResponseConfig": suggest = "fixed_response_config" elif key == "forwardConfig": suggest = "forward_config" elif key == "redirectConfig": suggest = "redirect_config" elif key == "targetGroupArn": suggest = "target_group_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAction. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleAction.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleAction.__key_warning(key) return super().get(key, default) def __init__(__self__, *, type: str, authenticate_cognito_config: Optional['outputs.ListenerRuleAuthenticateCognitoConfig'] = None, authenticate_oidc_config: Optional['outputs.ListenerRuleAuthenticateOidcConfig'] = None, fixed_response_config: Optional['outputs.ListenerRuleFixedResponseConfig'] = None, forward_config: Optional['outputs.ListenerRuleForwardConfig'] = None, order: Optional[int] = None, redirect_config: Optional['outputs.ListenerRuleRedirectConfig'] = None, target_group_arn: Optional[str] = None): pulumi.set(__self__, "type", type) if authenticate_cognito_config is not None: pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config) if authenticate_oidc_config is not None: pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config) if fixed_response_config is not None: pulumi.set(__self__, "fixed_response_config", fixed_response_config) if forward_config is not None: pulumi.set(__self__, "forward_config", forward_config) if order is not None: pulumi.set(__self__, "order", order) if redirect_config is not None: pulumi.set(__self__, "redirect_config", redirect_config) if target_group_arn is not None: pulumi.set(__self__, "target_group_arn", target_group_arn) @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") @property @pulumi.getter(name="authenticateCognitoConfig") def authenticate_cognito_config(self) -> Optional['outputs.ListenerRuleAuthenticateCognitoConfig']: return pulumi.get(self, "authenticate_cognito_config") @property @pulumi.getter(name="authenticateOidcConfig") def authenticate_oidc_config(self) -> Optional['outputs.ListenerRuleAuthenticateOidcConfig']: return pulumi.get(self, 
"authenticate_oidc_config") @property @pulumi.getter(name="fixedResponseConfig") def fixed_response_config(self) -> Optional['outputs.ListenerRuleFixedResponseConfig']: return pulumi.get(self, "fixed_response_config") @property @pulumi.getter(name="forwardConfig") def forward_config(self) -> Optional['outputs.ListenerRuleForwardConfig']: return pulumi.get(self, "forward_config") @property @pulumi.getter def order(self) -> Optional[int]: return pulumi.get(self, "order") @property @pulumi.getter(name="redirectConfig") def redirect_config(self) -> Optional['outputs.ListenerRuleRedirectConfig']: return pulumi.get(self, "redirect_config") @property @pulumi.getter(name="targetGroupArn") def target_group_arn(self) -> Optional[str]: return pulumi.get(self, "target_group_arn") @pulumi.output_type class ListenerRuleAuthenticateCognitoConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "userPoolArn": suggest = "user_pool_arn" elif key == "userPoolClientId": suggest = "user_pool_client_id" elif key == "userPoolDomain": suggest = "user_pool_domain" elif key == "authenticationRequestExtraParams": suggest = "authentication_request_extra_params" elif key == "onUnauthenticatedRequest": suggest = "on_unauthenticated_request" elif key == "sessionCookieName": suggest = "session_cookie_name" elif key == "sessionTimeout": suggest = "session_timeout" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateCognitoConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleAuthenticateCognitoConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleAuthenticateCognitoConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, user_pool_arn: str, user_pool_client_id: str, user_pool_domain: str, authentication_request_extra_params: Optional[Any] = None, on_unauthenticated_request: Optional[str] = None, scope: Optional[str] = None, session_cookie_name: Optional[str] = None, session_timeout: Optional[int] = None): pulumi.set(__self__, "user_pool_arn", user_pool_arn) pulumi.set(__self__, "user_pool_client_id", user_pool_client_id) pulumi.set(__self__, "user_pool_domain", user_pool_domain) if authentication_request_extra_params is not None: pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params) if on_unauthenticated_request is not None: pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request) if scope is not None: pulumi.set(__self__, "scope", scope) if session_cookie_name is not None: pulumi.set(__self__, "session_cookie_name", session_cookie_name) if session_timeout is not None: pulumi.set(__self__, "session_timeout", session_timeout) @property @pulumi.getter(name="userPoolArn") def user_pool_arn(self) -> str: return pulumi.get(self, "user_pool_arn") @property @pulumi.getter(name="userPoolClientId") def user_pool_client_id(self) -> str: return pulumi.get(self, "user_pool_client_id") @property @pulumi.getter(name="userPoolDomain") def user_pool_domain(self) -> str: return pulumi.get(self, "user_pool_domain") @property @pulumi.getter(name="authenticationRequestExtraParams") def authentication_request_extra_params(self) -> Optional[Any]: return pulumi.get(self, "authentication_request_extra_params") @property @pulumi.getter(name="onUnauthenticatedRequest") def 
on_unauthenticated_request(self) -> Optional[str]: return pulumi.get(self, "on_unauthenticated_request") @property @pulumi.getter def scope(self) -> Optional[str]: return pulumi.get(self, "scope") @property @pulumi.getter(name="sessionCookieName") def session_cookie_name(self) -> Optional[str]: return pulumi.get(self, "session_cookie_name") @property @pulumi.getter(name="sessionTimeout") def session_timeout(self) -> Optional[int]: return pulumi.get(self, "session_timeout") @pulumi.output_type class ListenerRuleAuthenticateOidcConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "authorizationEndpoint": suggest = "authorization_endpoint" elif key == "clientId": suggest = "client_id" elif key == "clientSecret": suggest = "client_secret" elif key == "tokenEndpoint": suggest = "token_endpoint" elif key == "userInfoEndpoint": suggest = "user_info_endpoint" elif key == "authenticationRequestExtraParams": suggest = "authentication_request_extra_params" elif key == "onUnauthenticatedRequest": suggest = "on_unauthenticated_request" elif key == "sessionCookieName": suggest = "session_cookie_name" elif key == "sessionTimeout": suggest = "session_timeout" elif key == "useExistingClientSecret": suggest = "use_existing_client_secret" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateOidcConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleAuthenticateOidcConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleAuthenticateOidcConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, authorization_endpoint: str, client_id: str, client_secret: str, issuer: str, token_endpoint: str, user_info_endpoint: str, authentication_request_extra_params: Optional[Any] = None, on_unauthenticated_request: Optional[str] = None, scope: Optional[str] = None, session_cookie_name: Optional[str] = None, session_timeout: Optional[int] = None, use_existing_client_secret: Optional[bool] = None): pulumi.set(__self__, "authorization_endpoint", authorization_endpoint) pulumi.set(__self__, "client_id", client_id) pulumi.set(__self__, "client_secret", client_secret) pulumi.set(__self__, "issuer", issuer) pulumi.set(__self__, "token_endpoint", token_endpoint) pulumi.set(__self__, "user_info_endpoint", user_info_endpoint) if authentication_request_extra_params is not None: pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params) if on_unauthenticated_request is not None: pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request) if scope is not None: pulumi.set(__self__, "scope", scope) if session_cookie_name is not None: pulumi.set(__self__, "session_cookie_name", session_cookie_name) if session_timeout is not None: pulumi.set(__self__, "session_timeout", session_timeout) if use_existing_client_secret is not None: pulumi.set(__self__, "use_existing_client_secret", use_existing_client_secret) @property @pulumi.getter(name="authorizationEndpoint") def authorization_endpoint(self) -> str: return pulumi.get(self, "authorization_endpoint") @property @pulumi.getter(name="clientId") def client_id(self) -> str: return pulumi.get(self, "client_id") @property 
@pulumi.getter(name="clientSecret") def client_secret(self) -> str: return pulumi.get(self, "client_secret") @property @pulumi.getter def issuer(self) -> str: return pulumi.get(self, "issuer") @property @pulumi.getter(name="tokenEndpoint") def token_endpoint(self) -> str: return pulumi.get(self, "token_endpoint") @property @pulumi.getter(name="userInfoEndpoint") def user_info_endpoint(self) -> str: return pulumi.get(self, "user_info_endpoint") @property @pulumi.getter(name="authenticationRequestExtraParams") def authentication_request_extra_params(self) -> Optional[Any]: return pulumi.get(self, "authentication_request_extra_params") @property @pulumi.getter(name="onUnauthenticatedRequest") def on_unauthenticated_request(self) -> Optional[str]: return pulumi.get(self, "on_unauthenticated_request") @property @pulumi.getter def scope(self) -> Optional[str]: return pulumi.get(self, "scope") @property @pulumi.getter(name="sessionCookieName") def session_cookie_name(self) -> Optional[str]: return pulumi.get(self, "session_cookie_name") @property @pulumi.getter(name="sessionTimeout") def session_timeout(self) -> Optional[int]: return pulumi.get(self, "session_timeout") @property @pulumi.getter(name="useExistingClientSecret") def use_existing_client_secret(self) -> Optional[bool]: return pulumi.get(self, "use_existing_client_secret") @pulumi.output_type class ListenerRuleFixedResponseConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" elif key == "contentType": suggest = "content_type" elif key == "messageBody": suggest = "message_body" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleFixedResponseConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleFixedResponseConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleFixedResponseConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, content_type: Optional[str] = None, message_body: Optional[str] = None): pulumi.set(__self__, "status_code", status_code) if content_type is not None: pulumi.set(__self__, "content_type", content_type) if message_body is not None: pulumi.set(__self__, "message_body", message_body) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: return pulumi.get(self, "status_code") @property @pulumi.getter(name="contentType") def content_type(self) -> Optional[str]: return pulumi.get(self, "content_type") @property @pulumi.getter(name="messageBody") def message_body(self) -> Optional[str]: return pulumi.get(self, "message_body") @pulumi.output_type class ListenerRuleForwardConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "targetGroupStickinessConfig": suggest = "target_group_stickiness_config" elif key == "targetGroups": suggest = "target_groups" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleForwardConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleForwardConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleForwardConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, target_group_stickiness_config: Optional['outputs.ListenerRuleTargetGroupStickinessConfig'] = None, target_groups: Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']] = None): if target_group_stickiness_config is not None: pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config) if target_groups is not None: pulumi.set(__self__, "target_groups", target_groups) @property @pulumi.getter(name="targetGroupStickinessConfig") def target_group_stickiness_config(self) -> Optional['outputs.ListenerRuleTargetGroupStickinessConfig']: return pulumi.get(self, "target_group_stickiness_config") @property @pulumi.getter(name="targetGroups") def target_groups(self) -> Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']]: return pulumi.get(self, "target_groups") @pulumi.output_type class ListenerRuleHostHeaderConfig(dict): def __init__(__self__, *, values: Optional[Sequence[str]] = None): if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleHttpHeaderConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "httpHeaderName": suggest = "http_header_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleHttpHeaderConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleHttpHeaderConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleHttpHeaderConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, http_header_name: Optional[str] = None, values: Optional[Sequence[str]] = None): if http_header_name is not None: pulumi.set(__self__, "http_header_name", http_header_name) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter(name="httpHeaderName") def http_header_name(self) -> Optional[str]: return pulumi.get(self, "http_header_name") @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleHttpRequestMethodConfig(dict): def __init__(__self__, *, values: Optional[Sequence[str]] = None): if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRulePathPatternConfig(dict): def __init__(__self__, *, values: Optional[Sequence[str]] = None): if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleQueryStringConfig(dict): def __init__(__self__, *, values: Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']] = None): if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def values(self) -> Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleQueryStringKeyValue(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): if key is not None: pulumi.set(__self__, "key", key) if value is 
not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: return pulumi.get(self, "value") @pulumi.output_type class ListenerRuleRedirectConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "statusCode": suggest = "status_code" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRedirectConfig. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleRedirectConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleRedirectConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, status_code: str, host: Optional[str] = None, path: Optional[str] = None, port: Optional[str] = None, protocol: Optional[str] = None, query: Optional[str] = None): pulumi.set(__self__, "status_code", status_code) if host is not None: pulumi.set(__self__, "host", host) if path is not None: pulumi.set(__self__, "path", path) if port is not None: pulumi.set(__self__, "port", port) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if query is not None: pulumi.set(__self__, "query", query) @property @pulumi.getter(name="statusCode") def status_code(self) -> str: return pulumi.get(self, "status_code") @property @pulumi.getter def host(self) -> Optional[str]: return pulumi.get(self, "host") @property @pulumi.getter def path(self) -> Optional[str]: return pulumi.get(self, "path") @property @pulumi.getter def port(self) -> Optional[str]: return pulumi.get(self, "port") @property @pulumi.getter def protocol(self) -> Optional[str]: return pulumi.get(self, "protocol") @property @pulumi.getter def query(self) -> Optional[str]: return pulumi.get(self, "query") @pulumi.output_type class ListenerRuleRuleCondition(dict): @staticmethod def __key_warning(key: str): 
suggest = None if key == "hostHeaderConfig": suggest = "host_header_config" elif key == "httpHeaderConfig": suggest = "http_header_config" elif key == "httpRequestMethodConfig": suggest = "http_request_method_config" elif key == "pathPatternConfig": suggest = "path_pattern_config" elif key == "queryStringConfig": suggest = "query_string_config" elif key == "sourceIpConfig": suggest = "source_ip_config" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRuleCondition. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleRuleCondition.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleRuleCondition.__key_warning(key) return super().get(key, default) def __init__(__self__, *, field: Optional[str] = None, host_header_config: Optional['outputs.ListenerRuleHostHeaderConfig'] = None, http_header_config: Optional['outputs.ListenerRuleHttpHeaderConfig'] = None, http_request_method_config: Optional['outputs.ListenerRuleHttpRequestMethodConfig'] = None, path_pattern_config: Optional['outputs.ListenerRulePathPatternConfig'] = None, query_string_config: Optional['outputs.ListenerRuleQueryStringConfig'] = None, source_ip_config: Optional['outputs.ListenerRuleSourceIpConfig'] = None, values: Optional[Sequence[str]] = None): if field is not None: pulumi.set(__self__, "field", field) if host_header_config is not None: pulumi.set(__self__, "host_header_config", host_header_config) if http_header_config is not None: pulumi.set(__self__, "http_header_config", http_header_config) if http_request_method_config is not None: pulumi.set(__self__, "http_request_method_config", http_request_method_config) if path_pattern_config is not None: pulumi.set(__self__, "path_pattern_config", path_pattern_config) if query_string_config is not None: pulumi.set(__self__, "query_string_config", query_string_config) if source_ip_config is not None: 
pulumi.set(__self__, "source_ip_config", source_ip_config) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def field(self) -> Optional[str]: return pulumi.get(self, "field") @property @pulumi.getter(name="hostHeaderConfig") def host_header_config(self) -> Optional['outputs.ListenerRuleHostHeaderConfig']: return pulumi.get(self, "host_header_config") @property @pulumi.getter(name="httpHeaderConfig") def http_header_config(self) -> Optional['outputs.ListenerRuleHttpHeaderConfig']: return pulumi.get(self, "http_header_config") @property @pulumi.getter(name="httpRequestMethodConfig") def http_request_method_config(self) -> Optional['outputs.ListenerRuleHttpRequestMethodConfig']: return pulumi.get(self, "http_request_method_config") @property @pulumi.getter(name="pathPatternConfig") def path_pattern_config(self) -> Optional['outputs.ListenerRulePathPatternConfig']: return pulumi.get(self, "path_pattern_config") @property @pulumi.getter(name="queryStringConfig") def query_string_config(self) -> Optional['outputs.ListenerRuleQueryStringConfig']: return pulumi.get(self, "query_string_config") @property @pulumi.getter(name="sourceIpConfig") def source_ip_config(self) -> Optional['outputs.ListenerRuleSourceIpConfig']: return pulumi.get(self, "source_ip_config") @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleSourceIpConfig(dict): def __init__(__self__, *, values: Optional[Sequence[str]] = None): if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def values(self) -> Optional[Sequence[str]]: return pulumi.get(self, "values") @pulumi.output_type class ListenerRuleTargetGroupStickinessConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "durationSeconds": suggest = "duration_seconds" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupStickinessConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleTargetGroupStickinessConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleTargetGroupStickinessConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, duration_seconds: Optional[int] = None, enabled: Optional[bool] = None): if duration_seconds is not None: pulumi.set(__self__, "duration_seconds", duration_seconds) if enabled is not None: pulumi.set(__self__, "enabled", enabled) @property @pulumi.getter(name="durationSeconds") def duration_seconds(self) -> Optional[int]: return pulumi.get(self, "duration_seconds") @property @pulumi.getter def enabled(self) -> Optional[bool]: return pulumi.get(self, "enabled") @pulumi.output_type class ListenerRuleTargetGroupTuple(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "targetGroupArn": suggest = "target_group_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupTuple. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerRuleTargetGroupTuple.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerRuleTargetGroupTuple.__key_warning(key) return super().get(key, default) def __init__(__self__, *, target_group_arn: Optional[str] = None, weight: Optional[int] = None): if target_group_arn is not None: pulumi.set(__self__, "target_group_arn", target_group_arn) if weight is not None: pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="targetGroupArn") def target_group_arn(self) -> Optional[str]: return pulumi.get(self, "target_group_arn") @property @pulumi.getter def weight(self) -> Optional[int]: return pulumi.get(self, "weight") @pulumi.output_type class ListenerTargetGroupStickinessConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "durationSeconds": suggest = "duration_seconds" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupStickinessConfig. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerTargetGroupStickinessConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerTargetGroupStickinessConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, duration_seconds: Optional[int] = None, enabled: Optional[bool] = None): if duration_seconds is not None: pulumi.set(__self__, "duration_seconds", duration_seconds) if enabled is not None: pulumi.set(__self__, "enabled", enabled) @property @pulumi.getter(name="durationSeconds") def duration_seconds(self) -> Optional[int]: return pulumi.get(self, "duration_seconds") @property @pulumi.getter def enabled(self) -> Optional[bool]: return pulumi.get(self, "enabled") @pulumi.output_type class ListenerTargetGroupTuple(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "targetGroupArn": suggest = "target_group_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupTuple. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ListenerTargetGroupTuple.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ListenerTargetGroupTuple.__key_warning(key) return super().get(key, default) def __init__(__self__, *, target_group_arn: Optional[str] = None, weight: Optional[int] = None): if target_group_arn is not None: pulumi.set(__self__, "target_group_arn", target_group_arn) if weight is not None: pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="targetGroupArn") def target_group_arn(self) -> Optional[str]: return pulumi.get(self, "target_group_arn") @property @pulumi.getter def weight(self) -> Optional[int]: return pulumi.get(self, "weight") @pulumi.output_type class LoadBalancerAttribute(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: return pulumi.get(self, "value") @pulumi.output_type class LoadBalancerSubnetMapping(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "subnetId": suggest = "subnet_id" elif key == "allocationId": suggest = "allocation_id" elif key == "iPv6Address": suggest = "i_pv6_address" elif key == "privateIPv4Address": suggest = "private_i_pv4_address" if suggest: pulumi.log.warn(f"Key '{key}' not found in LoadBalancerSubnetMapping. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: LoadBalancerSubnetMapping.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: LoadBalancerSubnetMapping.__key_warning(key) return super().get(key, default) def __init__(__self__, *, subnet_id: str, allocation_id: Optional[str] = None, i_pv6_address: Optional[str] = None, private_i_pv4_address: Optional[str] = None): pulumi.set(__self__, "subnet_id", subnet_id) if allocation_id is not None: pulumi.set(__self__, "allocation_id", allocation_id) if i_pv6_address is not None: pulumi.set(__self__, "i_pv6_address", i_pv6_address) if private_i_pv4_address is not None: pulumi.set(__self__, "private_i_pv4_address", private_i_pv4_address) @property @pulumi.getter(name="subnetId") def subnet_id(self) -> str: return pulumi.get(self, "subnet_id") @property @pulumi.getter(name="allocationId") def allocation_id(self) -> Optional[str]: return pulumi.get(self, "allocation_id") @property @pulumi.getter(name="iPv6Address") def i_pv6_address(self) -> Optional[str]: return pulumi.get(self, "i_pv6_address") @property @pulumi.getter(name="privateIPv4Address") def private_i_pv4_address(self) -> Optional[str]: return pulumi.get(self, "private_i_pv4_address") @pulumi.output_type class LoadBalancerTag(dict): def __init__(__self__, *, key: str, value: str): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: return pulumi.get(self, "value") @pulumi.output_type class TargetGroupAttribute(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def 
value(self) -> Optional[str]: return pulumi.get(self, "value") @pulumi.output_type class TargetGroupMatcher(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "grpcCode": suggest = "grpc_code" elif key == "httpCode": suggest = "http_code" if suggest: pulumi.log.warn(f"Key '{key}' not found in TargetGroupMatcher. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TargetGroupMatcher.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TargetGroupMatcher.__key_warning(key) return super().get(key, default) def __init__(__self__, *, grpc_code: Optional[str] = None, http_code: Optional[str] = None): if grpc_code is not None: pulumi.set(__self__, "grpc_code", grpc_code) if http_code is not None: pulumi.set(__self__, "http_code", http_code) @property @pulumi.getter(name="grpcCode") def grpc_code(self) -> Optional[str]: return pulumi.get(self, "grpc_code") @property @pulumi.getter(name="httpCode") def http_code(self) -> Optional[str]: return pulumi.get(self, "http_code") @pulumi.output_type class TargetGroupTag(dict): def __init__(__self__, *, key: str, value: str): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: return pulumi.get(self, "value") @pulumi.output_type class TargetGroupTargetDescription(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "availabilityZone": suggest = "availability_zone" if suggest: pulumi.log.warn(f"Key '{key}' not found in TargetGroupTargetDescription. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TargetGroupTargetDescription.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TargetGroupTargetDescription.__key_warning(key) return super().get(key, default) def __init__(__self__, *, id: str, availability_zone: Optional[str] = None, port: Optional[int] = None): pulumi.set(__self__, "id", id) if availability_zone is not None: pulumi.set(__self__, "availability_zone", availability_zone) if port is not None: pulumi.set(__self__, "port", port) @property @pulumi.getter def id(self) -> str: return pulumi.get(self, "id") @property @pulumi.getter(name="availabilityZone") def availability_zone(self) -> Optional[str]: return pulumi.get(self, "availability_zone") @property @pulumi.getter def port(self) -> Optional[int]: return pulumi.get(self, "port")
36.116364
159
0.65044
6,266
59,592
5.857804
0.034791
0.027462
0.042855
0.062635
0.83302
0.814412
0.800654
0.751342
0.741915
0.737774
0
0.000422
0.244815
59,592
1,649
160
36.138266
0.81519
0.002702
0
0.810117
1
0.016862
0.204813
0.09257
0
0
0
0
0
1
0.16349
false
0
0.004399
0.08871
0.314516
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
971109a2af40ebe4d09693183a658f563772f76a
4,652
py
Python
animate_gp.py
mzwiessele/notebook_playground
14938eddf5491eb0356fa4f73d16047202131e4d
[ "BSD-2-Clause" ]
null
null
null
animate_gp.py
mzwiessele/notebook_playground
14938eddf5491eb0356fa4f73d16047202131e4d
[ "BSD-2-Clause" ]
null
null
null
animate_gp.py
mzwiessele/notebook_playground
14938eddf5491eb0356fa4f73d16047202131e4d
[ "BSD-2-Clause" ]
null
null
null
#=============================================================================== # Copyright (c) 2018, Max Zwiessele # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of animate_gp nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#===============================================================================
import numpy as np
import GPy
from matplotlib import pyplot as plt
from matplotlib import animation


def exp_map_sphere(mu, E):
    """Map tangent columns ``E`` (N, n) at the unit vector ``mu`` (N, 1) onto the sphere.

    Columns whose tangent norm is ~0 are mapped back to ``mu`` itself to
    avoid the 0/0.  NOTE(review): the sin/cos placement differs from the
    textbook sphere exponential map -- kept unchanged to preserve the
    animation's appearance.
    """
    ang = np.sqrt((E ** 2).sum(0))[None, :]
    out = mu * np.sin(ang) + (E * np.cos(ang)) / ang
    out[:, np.abs(ang[0]) <= 1e-7] = mu
    return out


def exp_map(mu, E):
    """Alias of exp_map_sphere, kept so callers of either name keep working."""
    return exp_map_sphere(mu, E)


def animation_matrix(N, n):
    """Return an (N, n) matrix whose columns trace a closed random loop in R^N."""
    direction = np.random.normal(0, 1, size=(N, 1))
    radius = np.sqrt((direction ** 2).sum())
    direction /= radius
    tangent = np.random.normal(0, 1, size=(N, 1))
    # Project out the component along `direction`, then normalise the rest.
    tangent = tangent - (tangent.T.dot(direction)).dot(direction.T).T
    tangent /= np.sqrt((tangent ** 2).sum())
    angles = np.linspace(0.001, 2 * np.pi, n)[None]
    return radius * exp_map_sphere(direction, angles * tangent)


def get_percs(mu, K):
    """Empirical percentiles (75 levels) of N(mu, K), estimated from 50k samples."""
    samples = np.random.multivariate_normal(mu.squeeze(), K, size=(50000)).T
    levels = np.linspace(0.01, 99.99, 75)
    return np.percentile(samples, levels, overwrite_input=True, axis=1)


def create_empty_ax():
    """Create a 16:9 figure with a frameless, tick-free axis."""
    fig, ax = plt.subplots(figsize=(4.2 * (16 / 9), 4.20))
    ax.set_frame_on(False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    return fig, ax


def plot_data(ax, X, Y):
    """Scatter the observations as black crosses."""
    return ax.scatter(X, Y, marker='x', color='k')


def fill_grad(ax, X, mu, K):
    """Shade the GP credible region on `ax` as a gradient fill."""
    from GPy.plotting.matplot_dep.plot_definitions import MatplotlibPlots
    plotter = MatplotlibPlots()
    plotter.fill_gradient(ax, X[:, 0], get_percs(mu, K),
                          color='#687C8E', linewidth=0, alpha=1.)
def animate_kernel(fig, ax, X, mu, K, out, frames=200):
    """Render `frames` GP samples morphing smoothly and save the clip to `out`.

    Each coloured line follows a closed loop through sample space (see
    animation_matrix), correlated through the Cholesky factor of K, so the
    resulting animation cycles seamlessly.  Returns the FuncAnimation.
    """
    palette = ['#f7fbff', '#deebf7', '#c6dbef', "#9ecae1", "#6baed6",
               "#4292c6", '#2171b5', '#08519c', '#08306b', ]
    # pdinv(...)[1] is the Cholesky factor of the jittered kernel matrix.
    chol = GPy.util.linalg.pdinv(K + np.eye(K.shape[0]) * 1e-8)[1]
    artists = [ax.plot([], [], lw=.8, color=c)[0] for c in palette]
    loops = [animation_matrix(X.shape[0], frames) for _ in artists]

    def _init():
        for artist in artists:
            artist.set_data([], [])
        return artists

    def _draw(i):
        for loop, artist in zip(loops, artists):
            artist.set_data(X[:, 0], mu[:, [0]] + chol.dot(loop[:, [i]]))
        return artists

    anim = animation.FuncAnimation(fig, _draw, init_func=_init,
                                   frames=frames, interval=20, blit=False,
                                   repeat=True, save_count=frames, )
    writer = animation.FFMpegFileWriter(fps=30, codec='libx264',
                                        extra_args=['-pix_fmt', 'yuva420p', ], )
    anim.save(out, writer=writer, dpi=150,
              savefig_kwargs={'transparent': False, 'facecolor': 'white'}, )
    return anim
36.34375
97
0.593508
649
4,652
4.201849
0.417565
0.003667
0.0033
0.016868
0.151815
0.134213
0.126146
0.126146
0.110011
0.110011
0
0.031945
0.246346
4,652
127
98
36.629921
0.745864
0.377902
0
0.126582
0
0
0.041929
0
0
0
0
0
0
1
0.126582
false
0
0.063291
0.012658
0.303797
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
971117541cde3e1903caaf3ca3390e530dc8443b
615
py
Python
tests/testapp/models.py
marteinn/wagtail-automatic-move-redirects
2321b6037d00284d447e346faf7841061e33495e
[ "BSD-3-Clause" ]
13
2020-02-13T20:55:32.000Z
2021-12-11T21:20:20.000Z
tests/testapp/models.py
marteinn/wagtail-automatic-move-redirects
2321b6037d00284d447e346faf7841061e33495e
[ "BSD-3-Clause" ]
6
2020-05-19T21:06:20.000Z
2021-05-28T13:31:09.000Z
tests/testapp/models.py
marteinn/wagtail-automatic-move-redirects
2321b6037d00284d447e346faf7841061e33495e
[ "BSD-3-Clause" ]
4
2020-05-19T13:40:05.000Z
2021-03-03T21:36:48.000Z
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page


class AutomaticRedirectsTestIndexPage(Page):
    """Index page fixture for exercising automatic move redirects."""

    # Optional strapline shown above the body.
    subtitle = models.CharField(max_length=255, blank=True, default='')
    body = RichTextField(blank=True, default='')

    content_panels = [
        *Page.content_panels,
        FieldPanel('subtitle'),
        FieldPanel('body'),
    ]


class AutomaticRedirectsTestPage(Page):
    """Leaf page fixture for exercising automatic move redirects."""

    body = RichTextField(blank=True, default='')

    content_panels = [
        *Page.content_panels,
        FieldPanel('body'),
    ]
25.625
71
0.720325
65
615
6.723077
0.430769
0.118993
0.10984
0.118993
0.334096
0.334096
0.334096
0.334096
0.334096
0.334096
0
0.005906
0.173984
615
23
72
26.73913
0.854331
0
0
0.375
0
0
0.026016
0
0
0
0
0
0
1
0
false
0
0.25
0
0.6875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9711a13f8bc8673b475c4605a1bf57d9fc492541
1,190
py
Python
M4_Eval_SDK/Source/samples/python/console_window.py
analogdevicesinc/ApplicationsWaveTool
0c1f236dd0745caa3187841ee1a882f209ac3ebe
[ "Apache-2.0" ]
2
2019-03-11T15:24:51.000Z
2022-03-07T09:42:05.000Z
M4_Eval_SDK/Source/samples/python/console_window.py
analogdevicesinc/ApplicationsWaveTool
0c1f236dd0745caa3187841ee1a882f209ac3ebe
[ "Apache-2.0" ]
null
null
null
M4_Eval_SDK/Source/samples/python/console_window.py
analogdevicesinc/ApplicationsWaveTool
0c1f236dd0745caa3187841ee1a882f209ac3ebe
[ "Apache-2.0" ]
1
2021-03-16T08:26:05.000Z
2021-03-16T08:26:05.000Z
"""TCP console monitor.

Listens on localhost:<port> (port taken from argv[1]) and prints every
line of text received from connected clients, one reader thread per
client.

Usage: python console_window.py <port>
"""
import os
import socket
import sys
import time  # kept from the original import list; not used below
import colorama
from threading import Thread

colorama.init(autoreset=True)

serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "localhost"
port = int(sys.argv[1])

try:
    serversocket.bind((host, port))
except Exception as e:
    import traceback
    # BUG FIX: the original format string had two placeholders but three
    # arguments, so the exception text was silently dropped and host/port
    # were joined with "." instead of ":".
    print("An error occurred while trying to open socket {}:{}:\n{}".format(host, port, e))
    traceback.print_exc()
    input("Hit any key to exit")
    sys.exit(-1)


class client(Thread):
    """Reader thread for one connected client: prints everything received.

    The thread starts itself in __init__ and exits the whole process when
    the connection breaks or the client disconnects.
    """

    def __init__(self, socket, address):
        Thread.__init__(self)
        self.sock = socket
        self.addr = address
        self.start()

    def run(self):
        while True:
            try:
                data = self.sock.recv(1024).decode()
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            except Exception:
                input("The socket was broken! Hit any key to exit")
                os._exit(0)
            # recv() returning empty bytes means the peer closed cleanly.
            if not data:
                input("The client disconnected! Hit any key to exit")
                os._exit(0)
            print(data)


serversocket.listen(5)
print("Console monitor started, listening on {}:{}".format(host, port))
# Accept loop: spawn one client thread per incoming connection.
while True:
    clientsocket, address = serversocket.accept()
    client(clientsocket, address)
28.333333
94
0.610084
150
1,190
4.753333
0.5
0.033661
0.037868
0.046283
0.082749
0.061711
0.061711
0.061711
0
0
0
0.012717
0.273109
1,190
41
95
29.02439
0.811561
0
0
0.166667
0
0
0.177311
0
0
0
0
0
0
1
0.055556
false
0
0.083333
0
0.166667
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
97127268ae73df5b3be209c30b4a2132ec27b166
1,517
py
Python
src/day11.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
src/day11.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
src/day11.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 from aoc2021.util import print_solutions, import_matrix from math import inf def part_1(dumbos): flashes = 0 for _ in range(100): for coord in dumbos: dumbos[coord] += 1 flashed = set() for coord, dumbo in dumbos.items(): if coord not in flashed and dumbo > 9: flash(dumbos, coord, flashed) flashes += len(flashed) return flashes def part_2(dumbos): flashes = 0 step = 0 while True: step += 1 for coord in dumbos: dumbos[coord] += 1 flashed = set() for coord, dumbo in dumbos.items(): if coord not in flashed and dumbo > 9: flash(dumbos, coord, flashed) if len(flashed) == len(dumbos): return step def flash(dumbos, coord, flashed): if coord not in dumbos: return if coord in flashed: return dumbos[coord] += 1 if dumbos[coord] <= 9: return dumbos[coord] = 0 flashed.add(coord) for offset in [(0, 1), (0, -1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, 1), (1, -1)]: flash(dumbos, tuple(map(sum, zip(coord, offset))), flashed) def main(): print_solutions( ["resources/day11-test.txt", "resources/day11.txt"], import_matrix, part_1 ) print_solutions( ["resources/day11-test.txt", "resources/day11.txt"], import_matrix, part_2 ) if __name__ == "__main__": main()
20.780822
89
0.544496
194
1,517
4.159794
0.262887
0.109046
0.022305
0.024783
0.506815
0.463445
0.463445
0.453532
0.453532
0.453532
0
0.046397
0.332235
1,517
72
90
21.069444
0.750247
0.013843
0
0.470588
0
0
0.062876
0.032107
0
0
0
0
0
1
0.078431
false
0
0.078431
0
0.254902
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0