text string | size int64 | token_count int64 |
|---|---|---|
from time import ctime
from time import sleep
from sakshat import SAKSHAT
from sakspins import SAKSPins as Pins
saks = SAKSHAT()

# Times at which the buzzer should sound, encoded as HHMM integers
# (e.g. 2011 == 20:11).
alarm = [2011]
# Flag cleared by the button handler to stop the polling loop.
alarm_run = True


def tact_event_handler(pin, status):
    """Stop the alarm loop when the right tact button is pressed.

    NOTE(review): this handler is never registered with the SAKS board
    in the visible code -- confirm registration happens elsewhere.
    """
    global alarm_run
    if pin == Pins.TACT_RIGHT:
        print("Stop timer")
        alarm_run = False


try:
    # Bug fix: the loop was `while True`, so the `alarm_run` flag set by
    # the button handler was never consulted and the timer could not be
    # stopped by the button.
    while alarm_run:
        current_time = ctime()
        # ctime() -> 'Www Mmm dd HH:MM:SS yyyy'; slice out HH and MM.
        current_time = current_time[11:13] + current_time[14:16]
        print(current_time)
        if int(current_time) in alarm:
            saks.buzzer.beep(1)
        sleep(2)
except KeyboardInterrupt:
    print("End")
    saks.ledrow.off()
    saks.buzzer.off()
import os
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from ntopo.monitors import SimulationMonitor
from ntopo.physics import compute_elasticity_energies, compute_opt_energy, compute_volume_penalty
from ntopo.filter import apply_sensitivity_filter
from ntopo.utils import write_to_file, get_sample_generator, get_single_random_q_sample_generator, get_q_sample_generator, stratified_sampling
from ntopo.oc import compute_oc_multi_batch
from ntopo.render import save_densities_to_file
def get_train_disp_step(opt, problem, disp_model, density_model, disp_variables):
    """Build a compiled training step for the displacement network.

    Returns a ``tf.function`` that, given a batch of sample positions,
    minimizes internal elastic energy + external force loss +
    regularization with respect to ``disp_variables`` using ``opt``.
    """
    @tf.function
    def _train_disp_step(samples):
        with tf.GradientTape() as tape:
            tape.watch(disp_variables)
            internal_energy, force_loss = compute_elasticity_energies(
                problem, disp_model, density_model, samples, training=True)
            # Regularization terms collected on the Keras model.
            reg_loss = tf.keras.backend.sum(disp_model.losses)
            loss = internal_energy + force_loss + reg_loss
        dLdwx = tape.gradient(loss, disp_variables)
        opt.apply_gradients(zip(dLdwx, disp_variables))
        return loss, internal_energy, force_loss, reg_loss
    return _train_disp_step
def run_simulation(problem, disp_model, train_disp_step, n_sim_iterations, sim_sample_generator, saving=False, save_path='.', save_postfix=''):
    """Run the displacement (simulation) training loop.

    Draws one batch per iteration from *sim_sample_generator*, applies
    *train_disp_step*, and reports loss/energy/duration on a progress
    bar; optionally saves the convergence plot at the end.
    """
    monitor = SimulationMonitor(n_sim_iterations)
    bar = tqdm(monitor, total=n_sim_iterations)
    for _ in bar:
        tic = time.time()
        batch = next(sim_sample_generator)
        loss, internal_energy, force_loss, reg_loss = train_disp_step(batch)
        monitor.monitor(loss)
        duration = time.time() - tic
        loss = loss.numpy().item()
        internal_energy = internal_energy.numpy().item()
        reg_loss = reg_loss.numpy().item()
        bar.set_description(
            f'loss {loss:.3e} int. energy {internal_energy:.3e}, dur.: {duration:.3e}, reg loss {reg_loss:.3e}')
        bar.refresh()
    if saving:
        monitor.save_plot(save_path, '', save_postfix)
def get_train_density_step(opt, problem, disp_model, density_model, density_variables, vol_penalty_strength, target_volume_ratio):
    """Build a compiled step that trains the density network directly.

    The loss is the optimization energy plus a soft volume-constraint
    penalty plus the model's regularization losses; gradients are applied
    to ``density_variables`` via ``opt``.
    """
    sample_volume = problem.domain_volume
    # Constrain the material volume to a fraction of the free volume.
    target_volume = problem.free_volume * target_volume_ratio

    @tf.function
    def _train_densities_step(sample_positions):
        with tf.GradientTape() as tape:
            tape.watch(density_variables)
            energy, densities = compute_opt_energy(
                problem, disp_model, density_model, sample_positions)
            penalty = compute_volume_penalty(densities, sample_volume=sample_volume,
                                             vol_penalty_strength=vol_penalty_strength, target_volume=target_volume)
            reg_loss = tf.keras.backend.sum(density_model.losses)
            loss = energy + penalty + reg_loss
        dLdwx = tape.gradient(loss, density_variables)
        opt.apply_gradients(zip(dLdwx, density_variables))
        return loss, penalty, reg_loss
    return _train_densities_step
@tf.function
def compute_sensitivities(problem, disp_model, density_model, sample_positions, use_oc, vol_penalty_strength, target_volume_ratio=None):
    """Evaluate densities and d(loss)/d(densities) at *sample_positions*.

    When ``use_oc`` is True the volume constraint is handled later by the
    OC update, so the loss is the bare optimization energy; otherwise a
    volume penalty is added.

    Returns ``(densities, gradients)``.
    """
    with tf.GradientTape() as tape:
        energy, densities = compute_opt_energy(
            problem, disp_model, density_model, sample_positions)
        if use_oc:
            loss = energy
        else:
            # Bug fix: target_volume was previously computed unconditionally
            # before this branch, so calling with the default
            # target_volume_ratio=None crashed even when use_oc=True and the
            # value was never needed.
            target_volume = problem.free_volume * target_volume_ratio
            penalty = compute_volume_penalty(densities, sample_volume=problem.domain_volume,
                                             vol_penalty_strength=vol_penalty_strength, target_volume=target_volume)
            loss = energy + penalty
    grads = tape.gradient(loss, densities)
    return densities, grads
@tf.function
def compute_target_densities_gradient_descent(old_densities, sensitivities):
    """Derive per-batch target densities via a normalized descent step.

    ``old_densities`` and ``sensitivities`` are parallel lists of tensors.
    """
    # Box-projected descent direction: clip(rho - s, 0, 1) - rho, per batch.
    projected_sensitivities = [tf.math.maximum(0.0, tf.math.minimum(
        1.0, old_densities[i] - sensitivities[i])) - old_densities[i] for i in range(len(old_densities))]
    # Step size normalized by the mean magnitude of the projected step.
    step_size = 0.05 / tf.math.reduce_mean([tf.math.reduce_mean(tf.math.abs(si))
                                            for si in projected_sensitivities])
    # NOTE(review): the actual step uses the RAW `sensitivities`, while the
    # step size is estimated from the projected direction -- confirm this
    # asymmetry is intended.
    return [old_densities[i] - step_size * sensitivities[i] for i in range(len(old_densities))]
@tf.function
def optimize_densities_mse(opt, density_model, sample_positions, targets, density_variables):
    """One MSE fitting step of the density network towards ``targets``.

    Minimizes mean squared reconstruction error plus the model's
    regularization losses with respect to ``density_variables``.
    """
    with tf.GradientTape() as tape:
        tape.watch(density_variables)
        err = density_model(sample_positions, training=True) - targets
        reg_loss = tf.keras.backend.sum(density_model.losses)
        reconstruction_loss = tf.reduce_mean(err*err, keepdims=True)
        loss = reconstruction_loss + reg_loss
    dLdwrho = tape.gradient(loss, density_variables)
    opt.apply_gradients(zip(dLdwrho, density_variables))
    return loss, reconstruction_loss, reg_loss
def save_model_configs(disp_model, density_model, save_path):
    """Serialize both model architectures as JSON files under *save_path*."""
    for model, fname in ((disp_model, 'disp_model_config.json'),
                         (density_model, 'density_model_config.json')):
        write_to_file(model.to_json(), os.path.join(save_path, fname))
def save_model_weights(disp_model, density_model, save_path, save_postfix):
    """Persist the weights of both networks, tagged with *save_postfix*."""
    models = {'disp_model': disp_model, 'density_model': density_model}
    for prefix, model in models.items():
        model.save_weights(os.path.join(save_path, prefix + save_postfix))
def train_non_mmse(problem, disp_model, density_model, opt_disp, opt_density,
                   opt_sample_generator, sim_sample_generator,
                   vol_penalty_strength,
                   target_volume_ratio,
                   save_path,
                   save_interval,
                   n_opt_iterations,
                   n_sim_iterations
                   ):
    """Alternate displacement simulation and direct density training.

    Each outer iteration re-equilibrates the displacement network, then
    applies a single penalty-based gradient step to the density network.
    State (weights + plots) is saved every ``save_interval`` iterations.
    """
    train_disp_step = get_train_disp_step(
        opt_disp, problem, disp_model, density_model=density_model,
        disp_variables=disp_model.trainable_variables)
    train_density_step = get_train_density_step(
        opt_density, problem, disp_model, density_model=density_model,
        density_variables=density_model.trainable_variables,
        vol_penalty_strength=vol_penalty_strength,
        target_volume_ratio=target_volume_ratio)
    save_model_configs(disp_model, density_model, save_path)

    def save_state(save_postfix):
        # Persist weights and a density plot for the current iteration.
        save_model_weights(disp_model, density_model, save_path, save_postfix)
        problem.plot_densities(density_model, save_path, '', save_postfix)

    # Iteration 0: warm up the displacement field before any density update.
    iteration = 0
    saving = True
    save_postfix = f'-{iteration:06d}'
    run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
                   sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
    if saving:
        problem.plot_displacement(disp_model, save_path, '', save_postfix)
        save_state(save_postfix)
    for iteration in range(1, n_opt_iterations + 1):
        print('Optimization iteration ', iteration)
        saving = (iteration % save_interval == 0)
        save_postfix = f'-{iteration:06d}'
        # Re-equilibrate displacements for the current densities.
        run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
                       sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
        if saving:
            problem.plot_displacement(disp_model, save_path, '', save_postfix)
        # Single density-network update per outer iteration.
        sample_positions = next(opt_sample_generator)
        train_density_step(sample_positions)
        if saving:
            save_state(save_postfix)
def train_mmse(problem, disp_model, density_model, opt_disp, opt_density,
               opt_sample_generator, sim_sample_generator,
               n_opt_samples,
               vol_penalty_strength,
               target_volume_ratio,
               save_path,
               filter,
               filter_radius,
               use_oc,
               save_interval,
               n_opt_iterations,
               n_sim_iterations,
               n_opt_batches,
               oc_config):
    """Topology optimization with MMSE density updates.

    Each outer iteration: (1) re-runs the displacement simulation,
    (2) computes density sensitivities over ``n_opt_batches`` batches,
    (3) derives target densities via OC or projected gradient descent,
    and (4) fits the density network to those targets by MSE.

    NOTE(review): the ``filter`` parameter shadows the builtin; kept for
    interface compatibility.
    """
    density_variables = density_model.trainable_variables
    train_disp_step = get_train_disp_step(
        opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables)
    save_model_configs(disp_model, density_model, save_path)

    def save_state(save_postfix, target_densities=None):
        # Persist weights, a density plot and (2-D problems only) an image
        # of the first batch of target densities.
        save_model_weights(disp_model, density_model, save_path, save_postfix)
        problem.plot_densities(density_model, save_path, '', save_postfix)
        if target_densities is not None and problem.dim == 2:
            save_densities_to_file(np.reshape(target_densities[0], (n_opt_samples[1], n_opt_samples[0])), filename=os.path.join(
                save_path, 'density' + save_postfix + '-target0.png'))

    # Iteration 0: warm up the displacement field before optimizing.
    iteration = 0
    saving = True
    save_postfix = f'-{iteration:06d}'
    run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
                   sim_sample_generator=sim_sample_generator, saving=True, save_path=save_path, save_postfix=save_postfix)
    if saving:
        problem.plot_displacement(disp_model, save_path, '', save_postfix)
        save_state(save_postfix)
    for iteration in range(1, n_opt_iterations + 1):
        print('Optimization iteration ', iteration)
        saving = (iteration % save_interval == 0)
        save_postfix = f'-{iteration:06d}'
        # Re-equilibrate displacements for the current densities.
        run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
                       sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
        if saving:
            problem.plot_displacement(disp_model, save_path, '', save_postfix)
        # Gather densities and sensitivities over several sample batches.
        old_densities = []
        sensitivities = []
        sample_positions = []
        for _ in range(n_opt_batches):
            input_samples = next(opt_sample_generator)
            old_di, sensitivities_i = compute_sensitivities(
                problem, disp_model, density_model, input_samples, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio)
            if filter == 'sensitivity':
                sensitivities_i = apply_sensitivity_filter(
                    input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius)
            else:
                assert filter in ('none', ), 'not supported filter'
            old_densities.append(old_di)
            sensitivities.append(sensitivities_i)
            sample_positions.append(input_samples)
        # Turn sensitivities into per-sample density targets.
        if use_oc:
            target_densities = compute_oc_multi_batch(
                old_densities=old_densities, sensitivities=sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio,
                max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter'])
        else:
            target_densities = compute_target_densities_gradient_descent(
                old_densities=old_densities, sensitivities=sensitivities)
        # Fit the density network to the targets, one batch at a time.
        progress_bar = tqdm(range(n_opt_batches))
        for i in progress_bar:
            loss, reconstruction_loss, reg_loss = optimize_densities_mse(
                opt_density, density_model, sample_positions[i], target_densities[i], density_variables)
            loss = loss.numpy().item()
            reconstruction_loss = reconstruction_loss.numpy().item()
            reg_loss = reg_loss.numpy().item()
            progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}')
            progress_bar.refresh()
        if saving:
            save_state(save_postfix, target_densities)
def train_mmse_space(problem, disp_model, density_model, opt_disp, opt_density,
                     n_sim_samples, n_opt_samples,
                     opt_sample_generator,
                     vol_penalty_strength,
                     target_volume_ratio,
                     save_path,
                     filter,
                     filter_radius,
                     use_oc,
                     save_interval,
                     n_opt_iterations,
                     n_sim_iterations,
                     n_opt_batches,
                     n_q_samples,
                     oc_config):
    """MMSE topology optimization over a parametrized design space.

    Like ``train_mmse`` but the networks additionally take a scalar
    parameter ``q`` sampled from ``problem.q_domain``; density targets are
    accumulated over all sampled ``q`` values and fitted jointly on a
    shuffled concatenation.
    """
    density_variables = density_model.trainable_variables
    train_disp_step = get_train_disp_step(
        opt_disp, problem, disp_model, density_model=density_model, disp_variables=disp_model.trainable_variables)
    save_model_configs(disp_model, density_model, save_path)

    def save_state(save_postfix, target_densities=None):
        # NOTE(review): duplicates save_model_weights() inline, and the
        # `target_densities` argument is accepted but never used here.
        disp_model.save_weights(os.path.join(
            save_path, 'disp_model' + save_postfix))
        density_model.save_weights(os.path.join(
            save_path, 'density_model' + save_postfix))
        problem.plot_densities(density_model, save_path, '', save_postfix)

    # Iteration 0: warm up displacements over randomly sampled q values,
    # with twice the usual simulation budget.
    iteration = 0
    saving = True
    save_postfix = f'-{iteration:06d}'
    sim_sample_generator = get_single_random_q_sample_generator(problem.q_domain, problem.domain, n_sim_samples)
    run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=2*n_sim_iterations,
                   sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix)
    if saving:
        qs = stratified_sampling(problem.q_domain, n_cells=[
            n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten()
        for q in qs:
            save_postfix_q = f'-{iteration:06d}-q={q:.6f}'
            print('q', q)
            problem.plot_displacement(
                disp_model, save_path, '', save_postfix_q, q=np.array([[q]]))
        save_state(save_postfix)
    for iteration in range(1, n_opt_iterations + 1):
        print('Optimization iteration ', iteration)
        saving = (iteration % save_interval == 0)
        print('saving', saving)
        target_samples_all_q = []
        target_densities_all_q = []
        # Fresh stratified q samples every outer iteration.
        qs = stratified_sampling(problem.q_domain, n_cells=[
            n_q_samples], n_points_per_cell=1, dtype=np.float32).flatten()
        for q in qs:
            save_postfix_q = f'-{iteration:06d}-q={q:.6f}'
            if problem.volume_ratio_q_idx != -1:
                # For these problems q itself encodes the target volume ratio.
                assert problem.volume_ratio_q_idx == 0
                target_volume_ratio = q
            old_densities = []
            sensitivities = []
            sample_positions_with_q = []
            # Simulate displacements at this fixed q.
            sim_sample_generator = get_q_sample_generator(
                q, problem.domain, n_samples=n_sim_samples)
            run_simulation(problem, disp_model, train_disp_step, n_sim_iterations=n_sim_iterations,
                           sim_sample_generator=sim_sample_generator, saving=saving, save_path=save_path, save_postfix=save_postfix_q)
            if saving:
                problem.plot_displacement(
                    disp_model, save_path, '', save_postfix_q, q=np.array([[q]]))
            for _ in range(n_opt_batches):
                input_samples = next(opt_sample_generator)
                # Append the current q as an extra coordinate column.
                q_vec = np.ones((np.prod(n_opt_samples), 1), dtype=np.float32) * q
                input_samples_with_q = np.concatenate(
                    (input_samples, q_vec), axis=1)
                old_di, sensitivities_i = compute_sensitivities(
                    problem, disp_model, density_model, input_samples_with_q, use_oc, vol_penalty_strength=vol_penalty_strength, target_volume_ratio=target_volume_ratio)
                if filter == 'sensitivity':
                    sensitivities_i = apply_sensitivity_filter(
                        input_samples, old_di, sensitivities_i, n_samples=n_opt_samples, domain=problem.domain, dim=problem.dim, radius=filter_radius)
                else:
                    assert filter in ('none', ), 'not supported filter'
                old_densities.append(old_di)
                sensitivities.append(sensitivities_i)
                sample_positions_with_q.append(input_samples_with_q)
            # Per-q density targets (OC or projected gradient descent).
            if use_oc:
                target_densities = compute_oc_multi_batch(
                    old_densities, sensitivities, sample_volume=problem.domain_volume, target_volume=problem.free_volume * target_volume_ratio,
                    max_move=oc_config['max_move'], damping_parameter=oc_config['damping_parameter'])
            else:
                target_densities = compute_target_densities_gradient_descent(
                    old_densities=old_densities, sensitivities=sensitivities)
            target_samples_all_q.append(sample_positions_with_q)
            target_densities_all_q.append(target_densities)
        # Flatten the (q, batch) structure and fit the density net on a
        # shuffled concatenation of all targets.
        n_batch = len(target_samples_all_q) * len(target_samples_all_q[0])
        n_samples_total = n_batch * np.prod(n_opt_samples)
        target_samples_all_q = tf.reshape(
            target_samples_all_q, [n_samples_total, problem.dim + problem.q_dim])
        target_densities_all_q = tf.reshape(
            target_densities_all_q, [n_samples_total, 1])
        indices = np.arange(n_samples_total)
        np.random.shuffle(indices)
        n_per_batch = n_samples_total // n_batch
        progress_bar = tqdm(range(n_batch))
        for i in progress_bar:
            batch_samples = tf.gather(target_samples_all_q, tf.constant(
                indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0)
            batch_densities = tf.gather(target_densities_all_q, tf.constant(
                indices[i*n_per_batch:(i+1)*n_per_batch]), axis=0)
            loss, reconstruction_loss, reg_loss = optimize_densities_mse(
                opt_density, density_model, batch_samples, batch_densities, density_variables)
            loss = loss.numpy().item()
            reconstruction_loss = reconstruction_loss.numpy().item()
            reg_loss = reg_loss.numpy().item()
            progress_bar.set_description(f'loss {loss} rec. loss {reconstruction_loss} reg loss {reg_loss}')
            progress_bar.refresh()
        if saving:
            save_postfix = f'-{iteration:06d}'
            save_state(save_postfix, target_densities)
| 18,429 | 5,787 |
from django.shortcuts import render,get_object_or_404
from django.http import HttpResponse,Http404,HttpResponseRedirect
from appointment.models import Patient,Appointment
from django.template import RequestContext,loader
from django.core.urlresolvers import reverse
from django.utils import timezone
from datetime import datetime,timedelta
import phonenumbers
from django.core.mail import EmailMessage
from django.views.generic import View
# Create your views here.
class Index(View):
    """Landing page: lists all appointment slots still available.

    Slots are 10-minute intervals between 09:00 and 13:00 for today,
    tomorrow and the day after, minus the ones already booked.

    NOTE(review): uses ``xrange``, so this module targets Python 2.
    """

    def get(self, request):
        dt = []
        now = timezone.now()
        one_day = timedelta(days=1)
        two_day = timedelta(days=2)
        # Rewind `now` to exactly 09:00:00 (clinic opening) one unit at a
        # time.  NOTE(review): if called before 09:00, the hour loop wraps
        # past midnight and lands on *yesterday* 09:00 -- confirm intended.
        h = timedelta(hours=1)
        while now.hour != int(9):  # start time
            now = now - h
        s = timedelta(seconds=1)
        while now.second != int(0):
            now = now - s
        m = timedelta(minutes=1)
        while now.minute != int(0):
            now = now - m
        # Generate 10-minute slots from 09:00 up to 13:00.
        m = timedelta(minutes=10)
        dt.append(now)
        won = now
        while won.hour != int(13):  # check 1 loop logic
            won = won + m
            dt.append(won)
        dt.pop()  # drop the 13:00 slot itself
        # Duplicate today's slots for tomorrow and the day after.
        for x in xrange(len(dt)):
            won = dt[x] + one_day
            dt.append(won)
            won = dt[x] + two_day
            dt.append(won)
        # dt now holds all possible appointments; remove the ones already
        # booked, i.e. present in the database.
        a = Appointment.objects.all()
        a = [x.appointment_datetime for x in a]
        display_list = [str(x) for x in list(set(dt) - set(a))]  # remove already booked appointments
        display_list.sort()
        # Trim each timestamp string to 'YYYY-MM-DD HH:MM:SS' (19 chars),
        # discarding any timezone suffix.
        for x in xrange(len(display_list)):
            bugfix = list(display_list[x])
            bugfix = bugfix[:19]
            display_list[x] = "".join([y for y in bugfix])
        context = {'display_list': display_list}
        return render(request, 'appointment/index.html', context)
class Form_handle(View):
    """Handle the appointment form.

    Existing patients get a 'review' appointment when the requested slot
    is within 15 days of their previous one, otherwise a 'fresh' one.
    New patients are validated (phone, age, email) and then stored with
    a fresh appointment.  Both doctor and patient are notified by email.
    """

    _DOCTOR_EMAIL = 'spvijayal@gmail.com'
    # Bug fix: the original list was missing two commas, silently fusing
    # 'msn.com'+'outlook.com' and 'yahoo.com'+'yahoo.co.in' into single
    # bogus entries, so four common domains were rejected.
    _EMAIL_DOMAINS = ['aol.com', 'comcast.net', 'facebook.com',
                      'gmail.com', 'hotmail.com', 'msn.com',
                      'outlook.com', 'yahoo.com', 'yahoo.co.in']

    def _notify(self, patient, appointment, message):
        """Mail the problem to the doctor and the schedule to the patient."""
        mail_to_doctor = EmailMessage('appointment for %s' % patient.patient_name,
                                      appointment.appointment_problem,
                                      to=[self._DOCTOR_EMAIL])
        mail_to_doctor.send()  # returns 1 on success or SMTP standard errors
        mail_to_patient = EmailMessage('clinic\'s name', message,
                                       to=['%s' % patient.patient_email])
        mail_to_patient.send()

    def _book_for_existing(self, F, pp):
        """Store a review/fresh appointment for a known patient."""
        try:
            app = pp.appointment
        except Appointment.DoesNotExist:
            # Bug fix: `app` was left undefined here and the code below
            # crashed with a NameError; no prior appointment means 'fresh'.
            app = None
        comp = datetime.strptime(F['datetime'], '%Y-%m-%d %H:%M:%S')
        # Bug fix: the original compared day-of-month only
        # (comp.day - app....day), which is wrong across month boundaries.
        is_review = app is not None and (comp - app.appointment_datetime).days <= 15
        if is_review:
            store_app = Appointment(
                appointment_datetime=comp,
                fresh_or_review=True,
                appointment_problem=F['problem'])
            kind = 'review'
        else:
            store_app = Appointment(
                appointment_datetime=comp,
                appointment_problem=F['problem'])
            kind = 'fresh'
        store_app.save()
        pp.appointment = store_app
        pp.save()
        mess = '''Respected Sir/Madam,
Your %s appointment is scheduled on %s''' % (kind, F['datetime'])
        self._notify(pp, store_app, mess)
        return HttpResponseRedirect('results/')

    def _create_patient(self, request, F):
        """Validate the form and store a brand-new patient + appointment."""
        invalid_phone = {'error_message': ' Invalid Phone Number '}
        try:
            z = phonenumbers.parse(F['phonenum'], "IN")
        except phonenumbers.NumberParseException:
            return render(request, 'appointment/index_l.html', invalid_phone)
        if int(F['age']) >= 120 or int(F['age']) < 1:
            con = {'error_message': '%s is your age eh !! Nice try' % F['age']}
            return render(request, 'appointment/index_l.html', con)
        if len(F['phonenum'][3:]) != 10:
            return render(request, 'appointment/index_l.html', invalid_phone)
        try:
            # Every character after the leading '+' must be a digit.
            for digit in F['phonenum'][1:]:
                int(digit)
        except ValueError:
            return render(request, 'appointment/index_l.html', invalid_phone)
        # Bug fix: `phonenumbers.is_valid_number` was referenced without
        # being called, so the validity check never fired.
        if not (phonenumbers.is_possible_number(z) and phonenumbers.is_valid_number(z)):
            return render(request, 'appointment/index_l.html', invalid_phone)
        local, _, domain = str(F['email']).partition('@')
        if local == '':
            err_mail = {'error_message': ' Invalid email address '}
            return render(request, 'appointment/index_l.html', err_mail)
        if domain not in self._EMAIL_DOMAINS:
            err_mail = {'error_message': ' No support for email by %s' % domain}
            return render(request, 'appointment/index_l.html', err_mail)
        comp = datetime.strptime(F['datetime'], '%Y-%m-%d %H:%M:%S')
        store_app = Appointment(
            appointment_datetime=comp,
            appointment_problem=F['problem'])
        store_app.save()
        p = Patient(appointment=store_app,
                    patient_name=F['name'],
                    patient_age=int(F['age']),
                    patient_sex=F['sex'],
                    patient_email=F['email'],
                    patient_phone=F['phonenum'])
        p.save()
        mess = '''Respected Sir/Madam,
We are glad to offer our services,Kindly visit the clinic on %s''' % F['datetime']
        self._notify(p, store_app, mess)
        return HttpResponseRedirect('results/')

    def post(self, request):
        """Dispatch to the existing-patient or new-patient flow."""
        F = request.POST
        try:
            pp = Patient.objects.get(patient_name=F['name'],
                                     patient_email=F['email'])
        except Patient.DoesNotExist:
            return self._create_patient(request, F)
        return self._book_for_existing(F, pp)
class Results(View):
    """Render the confirmation page shown after a successful booking."""

    def get(self, request):
        template_name = 'appointment/index_l.html'
        return render(request, template_name)
| 8,019 | 2,247 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-01 14:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Enforce an 800-character cap on outgoing message text."""

    dependencies = [("msgs", "0053_faq")]

    operations = [migrations.AlterField(model_name="outgoing", name="text", field=models.TextField(max_length=800))]
| 357 | 137 |
import unittest
from wikitables.table import Table
from testClasses import TestPage
# Load the fixture page once at import time; strip newlines so the parser
# sees one continuous HTML string.
with open ("tests/data/List_of_national_parks_of_the_United_States.html", "r") as html_page:
    text = html_page.read().replace('\n', '')
class TestTableInit(unittest.TestCase):
    """Construction of Table objects from the parsed fixture page."""

    def setUp(self):
        self.page = TestPage(text)

    def test_table_init(self):
        # The fixture article contains exactly one wikitable.
        self.assertEqual(len(self.page.tables), 1)
class TestTableMethods(unittest.TestCase):
    """Behavior of the single table found on the fixture page."""

    ACADIA = 'http://dbpedia.org/resource/Acadia_National_Park'

    def setUp(self):
        self.page = TestPage(text)
        self.table = self.page.tables[0]

    def test_repr(self):
        self.assertEqual("Table in section 'National Parks'", repr(self.table))

    def test_section(self):
        self.assertEqual('National Parks', self.table._section())

    def test_key_extraction(self):
        # The key column is 'name', which sits at position 0.
        self.assertEqual(0, self.table.key)

    def test_is_key(self):
        self.assertTrue(self.table.is_key(0))
        self.assertTrue(self.table.is_key('Name'))
        self.assertFalse(self.table.is_key(1))
        self.assertFalse(self.table.is_key('Photo'))

    def test_get_item(self):
        # Column access works both by index and by header name.
        self.assertTrue(self.ACADIA in self.table[0])
        self.assertTrue(self.ACADIA in self.table['Name'])

    def test_skip(self):
        self.assertFalse(self.table.skip())

    def test_name_match(self):
        match = self.table.name_match('located_in', 'Location')
        id_match = self.table.name_match('located_in', 2)
        self.assertEqual(match, id_match)
        self.assertTrue(0.0 <= match <= 1.0)

    def test_str_col_name(self):
        self.assertEqual(self.table._str_column_name(0), self.table._str_column_name('Name'))
| 1,906 | 638 |
# -*- coding: UTF-8 -*-
#Othello.java style : single file program
import os
import time
from flask import Flask
from flask import jsonify
from flask import request
# Global variables section

# Artificial latency (seconds) applied to asset creation.
_CREATE_DELAY = 2
PORT = 30026  # listening port -- to be updated for CloudFoundry deployment
CONTROLLER_VERSION = "v1"
_CONTROLLER_NAME = "Asset Mgmt Controller"
# Route templates for the asset collection and for a single asset.
_26E_URL = "/"+CONTROLLER_VERSION+"/26e"
_26E_ID = "/"+CONTROLLER_VERSION+"/26e/<id>"
# Static payload returned by /help, describing the API surface.
_HELPER_RESPONSE = {
    _CONTROLLER_NAME: CONTROLLER_VERSION,
    "GET "+_26E_URL: {
        "method": "GET",
        "parameters": "",
        "code retour": "200"
    },
    "GET "+_26E_ID: {
        "method": "GET",
        "parameters": "un identifiant de vips",
        "code retour": "200"
    },
    "POST "+_26E_URL: {
        "method": "POST",
        "parameters": "json body like {}",
        "code retour": "201"
    },
    "PATCH "+_26E_ID: {
        "method": "PATCH",
        "parameters": "json body like : {vipid : 'DESCRIPTION':'DESCRIPTION'}",
        "code retour": "200"
    },
    "DELETE "+_26E_ID: {
        "method": "DELETE",
        "parameters": "un identifiant de vip",
        "code retour": "200"
    }
}
# In-memory asset store: id (str) -> posted JSON body.
ramDic = {}
app = Flask(__name__)
@app.route('/')
def index():
    """Liveness check for the service root."""
    status_message = 'WORKING'
    return status_message
@app.route('/help')
def help():  # name kept for the Flask endpoint, though it shadows builtins.help
    """Return the static endpoint catalogue as JSON."""
    catalogue = _HELPER_RESPONSE
    return jsonify(catalogue)
@app.route(_26E_URL, methods=['GET'])
def list_assets():
    """Return every stored asset keyed by id.

    Bug fix: the original called ``jsonify(*ramDic)``, which unpacks only
    the dict KEYS as positional arguments (the PEP 448 comment suggests
    ``**`` was intended); serialize the whole mapping instead.
    """
    response = jsonify(ramDic)
    response.status_code = 200
    return response
@app.route(_26E_ID, methods=['GET'])
def list_asset(id):
    """Return one asset by id (JSON null body when the id is unknown)."""
    asset = ramDic.get(id)
    response = jsonify(asset)
    response.status_code = 200
    return response
@app.route(_26E_URL, methods=['POST'])
def create_assets():
    """Store the posted JSON body under the next sequential id."""
    body = request.get_json(force=True)
    new_id = str(len(ramDic))
    ramDic[new_id] = body
    response = jsonify({'id': new_id}, {"obj": ramDic.get(new_id)})
    response.status_code = 201
    time.sleep(_CREATE_DELAY)  # simulate a slow backend
    return response
@app.route(_26E_ID, methods=['PATCH'])
def patch_assets(id):
    """Placeholder: PATCH is not implemented yet.

    Bug fix: the route declares an ``<id>`` variable but the view took no
    argument, so Flask raised a TypeError on every PATCH request.
    """
    response = jsonify('NOT IMPLEMENTED YET')
    response.status_code = 200
    return response
@app.route(_26E_ID, methods=['DELETE'])
def delete_assets(id):
    """Remove an asset and return it.

    Robustness fix: an unknown id previously raised an unhandled KeyError
    (HTTP 500); answer 404 instead.
    """
    if id not in ramDic:
        response = jsonify({'error': 'not found'})
        response.status_code = 404
        return response
    response = jsonify(ramDic.pop(id))
    response.status_code = 200
    return response
# Development server settings -- debug mode must be disabled in production.
app.debug = True
app.run(host='0.0.0.0', port=PORT)
| 2,395 | 953 |
# http://flask.pocoo.org/docs/1.0/patterns/packages/
from setuptools import setup, find_packages
import versioneer
# (name, email) pairs; joined below into the setuptools metadata fields.
authors = [
    ('David Ojeda', 'david@dojeda.com'),
]
author_names = ', '.join(tup[0] for tup in authors)
author_emails = ', '.join(tup[1] for tup in authors)

setup(
    name='quetzal',
    packages=find_packages(exclude=['docs', 'migrations', 'tests']),
    namespace_packages=['quetzal'],
    include_package_data=True,
    # NOTE(review): '>=3.6, ~=3.7' -- the '~=3.7' clause already implies
    # >=3.7, making '>=3.6' redundant; confirm which bound is intended.
    python_requires='>=3.6, ~=3.7',
    install_requires=[
        'Flask',
        'werkzeug',
        'Flask-Login',
        'Flask-Principal',
        'connexion',
        'celery',
        'kombu',
        'Flask-Celery-Helper',
        'SQLAlchemy',
        'Flask-SQLAlchemy',
        'Flask-Migrate',
        'alembic',
        'psycopg2-binary',
        'sqlparse',
        'requests',
        'Click',
        'syslog-rfc5424-formatter',
        'apscheduler',
        'gunicorn',
        'google-cloud-storage',
    ],
    author=author_names,
    author_email=author_emails,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Flask',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Database',
        'Topic :: Scientific/Engineering',
        'Topic :: System :: Archiving',
    ],
    # Version and build commands are managed by versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    zip_safe=False,
)
| 1,489 | 512 |
#Exercício Python 102:
# Crie um programa que tenha uma função fatorial()
# que receba dois parâmetros:
# o primeiro que indique o número a calcular e
# outro chamado show, que será um valor lógico (opcional)
# indicando se será mostrado ou não na tela o processo de
# cálculo do fatorial.
def fatorial(n, show=False):
    """Compute the factorial of *n*.

    :param n: number whose factorial is computed
    :param show: when True, return the worked-out multiplication as a
        string, e.g. ``'1 X 2 X 3 = 6'`` (optional, default False)
    :return: the formatted string when ``show`` is True and ``n >= 1``,
        otherwise the integer factorial
    """
    # Renamed the local list: the original shadowed the function's own name.
    factors = list(range(1, n + 1))
    product = 1
    for factor in factors:
        product *= factor
    if show and factors:
        steps = ' X '.join(str(factor) for factor in factors)
        return f'{steps} = {product}'
    return product
# Demonstration: show the worked-out product, then just the value, then
# the function's own documentation.
print(fatorial(5, True))
print(fatorial(9))
help(fatorial)
| 849 | 301 |
f = open("two_cities_ascii.txt", "r")  # open the file for reading
text = f.read()  # read the whole file into `text` as a string
f.close()  # close the file
# Clean the text of every character that is not a letter by mapping each
# unwanted character to a space.  Idiom fix: the original chained 41
# separate `str.replace` calls; `str.translate` performs the same
# single-character substitutions in one pass.
_unwanted = ',.!"#$%^&()*+-/0123456789:;<=>?@[]_}|{~`'
text = text.translate(str.maketrans(_unwanted, ' ' * len(_unwanted)))
# The original also replaced the two-character sequence backslash+space;
# keep that step.  NOTE(review): the original interleaved this replace
# mid-chain, which only behaves differently when the input itself
# contains backslashes.
text = text.replace('\\ ', ' ')
# Split on whitespace and store all words (symbols removed) in the list L.
L = text.split()
# Find and print the pairs of words from the list whose combined length is
# exactly 20 characters, removing each printed pair from the list.
i = 0
while (i < len(L) - 1):
    removeitems = L[i]  # NOTE(review): assigned but never used
    j = i + 1
    # NOTE(review): the `-2` bound means the last two positions are never
    # considered as a second pair member -- confirm this is intended.
    while (j < len(L) - 2):
        n = len(L[i] + L[j])
        if n == 20:
            print('Ζευγάρι λέξεων: ', L[i], '-', L[j])
            # NOTE(review): after L.pop(i), every later element shifts left
            # by one, so pop(j+1) removes the element ORIGINALLY at index
            # j+2, not the printed partner at j -- this looks like an
            # off-by-one bug.  The else branch is dead: j starts at i+1,
            # so i < j always holds here.
            if (i < j):
                L.pop(i)
                L.pop(j + 1)
            else:
                L.pop(i)
                L.pop(j)
        j = j + 1
    i = i + 1
# Bucket the remaining words by length (1..19 characters) and display them.
ls1, ls2, ls3, ls4, ls5 = [], [], [], [], []
ls6, ls7, ls8, ls9, ls10 = [], [], [], [], []
ls11, ls12, ls13, ls14, ls15 = [], [], [], [], []
ls16, ls17, ls18, ls19 = [], [], [], []
# Lookup table indexed by word length (slot 0 unused) replaces the long
# elif chain; words of any other length are ignored, as before.
_buckets = [None, ls1, ls2, ls3, ls4, ls5, ls6, ls7, ls8, ls9, ls10,
            ls11, ls12, ls13, ls14, ls15, ls16, ls17, ls18, ls19]
for word in L:
    if 1 <= len(word) <= 19:
        _buckets[len(word)].append(word)
# Show the remaining pairs in the list, grouped by their length.
print("*******************Λέξεις που απομένουν*****************")
print(" 1", ls1, "\n", "2", ls2, "\n", "3", ls3, "\n", "4", ls4, "\n", "5", ls5, "\n", "6", ls6, "\n", "7", ls7, "\n", "8", ls8, "\n", "9", ls9, "\n", "10", ls10, "\n", "11", ls11, "\n", "12", ls12, "\n", "13", ls13, "\n", "14", ls14, "\n", "15", ls15, "\n", "16", ls16, "\n", "17", ls17, "\n", "18", ls18, "\n", "19", ls19)
| 3,732 | 2,114 |
# Generated by Django 3.0.1 on 2020-01-20 19:35
from django.db import migrations, models
import shop.models
class Migration(migrations.Migration):
    """Make the product image optional, with a placeholder default."""

    dependencies = [
        ('shop', '0013_auto_20200120_1949'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(blank=True, default='http://placehold.it/700x400', null=True, upload_to=shop.models.get_image_path),
        ),
    ]
| 488 | 179 |
# Generated by Django 3.2.9 on 2021-12-01 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch the starred_concerts primary key to a plain BigIntegerField."""

    dependencies = [
        ('users', '0006_auto_20211117_2144'),
    ]

    operations = [
        migrations.AlterField(
            model_name='starred_concerts',
            name='id',
            field=models.BigIntegerField(primary_key=True, serialize=False),
        ),
    ]
| 416 | 151 |
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from facebook_data_analysis.conversation_analysis.activity_graphs import save_graph
from facebook_data_analysis.global_vars import messages_cols
from facebook_data_analysis.tools.helpers import cached
from sklearn.manifold import MDS
from tqdm import tqdm
@cached("people_distance")
def get_people_distances(
    messages_df, conversations_df, min_messages_to_appear=10
):  # pylint: disable=too-many-locals
    """Return a pairwise distance DataFrame between frequent group chatters.

    Co-occurrence is accumulated over shared group conversations (sum of the
    two people's message counts in each shared conversation), normalised by
    the pair's total message counts, and turned into a distance as
    ``1 - normalised co-occurrence``.
    """
    # Conversations with more than two participants count as group chats.
    group_conversations = conversations_df.loc[conversations_df["n_participants"] > 2][
        messages_cols.conv_id
    ]
    messages_in_group_conversations = messages_df[
        messages_df[messages_cols.conv_id].isin(group_conversations.values)
    ]
    # Keep only people with more than `min_messages_to_appear` group messages.
    friends_in_group_conversations = messages_in_group_conversations.groupby(
        messages_cols.sender
    )[messages_cols.timestamp].count()
    selected_friends = friends_in_group_conversations[
        friends_in_group_conversations > min_messages_to_appear
    ].index.values
    messages_in_group_conversations = messages_in_group_conversations[
        messages_in_group_conversations[messages_cols.sender].isin(selected_friends)
    ]
    participants_list_by_conversation = messages_in_group_conversations.groupby(
        messages_cols.conv_id
    )[messages_cols.sender].unique()
    messages_by_person_by_conversation = messages_in_group_conversations.groupby(
        [messages_cols.conv_id, messages_cols.sender]
    )[messages_cols.timestamp].count()
    total_messages_by_person = messages_in_group_conversations.groupby(
        [messages_cols.sender]
    )[messages_cols.timestamp].count()
    print("Counting common conversation messages for all friends...")
    co_occurrence = pd.DataFrame(0, index=selected_friends, columns=selected_friends)
    for conversation in tqdm(participants_list_by_conversation.index):
        participants = participants_list_by_conversation[conversation]
        messages_by_person = messages_by_person_by_conversation[conversation]
        # Symmetric accumulation over unordered pairs; the diagonal
        # (participant1 == participant2) is added once and equals twice that
        # person's own message count in the conversation.
        for participant1 in range(  # pylint: disable=consider-using-enumerate # TODO
            len(participants)
        ):
            for participant2 in range(participant1, len(participants)):
                exchanged_messages = (
                    messages_by_person[participants[participant1]]
                    + messages_by_person[participants[participant2]]
                )
                co_occurrence.loc[
                    participants[participant1], participants[participant2]
                ] += exchanged_messages
                if participant1 != participant2:
                    co_occurrence.loc[
                        participants[participant2], participants[participant1]
                    ] += exchanged_messages
    print()
    print("Adjusting for total number of messages...")
    with tqdm(total=len(selected_friends) ** 2) as pbar:
        for friend1 in selected_friends:
            for friend2 in selected_friends:
                # Normalise each cell by the pair's combined total messages.
                co_occurrence.loc[friend1, friend2] = co_occurrence.loc[
                    friend1, friend2
                ] / (
                    total_messages_by_person[friend1]
                    + total_messages_by_person[friend2]
                )
                pbar.update()
    distance = 1 - co_occurrence
    return distance
@cached("projection_coordinates")
def get_projection_coordinates(distance):
    """Project the precomputed pairwise distance matrix to 2-D with metric MDS."""
    embedding = MDS(n_components=2, verbose=1, n_jobs=-1, dissimilarity="precomputed")
    points = embedding.fit_transform(distance.values)
    return pd.DataFrame(points, index=distance.index)
@save_graph("friends_graph")
def friends_plot(coordinates):
    """Scatter-plot friends at their 2-D coordinates, labelling each point by name."""
    _, axes = plt.subplots()
    coordinates.plot(0, 1, kind="scatter", ax=axes)
    for person, point in coordinates.iterrows():
        axes.annotate(person, point)
    return axes
@save_graph("friends_network")
def friends_network(distance, threshold):
    """Draw friends as a graph, linking pairs whose distance is below *threshold*."""
    adjacency = distance < threshold
    graph = nx.convert_matrix.from_pandas_adjacency(adjacency)
    _, axes = plt.subplots()
    nx.draw(graph, ax=axes)
    return axes
| 4,048 | 1,212 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TeamArgs', 'Team']
@pulumi.input_type
class TeamArgs:
    # tfgen-generated input type for the Team resource — regenerate with the
    # Pulumi Terraform Bridge rather than editing by hand.
    def __init__(__self__, *,
                 default_member_role: pulumi.Input[str],
                 default_team: pulumi.Input[bool],
                 privacy: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Team resource.
        :param pulumi.Input[str] default_member_role: Default role to assign to a team member.
        :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
        :param pulumi.Input[str] privacy: The privacy level to set the team too.
        :param pulumi.Input[str] description: The description to assign to the team.
        :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create.
        :param pulumi.Input[str] name: The name of the team.
        """
        pulumi.set(__self__, "default_member_role", default_member_role)
        pulumi.set(__self__, "default_team", default_team)
        pulumi.set(__self__, "privacy", privacy)
        # Optional arguments are only set when provided.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if members_can_create_pipelines is not None:
            pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="defaultMemberRole")
    def default_member_role(self) -> pulumi.Input[str]:
        """
        Default role to assign to a team member.
        """
        return pulumi.get(self, "default_member_role")

    @default_member_role.setter
    def default_member_role(self, value: pulumi.Input[str]):
        pulumi.set(self, "default_member_role", value)

    @property
    @pulumi.getter(name="defaultTeam")
    def default_team(self) -> pulumi.Input[bool]:
        """
        Whether to assign this team to a user by default.
        """
        return pulumi.get(self, "default_team")

    @default_team.setter
    def default_team(self, value: pulumi.Input[bool]):
        pulumi.set(self, "default_team", value)

    @property
    @pulumi.getter
    def privacy(self) -> pulumi.Input[str]:
        """
        The privacy level to set the team too.
        """
        return pulumi.get(self, "privacy")

    @privacy.setter
    def privacy(self, value: pulumi.Input[str]):
        pulumi.set(self, "privacy", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description to assign to the team.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="membersCanCreatePipelines")
    def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether team members can create.
        """
        return pulumi.get(self, "members_can_create_pipelines")

    @members_can_create_pipelines.setter
    def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "members_can_create_pipelines", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _TeamState:
    # tfgen-generated state type (all fields optional) used for Team.get()
    # lookups — regenerate rather than edit by hand.
    def __init__(__self__, *,
                 default_member_role: Optional[pulumi.Input[str]] = None,
                 default_team: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 privacy: Optional[pulumi.Input[str]] = None,
                 slug: Optional[pulumi.Input[str]] = None,
                 uuid: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Team resources.
        :param pulumi.Input[str] default_member_role: Default role to assign to a team member.
        :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
        :param pulumi.Input[str] description: The description to assign to the team.
        :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[str] privacy: The privacy level to set the team too.
        :param pulumi.Input[str] slug: The name of the team.
        :param pulumi.Input[str] uuid: The UUID for the team.
        """
        if default_member_role is not None:
            pulumi.set(__self__, "default_member_role", default_member_role)
        if default_team is not None:
            pulumi.set(__self__, "default_team", default_team)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if members_can_create_pipelines is not None:
            pulumi.set(__self__, "members_can_create_pipelines", members_can_create_pipelines)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if privacy is not None:
            pulumi.set(__self__, "privacy", privacy)
        if slug is not None:
            pulumi.set(__self__, "slug", slug)
        if uuid is not None:
            pulumi.set(__self__, "uuid", uuid)

    @property
    @pulumi.getter(name="defaultMemberRole")
    def default_member_role(self) -> Optional[pulumi.Input[str]]:
        """
        Default role to assign to a team member.
        """
        return pulumi.get(self, "default_member_role")

    @default_member_role.setter
    def default_member_role(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_member_role", value)

    @property
    @pulumi.getter(name="defaultTeam")
    def default_team(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to assign this team to a user by default.
        """
        return pulumi.get(self, "default_team")

    @default_team.setter
    def default_team(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "default_team", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description to assign to the team.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="membersCanCreatePipelines")
    def members_can_create_pipelines(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether team members can create.
        """
        return pulumi.get(self, "members_can_create_pipelines")

    @members_can_create_pipelines.setter
    def members_can_create_pipelines(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "members_can_create_pipelines", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def privacy(self) -> Optional[pulumi.Input[str]]:
        """
        The privacy level to set the team too.
        """
        return pulumi.get(self, "privacy")

    @privacy.setter
    def privacy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "privacy", value)

    @property
    @pulumi.getter
    def slug(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the team.
        """
        return pulumi.get(self, "slug")

    @slug.setter
    def slug(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "slug", value)

    @property
    @pulumi.getter
    def uuid(self) -> Optional[pulumi.Input[str]]:
        """
        The UUID for the team.
        """
        return pulumi.get(self, "uuid")

    @uuid.setter
    def uuid(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uuid", value)
class Team(pulumi.CustomResource):
    # tfgen-generated resource class — regenerate rather than edit by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 default_member_role: Optional[pulumi.Input[str]] = None,
                 default_team: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 privacy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # Resource: team
        This resource allows you to create and manage teams.
        Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions
        Note: You must first enable Teams on your organization.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_buildkite as buildkite
        team = buildkite.Team("team",
            default_member_role="MEMBER",
            default_team=True,
            privacy="VISIBLE")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] default_member_role: Default role to assign to a team member.
        :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
        :param pulumi.Input[str] description: The description to assign to the team.
        :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[str] privacy: The privacy level to set the team too.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TeamArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # Resource: team
        This resource allows you to create and manage teams.
        Buildkite Documentation: https://buildkite.com/docs/pipelines/permissions
        Note: You must first enable Teams on your organization.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_buildkite as buildkite
        team = buildkite.Team("team",
            default_member_role="MEMBER",
            default_team=True,
            privacy="VISIBLE")
        ```
        :param str resource_name: The name of the resource.
        :param TeamArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-argument) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       default_member_role: Optional[pulumi.Input[str]] = None,
                       default_team: Optional[pulumi.Input[bool]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       privacy: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body: validate options, build the property bag and
        # register the resource with the engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.plugin_download_url is None:
            opts.plugin_download_url = _utilities.get_plugin_download_url()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TeamArgs.__new__(TeamArgs)
            # Required properties are only enforced when creating (no URN yet).
            if default_member_role is None and not opts.urn:
                raise TypeError("Missing required property 'default_member_role'")
            __props__.__dict__["default_member_role"] = default_member_role
            if default_team is None and not opts.urn:
                raise TypeError("Missing required property 'default_team'")
            __props__.__dict__["default_team"] = default_team
            __props__.__dict__["description"] = description
            __props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines
            __props__.__dict__["name"] = name
            if privacy is None and not opts.urn:
                raise TypeError("Missing required property 'privacy'")
            __props__.__dict__["privacy"] = privacy
            # Output-only properties are provider-populated.
            __props__.__dict__["slug"] = None
            __props__.__dict__["uuid"] = None
        super(Team, __self__).__init__(
            'buildkite:index/team:Team',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            default_member_role: Optional[pulumi.Input[str]] = None,
            default_team: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            members_can_create_pipelines: Optional[pulumi.Input[bool]] = None,
            name: Optional[pulumi.Input[str]] = None,
            privacy: Optional[pulumi.Input[str]] = None,
            slug: Optional[pulumi.Input[str]] = None,
            uuid: Optional[pulumi.Input[str]] = None) -> 'Team':
        """
        Get an existing Team resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] default_member_role: Default role to assign to a team member.
        :param pulumi.Input[bool] default_team: Whether to assign this team to a user by default.
        :param pulumi.Input[str] description: The description to assign to the team.
        :param pulumi.Input[bool] members_can_create_pipelines: Whether team members can create.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[str] privacy: The privacy level to set the team too.
        :param pulumi.Input[str] slug: The name of the team.
        :param pulumi.Input[str] uuid: The UUID for the team.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _TeamState.__new__(_TeamState)
        __props__.__dict__["default_member_role"] = default_member_role
        __props__.__dict__["default_team"] = default_team
        __props__.__dict__["description"] = description
        __props__.__dict__["members_can_create_pipelines"] = members_can_create_pipelines
        __props__.__dict__["name"] = name
        __props__.__dict__["privacy"] = privacy
        __props__.__dict__["slug"] = slug
        __props__.__dict__["uuid"] = uuid
        return Team(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="defaultMemberRole")
    def default_member_role(self) -> pulumi.Output[str]:
        """
        Default role to assign to a team member.
        """
        return pulumi.get(self, "default_member_role")

    @property
    @pulumi.getter(name="defaultTeam")
    def default_team(self) -> pulumi.Output[bool]:
        """
        Whether to assign this team to a user by default.
        """
        return pulumi.get(self, "default_team")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description to assign to the team.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="membersCanCreatePipelines")
    def members_can_create_pipelines(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether team members can create.
        """
        return pulumi.get(self, "members_can_create_pipelines")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def privacy(self) -> pulumi.Output[str]:
        """
        The privacy level to set the team too.
        """
        return pulumi.get(self, "privacy")

    @property
    @pulumi.getter
    def slug(self) -> pulumi.Output[str]:
        """
        The name of the team.
        """
        return pulumi.get(self, "slug")

    @property
    @pulumi.getter
    def uuid(self) -> pulumi.Output[str]:
        """
        The UUID for the team.
        """
        return pulumi.get(self, "uuid")
| 18,405 | 5,412 |
import sample
# NOTE(review): prompts for a name but never reads input; `sample.d` is then
# printed as-is — presumably a beginner exercise exercising module imports.
print('what is your name')
print(sample.d)
# Generated by Django 2.2 on 2022-01-08 21:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration creating Category, Course, Video, Review,
    # Instructor and Image for the profiles_api app.
    # NOTE(review): the upload_to values below literally contain
    # '<django.db.models.fields.related.ForeignKey>' — this looks like a repr
    # of a field object leaked into a format string in the model definition;
    # fix it in models.py and add a follow-up migration (do not edit this one).

    dependencies = [
        ('profiles_api', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=1023)),
                ('overview', models.CharField(max_length=1023)),
                ('price', models.IntegerField()),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateField(auto_now=True)),
                ('language', models.CharField(max_length=255)),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='profiles_api.Category')),
            ],
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=1023)),
                ('video_path', models.FileField(upload_to='uploads/<django.db.models.fields.related.ForeignKey>/videos/')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('review', models.CharField(max_length=511)),
                ('rating', models.IntegerField()),
                ('created_at', models.DateField(auto_now_add=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Instructor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image_path', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=1023)),
                ('rating', models.IntegerField()),
                ('courses', models.ManyToManyField(to='profiles_api.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[('cover', 'cover'), ('overview', 'overview')], max_length=255)),
                ('image_path', models.FileField(upload_to='uploads/<django.db.models.fields.related.ForeignKey>/images/')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles_api.Course')),
            ],
        ),
    ]
| 3,739 | 1,073 |
from core import (
config, db
)
class Repo:
    """Thin per-table facade over the ``core.db`` helpers.

    One instance is created per configured module (see the registration loop
    below this class), so callers can do e.g. ``repo.users.find_one(...)``.
    """

    def __init__(self, table):
        # Table name every operation of this instance is scoped to.
        self.table = table

    def create_one(self, model):
        """Insert *model* into this table; returns whatever ``db`` returns."""
        return db.create_one(self.table, model)

    def update_one(self, model):
        """Update *model* in this table; returns whatever ``db`` returns."""
        return db.update_one(self.table, model)

    def delete_one(self, id):
        """Delete the record identified by *id* from this table."""
        return db.delete_one(self.table, id)

    def find_one(self, where=None):
        """Return one record matching *where* (any record when None)."""
        return db.find_one(self.table, where=where)

    def find_many(self, where=None, limit=None):
        """Return records matching *where*, optionally capped at *limit*."""
        return db.find_many(self.table, where=where, limit=limit)
# Dynamically expose one Repo per configured module as a module-level global
# (e.g. ``repos.users``).  NOTE(review): assumes ``config.modules`` is an
# iterable of table-name strings — confirm against core.config.
for module in config.modules:
    globals()[module] = Repo(module)
import os
import json
import gzip
import zipfile
# Helpers
def read_asset(*paths):
    """Read a bundled text asset and return its stripped contents.

    :param paths: path components under this package's ``assets`` directory.
    :return: file contents with leading/trailing whitespace removed.
    """
    dirname = os.path.dirname(__file__)
    # Bug fix: pin the encoding — without it the read depends on the
    # platform's locale default and can break on non-UTF-8 systems.
    with open(os.path.join(dirname, "assets", *paths), encoding="utf-8") as file:
        return file.read().strip()
# General
UNDEFINED = object()  # sentinel distinct from None for "argument not passed"
VERSION = read_asset("VERSION")
COMPRESSION_FORMATS = ["zip", "gz"]
# JSON Schema profiles bundled with the package, loaded once at import time.
INQUIRY_PROFILE = json.loads(read_asset("profiles", "inquiry.json"))
PIPELINE_PROFILE = json.loads(read_asset("profiles", "pipeline.json"))
REPORT_PROFILE = json.loads(read_asset("profiles", "report.json"))
STATUS_PROFILE = json.loads(read_asset("profiles", "status.json"))
SCHEMA_PROFILE = json.loads(read_asset("profiles", "schema", "general.json"))
RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "general.json"))
TABULAR_RESOURCE_PROFILE = json.loads(read_asset("profiles", "resource", "tabular.json"))
PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "general.json"))
FISCAL_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "fiscal.json"))
TABULAR_PACKAGE_PROFILE = json.loads(read_asset("profiles", "package", "tabular.json"))
GEOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "general.json"))
TOPOJSON_PROFILE = json.loads(read_asset("profiles", "geojson", "topojson.json"))

# Defaults
DEFAULT_SCHEME = "file"
DEFAULT_FORMAT = "csv"
DEFAULT_HASHING = "md5"
DEFAULT_ENCODING = "utf-8"
DEFAULT_INNERPATH = ""
DEFAULT_COMPRESSION = ""
DEFAULT_HEADER = True
DEFAULT_HEADER_ROWS = [1]
DEFAULT_HEADER_JOIN = " "
DEFAULT_HEADER_CASE = True
DEFAULT_FLOAT_NUMBERS = False
DEFAULT_MISSING_VALUES = [""]
DEFAULT_LIMIT_ERRORS = 1000
DEFAULT_LIMIT_MEMORY = 1000
DEFAULT_BUFFER_SIZE = 10000
DEFAULT_SAMPLE_SIZE = 100
DEFAULT_ENCODING_CONFIDENCE = 0.5
DEFAULT_FIELD_CONFIDENCE = 0.9
DEFAULT_PACKAGE_PROFILE = "data-package"
DEFAULT_RESOURCE_PROFILE = "data-resource"
DEFAULT_TABULAR_RESOURCE_PROFILE = "tabular-data-resource"
DEFAULT_TRUE_VALUES = ["true", "True", "TRUE", "1"]
DEFAULT_FALSE_VALUES = ["false", "False", "FALSE", "0"]
DEFAULT_DATETIME_PATTERN = "%Y-%m-%dT%H:%M:%S%z"
DEFAULT_DATE_PATTERN = "%Y-%m-%d"
DEFAULT_TIME_PATTERN = "%H:%M:%S%z"
DEFAULT_BARE_NUMBER = True
DEFAULT_FLOAT_NUMBER = False
DEFAULT_GROUP_CHAR = ""
DEFAULT_DECIMAL_CHAR = "."
DEFAULT_SERVER_PORT = 8000
# Candidate types for type inference, tried in order (most specific first).
DEFAULT_CANDIDATES = [
    {"type": "yearmonth"},
    {"type": "geopoint"},
    {"type": "duration"},
    {"type": "geojson"},
    {"type": "object"},
    {"type": "array"},
    {"type": "datetime"},
    {"type": "time"},
    {"type": "date"},
    {"type": "integer"},
    {"type": "number"},
    {"type": "boolean"},
    {"type": "year"},
    {"type": "string"},
]

# Backports
# It can be removed after dropping support for Python 3.6 and Python 3.7
COMPRESSION_EXCEPTIONS = (
    (zipfile.BadZipFile, gzip.BadGzipFile)
    if hasattr(gzip, "BadGzipFile")
    else (zipfile.BadZipFile)
)
| 2,863 | 1,092 |
from pathlib import Path
import re
import sys
from uuid import uuid4
import polib
from antx import transfer
from botok import Text
class Po:
    """Wraps a ``polib.POFile`` and fills it from segmented Tibetan text lines."""

    def __init__(self):
        self.transfer = Transfer()
        self.file = polib.POFile()
        self.file.metadata = {
            'MIME-Version': '1.0',
            'Content-Type': 'text/plain; charset=utf-8',
            'Content-Transfer-Encoding': '8bit',
        }

    def _create_entry(self, msgid, msgstr="", msgctxt=None, comment=None, tcomment=None):
        """
        Append one entry to the PO file.

        :param msgid: string, the entry msgid.
        :param msgstr: string, the entry msgstr.
        :param msgctxt: string, the entry context.
        :param comment: string, the entry comment.
        :param tcomment: string, the entry translator comment.
        """
        entry = polib.POEntry(
            msgid=msgid,
            msgstr=msgstr,
            msgctxt=msgctxt,
            comment=comment,
            tcomment=tcomment
        )
        self.file.append(entry)

    def write_to_file(self, filename):
        """Save the accumulated entries to *filename*."""
        self.file.save(filename)

    def lines_to_entries(self, dump, po_file):
        """Turn each (line, uuid-context) pair of *dump* into a PO entry.

        The msgid is the word-segmented text with peydurma notes and
        ``[...]`` spans removed; the raw line is kept as translator comment.
        """
        lines = self.transfer.generate_entries(dump, po_file)
        for line, ctxt in lines:
            no_notes = self.remove_peydurma_notes(line)
            if no_notes == "":
                # No notes found: use the raw line as msgid and leave the
                # translator comment empty (preserved original behaviour).
                no_notes, line = line, no_notes
            no_notes = re.sub(r'\[.+?\]', '', no_notes)
            # segment into words with botok
            t = Text(no_notes)
            no_notes = t.tokenize_words_raw_text
            # format tokens
            no_notes = re.sub(r'([^།་_]) ([^_།་])', r'\g<1>␣\g<2>', no_notes)  # affixed particles
            no_notes = re.sub('_', ' ', no_notes)  # spaces
            self._create_entry(msgid=no_notes, msgctxt=ctxt, tcomment=line)

    def txt_to_po(self, filename):
        """Convert the ``.txt`` Path *filename* into a sibling ``.po`` file."""
        lines = filename.read_text(encoding='utf-8')
        outfile = filename.parent / (filename.stem + ".po")
        self.lines_to_entries(lines, outfile)
        self.write_to_file(outfile)

    @staticmethod
    def remove_pagination(line):
        """Strip ``[...]`` pagination spans; return "" when none are present."""
        note = re.split(r'(\[.*?\])', line)
        if len(note) > 1:
            # Bug fix: tokens produced by re.split start with '[' — the
            # backslash in the pattern is regex escaping, not part of the
            # match — so the old startswith('\\[') test never matched and
            # the bracketed notes were kept in the output.
            return ''.join([a for a in note if not a.startswith('[')])
        else:
            return ""

    @staticmethod
    def remove_peydurma_notes(line):
        """Strip ``<...>`` peydurma notes and ':' marks; "" when no notes."""
        note = re.split(r'(<.*?>)', line)
        if len(note) > 1:
            return ''.join([a for a in note if not a.startswith('<')]).replace(':', '')
        else:
            return ""
class Transfer:
    """
    limitation : in case a line is split on two lines in the updated .txt, it will keep
    the same uuid on the second line and only add a new uuid on the first line.
    """

    def __init__(self):
        # Bound reference to antx.transfer (annotation-transfer engine).
        self.transfer = transfer

    def generate_entries(self, dump, po_file):
        """Return [text, uuid] pairs for every line of *dump*, reusing uuids
        from an existing *po_file* when present and minting new ones otherwise."""
        if po_file.is_file():
            dump = self.extract_entries(dump, po_file)
        updated = self.add_missing_uuids(dump)
        entries = []
        for line in updated.strip().split('\n'):
            line = line.strip()
            line = self.remove_extra_uuid(line)
            # Lines look like "<text>—<uuid>—"; drop the trailing dash, then
            # split text from uuid on the remaining em-dash.
            txt, ctxt = line[:-1].split('—')
            entries.append([txt, ctxt])
        return entries

    def extract_entries(self, dump, po_file):
        """Carry uuids from *po_file* over onto the lines of *dump* via antx."""
        po_file = polib.pofile(po_file)
        po_entries = []
        for p in po_file:
            # Prefer the raw translator comment; fall back to the msgid with
            # segmentation marks (spaces) removed.
            if p.tcomment:
                line = p.tcomment
            else:
                line = p.msgid.replace(' ', '').replace(' ', ' ')
            po_entries.append([line, p.msgctxt])
        po_dump = '\n'.join([''.join((a, f'—{b}—')) for a, b in po_entries])
        pattern = [['uuid', '(—.+?—)']]
        transfered = self.transfer(po_dump, pattern, dump, 'txt')
        return transfered

    def add_missing_uuids(self, dump):
        """Append a fresh "—uuid—" marker to every line that lacks one."""
        lines = dump.strip().split('\n')
        for num, l in enumerate(lines):
            l = l.strip()
            if not l.endswith('—'):
                lines[num] = l + f'—{self.get_unique_id()}—'
        return '\n'.join(lines)

    @staticmethod
    def remove_extra_uuid(line):
        """If a line carries more than one uuid marker, drop the first one."""
        if line.count('—') > 2:
            idx1 = line.find('—')
            idx2 = line.find('—', idx1+1)
            return (line[:idx1] + line[idx2+1:]).strip()
        else:
            return line

    def get_unique_id(self):
        """Return a fresh 32-char hex uuid."""
        return uuid4().hex
if __name__ == '__main__':
    folder = 'literal/tibetan'
    # A stem given on the command line selects one file; otherwise every
    # .txt under the folder is converted, in sorted order.
    if len(sys.argv) > 1:
        targets = [Path(folder) / (sys.argv[1] + '.txt')]
    else:
        targets = sorted(Path(folder).glob('*.txt'))
    for file in targets:
        print(file)
        Po().txt_to_po(file)
| 4,747 | 1,541 |
import gluon_zoo as gz
import mxnet as mx
from mxnet import ndarray as nd
from mxnet import gluon
import tvm
from tvm.contrib import graph_runtime
import nnvm
import pickle
import sym_pass as spass
import dataset as ds
import sym_calib as calib
import sim_quant_helper as sim
import ops_generator as opg
import utils
import mrt as _mrt
def load_fname(suffix=None, with_ext=False):
    """Build the './data/trec[.suffix]' prefix and expand it via utils.extend_fname."""
    tag = "" if suffix is None else "." + suffix
    prefix = "./data/trec%s" % (tag)
    return utils.extend_fname(prefix, with_ext=with_ext)
batch_size = 16
ctx = mx.gpu()
# NOTE(review): shape (38, batch_size) — presumably 38 token ids per TREC
# sentence, sequence-major; confirm against ds.load_trec.
inputs_ext = {'data': {
    'shape': (38, batch_size)
}}
inputs = [mx.sym.var(n) for n in inputs_ext]
utils.log_init()
data_iter = ds.load_trec(batch_size)

def data_iter_func():
    """Return the next (data, label) batch from the TREC iterator."""
    return next(data_iter)

data, label = data_iter_func()
sym_file, param_file = load_fname()
net1 = utils.load_model(sym_file, param_file, inputs, ctx=ctx)

def trec(data):
    """Run the original float model on one batch."""
    res = net1(data.as_in_context(ctx))
    return res

sym, params = mx.sym.load(sym_file), nd.load(param_file)
sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
if True:  # MRT quantization path; the calib path below is kept for reference
    mrt = _mrt.MRT(sym, params, inputs_ext)
    mrt.set_data('data', data)
    mrt.calibrate(ctx=ctx)
    mrt.set_input_prec('data', 16)
    mrt.set_fixed('data')
    mrt.set_output_prec(8)
    qsym, qparams, inputs_ext = mrt.quantize()
else:
    inputs_ext['data']['data'] = data
    th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
    qsym, qparams, _ = calib.pure_int8_quantize(sym, params, inputs_ext, th_dict)
net2 = gluon.nn.SymbolBlock(qsym, inputs)
utils.load_parameters(net2, qparams, ctx=ctx)

def quantize(data):
    """Run the quantized model on one batch (input rescaled to int domain first)."""
    data = sim.load_real_data(data, 'data', inputs_ext)
    res = net2(data.as_in_context(ctx))
    return res

quant_sym, quant_params, quant_ext = load_fname("sym.quantize", with_ext=True)
# Bug fix: close the file deterministically instead of leaking the handle
# from a bare open(...).write(...).
with open(quant_sym, "w") as quant_file:
    quant_file.write(qsym.tojson())
if False:  # one-off: dump a single-sample model plus the embedding op
    inputs_ext['data']['shape'] = (38, 1)
    data = data[:, 0].reshape(38, 1)
    _mrt.std_dump(qsym, qparams, inputs_ext, data, "trec",
                  batch=True, data_dtype="int32", max_num=1000,
                  dump_ops=["sentimentnet0_embedding0_fwd"])
    opg.dump_file("take",
                  ["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.in.npy",
                   "/data/std_out/trec/sentimentnet0_embedding0_fwd_1.mrt.dump.in.npy"],
                  ["/data/std_out/trec/sentimentnet0_embedding0_fwd_0.mrt.dump.out.npy"],
                  "/data/std_out/trec/sentimentnet0_embedding0_fwd.attr")
    exit()
if False:  # one-off: dump per-op outputs batch by batch (loops forever)
    while True:
        data, _ = next(data_iter)
        data = sim.load_real_data(data, 'data', inputs_ext)
        inputs_ext['data']['data'] = data
        spass.sym_dump_ops(qsym, qparams, inputs_ext,
                           ctx=mx.gpu(3))
    exit()
utils.multi_eval_accuracy(trec, data_iter_func,
                          quantize,
                          iter_num=1000)
| 2,845 | 1,136 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011, Marine Biological Laboratory
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
from stat import * # ST_SIZE etc
import sys
import shutil
import types
import time
import random
import logging
import csv, json
import configparser as ConfigParser
import fastalibAV as fastalib
import datetime
today = str(datetime.date.today())
import subprocess
import pymysql as MySQLdb
"""
"""
# class FastaReader:
# def __init__(self,file_name=None):
# self.file_name = file_name
# self.h = open(self.file_name, 'rb')
# #self.h = open(self.file_name)
# self.seq = ''
# self.id = None
#
# def next(self):
# def read_id():
# #return self.h.readline().decode('utf-8').strip()[1:]
# #print(self.h.readline())
# return self.h.readline().strip()[1:]
#
# def read_seq():
# #ret = bytearray(b'')
# ret = ''
# #ret = ''
# while True:
# line = self.h.readline()
# print(str(line))
# while len(line) and not len(line.strip()):
# # found empty line(s)
#
# line = self.h.readline()
# print(str(line))
# if not len(line):
# # EOF
# break
#
# if str(line).startswith('>'):
# # found new defline: move back to the start
# self.h.seek(-len(line), os.SEEK_CUR)
# break
#
# else:
# ret += str(line).strip()
#
# return ret
#
# self.id = read_id()
# self.seq = read_seq()
#
# if self.id:
# return True
#
def get_data(args):
    # Placeholder: database retrieval is not implemented (the MySQL wiring
    # it would use is commented out in __main__ below).
    pass
def parse_matrix(args):
    """Parse a tab-delimited taxonomy-by-dataset count matrix.

    The first non-VAMPS line is treated as a header of dataset names; each
    following line holds a taxon name and per-dataset counts. Missing or
    non-integer counts are coerced to zero with a warning.

    :param args: argparse namespace; only ``args.file`` (path) is used
    :return: tuple ``(clean_datasets, project_count, max_ds_count)`` where
        clean_datasets maps dataset name -> total count, project_count is
        the grand total and max_ds_count the largest per-dataset total
    """
    print('running matrix')
    n = 0
    dirty_datasets = {}
    clean_datasets = {}
    project_count = 0
    max_ds_count = 0
    with open(args.file, mode='r') as infile:
        for line in infile:
            items = line.strip().split('\t')
            # Skip banner lines emitted by VAMPS exports.
            if not line or items[0][:5] == 'VAMPS':
                print('found vamps')
                continue
            if n == 0:
                # Header row: everything after the first column is a dataset name.
                ds_items = items[1:]
                for ds in ds_items:
                    dirty_datasets[ds] = 0
            else:
                counts = items[1:]
                for i, cnt in enumerate(counts):
                    if not cnt:
                        cnt = 0
                        print('MISSING COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')')
                    try:
                        cnt = int(cnt)
                    except (ValueError, TypeError):
                        # Narrowed from a bare except: only genuine conversion
                        # failures are coerced; KeyboardInterrupt etc. propagate.
                        cnt = 0
                        print('NON-INTEGER COUNT - Setting to zero (line:'+str(n+1)+';col:'+str(i+2)+')')
                    project_count += int(cnt)
                    dirty_datasets[ds_items[i]] += int(cnt)
            n += 1
    # Drop entries produced by empty header cells.
    for ds in dirty_datasets:
        if not ds or ds == '':
            print('REMOVING EMPTY Dataset and data')
        else:
            clean_datasets[ds] = dirty_datasets[ds]
    print('clean datasets', clean_datasets.keys())
    for ds in clean_datasets:
        if clean_datasets[ds] > max_ds_count:
            max_ds_count = clean_datasets[ds]
    return (clean_datasets, project_count, max_ds_count)
def find_dataset_name(args, id):
    """
    Extract the dataset name from a fasta defline id.

    The id is split on ``args.separator`` (whitespace when the separator is
    the literal string 'space'); a trailing ``_<int>`` sequence index is
    stripped from the first field.

    This should be the same fxn as in demultiplex
    """
    # adjust to your specific defline
    if args.verbose:
        print(id)
    if args.separator == 'space':
        sampleName_items = id.split()[0].split('_')
    else:
        sampleName_items = id.split(args.separator)[0].split('_')
    test = sampleName_items[-1]
    try:
        int(test)
        # Last chunk is a per-read index: drop it.
        sampleName = '_'.join(sampleName_items[:-1])
        if args.verbose:
            print('INT', sampleName_items[-1])
    except ValueError:
        # Narrowed from a bare except: only a failed int() conversion means
        # "no trailing index"; anything else should propagate.
        sampleName = '_'.join(sampleName_items)
        if args.verbose:
            print('NO INT', sampleName_items[-1])
    return sampleName
def parse_fasta(args):
    """Count sequences per dataset in a fasta file.

    Walks the fasta with fastalib.SequenceSource, derives a dataset name
    from each defline via find_dataset_name(), and tallies one hit per
    sequence.

    :param args: argparse namespace (uses file, separator, verbose)
    :return: tuple (datasets, project_count, max_ds_count)
    """
    print('running fasta')
    reader = fastalib.SequenceSource(args.file)
    datasets = {}
    project_count = 0
    max_ds_count = 0
    # Example deflines this has to cope with:
    #   308_1|M01028:283:000000000-CPKFG:1:1101:16551:1784 1:N:0:15|o:99|...
    #   PC.354_3 FLP3FBN01EEWKD orig_bc=AGCACGAGCCTA new_bc=AGCACGAGCCTA bc_diffs=0
    while reader.next():
        project_count += 1
        if args.separator == 'space':
            defline_pts = reader.id.split()
        else:
            defline_pts = reader.id.split(args.separator)
        dataset = find_dataset_name(args, reader.id)
        seq_id = defline_pts[1]
        if args.verbose:
            print(dataset)
            print(seq_id)
            sys.exit()
        datasets[dataset] = datasets.get(dataset, 0) + 1
    if datasets:
        max_ds_count = max(datasets.values())
    return (datasets, project_count, max_ds_count)
def write_config(args, datasets, project_count, max_ds_count):
    """Write the project INFO.config ini file into args.project_dir.

    :param args: argparse namespace (uses project_dir, project, owner, type)
    :param datasets: dict mapping dataset name -> sequence count
    :param project_count: total sequence count over all datasets
    :param max_ds_count: largest single-dataset count
    """
    ini_file = os.path.join(args.project_dir, 'INFO.config')
    print('Writing INFO.config file:', ini_file)
    # Context manager guarantees the handle is closed even if a write fails
    # (the original leaked the handle on error).
    with open(ini_file, 'w') as f:
        f.write('[MAIN]'+"\n")
        f.write('project_name='+args.project+"\n")
        f.write("total_seq_count="+str(project_count)+"\n")
        f.write('owner='+args.owner+"\n")
        f.write("max_dataset_count="+str(max_ds_count)+"\n")
        f.write('public=0'+"\n")
        f.write('project_dir='+args.project_dir+"\n")
        f.write('type='+args.type+"\n")
        f.write('number_of_datasets='+str(len(datasets))+"\n")
        f.write("\n")
        f.write('[MAIN.dataset]'+"\n")
        for ds in datasets:
            f.write(ds+'='+str(datasets[ds])+"\n")
if __name__ == '__main__':
    import argparse

    # NOTE(review): this usage text looks copied from 5-vamps-clean-db.py and
    # does not describe the options defined below -- confirm and rewrite.
    myusage = """usage: 5-vamps-clean-db.py [options]
    where
        -pid/--project_id  clean this pid only
        -p/--project_name  clean this name only
        -site/--site       vamps, vampsdev or localhost
        -all/--all         Remove ALL Data for fresh install
                           Be Careful -- will remove ALL data from db
    """
    parser = argparse.ArgumentParser(description="", usage=myusage)
    parser.add_argument("-f", "--file",
                        required=True, action="store", dest="file",
                        help="""ProjectID""")
    parser.add_argument("-p", "--project",
                        required=True, action='store', dest="project",
                        help=" ")
    parser.add_argument("-d", "--project_dir",
                        required=True, action='store', dest="project_dir",
                        help=" ")
    parser.add_argument("-t", "--type",
                        required=True, action='store', dest="type",
                        help=" ")
    parser.add_argument("-u", "--user",
                        required=True, action='store', dest="owner",
                        help=" ")
    parser.add_argument("-sep", "--separator",
                        required=False, action='store', dest="separator", default='space',
                        help=" ")
    parser.add_argument("-v", "--verbose",
                        required=False, action="store_true", dest="verbose", default=False,
                        help="""JSON Files Directory""")
    args = parser.parse_args()

    if args.type == 'fasta':
        (datasets, project_count, max_ds_count) = parse_fasta(args)
    elif args.type == 'matrix':
        (datasets, project_count, max_ds_count) = parse_matrix(args)
    else:
        # Previously an unknown --type fell through to write_config with
        # 'datasets' undefined and died with a NameError; fail clearly instead.
        sys.exit("Unknown --type '%s': expected 'fasta' or 'matrix'" % args.type)
    write_config(args, datasets, project_count, max_ds_count)
    print('Finished')
| 9,856 | 3,270 |
import os
import os.path
def get_location_for_code():
    """Return the base directory the script operates on."""
    # Hard-coded workspace path; swap in os.getcwd() to use the current
    # directory instead (see the commented alternative in the original).
    return '/home/joseph/Desktop'
def select_case():
    """Return the currently selected case number (fixed at 4)."""
    return 4
home_address = get_location_for_code()
case = select_case()

# Generators have no .next() method in Python 3 -- the original
# `os.walk(...).next()` raises AttributeError. Use the builtin next().
root_dir = next(os.walk(home_address))[0]   # the start directory itself
sub_dirs = next(os.walk(home_address))[1]   # immediate sub-directories
files = next(os.walk(home_address))[2]      # files directly inside it
| 468 | 176 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `cliently` package."""
import unittest
from cliently import client
import os
# TODO: This hard coding needs to be fixed with a sandbox when doing things for realsies
# Credentials and fixtures come from the environment so secrets stay out of
# the repo; all of these must be set for the live-API tests below to pass.
ACCESS_TOKEN=os.getenv("FEEDLY_ACCESS_TOKEN")
UID = os.getenv("FEEDLY_UID")
BASE_URL = "https://cloud.feedly.com"
VALID_STREAM_ID = os.getenv("FEEDLY_STREAM_ID")
VALID_JOURNAL = os.getenv("FEEDLY_TEST_JOURNAL")
VALID_ENTRY_IDS = os.getenv("FEEDLY_ENTRY_IDS")
class TestCliently(unittest.TestCase):
    """Live-API tests for the `cliently` package."""

    def setUp(self):
        """Build a client against the real Feedly cloud endpoint."""
        self.myclient = client.FeedlyClient(access_token=ACCESS_TOKEN, base_url=BASE_URL)

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def test_set_access_token(self):
        """The constructor stores the access token."""
        self.assertEqual(self.myclient.access_token, ACCESS_TOKEN)

    def test_set_base_url(self):
        """The constructor stores the base_url."""
        self.assertEqual(self.myclient.base_url, BASE_URL)

    def test_get_user_subscriptions(self):
        """The known stream id and journal title appear among subscriptions."""
        subscriptions = self.myclient.get_user_subscriptions()
        stream_ids = set()
        journal_titles = set()
        for sub in subscriptions:
            stream_ids.add(sub["categories"][0]["id"])
            journal_titles.add(sub["title"])
        self.assertIn(VALID_JOURNAL, journal_titles)
        self.assertIn(VALID_STREAM_ID, stream_ids)

    def test_count_get_entry_ids(self):
        """maxcount=1 yields exactly one entry id."""
        ids = self.myclient.get_entry_ids(VALID_STREAM_ID, maxcount=1)["ids"]
        self.assertEqual(len(ids), 1)

    def test_continuation_entry_ids(self):
        """A continuation token pages through to one more entry id."""
        first_page = self.myclient.get_entry_ids(VALID_STREAM_ID, maxcount=1)
        second_page = self.myclient.get_entry_ids(
            VALID_STREAM_ID, continuation=first_page["continuation"], maxcount=1)
        self.assertEqual(len(second_page["ids"]), 1)

    def test_mark_read(self):
        """Marking entries as read returns an OK response."""
        self.assertTrue(self.myclient.mark_article_read(VALID_ENTRY_IDS).ok)

    def test_get_single_entry_from_id(self):
        """A single entry round-trips through its entryId."""
        entries = self.myclient.get_entries_from_ids(VALID_ENTRY_IDS[0])
        self.assertEqual(entries[0]["id"], VALID_ENTRY_IDS[0])

    def test_get_multiple_entries_from_ids(self):
        """Multiple entries round-trip through their entryIds."""
        returned_ids = [entry["id"] for entry in self.myclient.get_entries_from_ids(VALID_ENTRY_IDS)]
        self.assertEqual(returned_ids, VALID_ENTRY_IDS)
from .theExceptions import ValidationError
class Validator(object):
    """
    All validators must inherit from this class.
    """

    def __init__(self, *args, **kwargs):
        pass

    def validate(self, value):
        """
        The only function that a validator must implement.
        Must return True if everything went well or raise a ValidationError
        otherwise.
        """
        # The original raised the NotImplemented *constant*, which is not an
        # exception type and itself blows up with a TypeError when raised.
        raise NotImplementedError("Should be implemented in child")

    def __str__(self):
        """
        Redefine in child classes to give a quick overview of the validator.
        """
        return self.__class__.__name__
class NotNull(Validator):
"""
Checks that the Field has a non null value
"""
def validate(self, value, zero=True, empty_string=True):
if value is None or (value == 0 is zero) or (value == "" and empty_string):
raise ValidationError("Field can't have a null value: '%s'" % value)
return True
class Email(Validator):
"""
Checks if the field contains an emailaddress
"""
def validate(self, value):
import re
pattern = '^[A-z0-9._-]+@[A-z0-9.-]+\.[A-z]{2,4}$'
if re.match(pattern, value) is None:
raise ValidationError("The email address: %s is invalid" % value)
return True
class Numeric(Validator):
"""
checks if the value is numerical
"""
def validate(self, value):
try:
float(value)
except:
raise ValidationError("%s is not valid numerical value" % value)
return True
class Int(Validator):
"""
The value must be an integer
"""
def validate(self, value):
if not isinstance(value, int):
raise ValidationError("%s is not a valid integer" % value)
return True
class Bool(Validator):
"""
The value must be a boolean
"""
def validate(self, value):
if not isinstance(value, bool):
raise ValidationError("%s is not a valid boolean" % value)
return True
class String(Validator):
"""
The value must be a string or unicode
"""
def validate(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
raise ValidationError("%s is not a valid string" % value)
return True
class Enumeration(Validator):
"""
The value must be in the allowed ones
"""
def __init__(self, allowed):
self.allowed = set(allowed)
def validate(self, value):
if value not in self.allowed:
raise ValidationError("%s is not among the allowed values %s" % (value, self.allowed))
return True
class Range(Validator):
"""
The value must une [lower, upper] range
"""
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def validate(self, value):
if value < self.lower or value > self.upper:
raise ValidationError("%s is not in [%s, %s]" % (value, self.lower, self.upper))
def __str__(self):
return "%s[%s, %s]" % (self.__class__.__name__, self.minLen, self.maxLen)
class Length(Validator):
"""
Validates that the value length is between given bounds
"""
def __init__(self, min_length, max_length):
self.minlength = min_length
self.maxlength = max_length
def validate(self, value):
try:
length = len(value)
except:
raise ValidationError("Field '%s' of type '%s' has no length" % (value, type(value)))
if self.min_length <= len(value) and len(value) <= self.max_length:
return True
raise ValidationError("Field must have a length in ['%s';'%s'] got: '%s'" % (self.min_length, self.max_length, len(value)))
def __str__(self):
return "%s[%s, %s]" % (self.__class__.__name__, self.min_length, self.max_length)
| 3,907 | 1,115 |
#!/usr/bin/env python
import sys
import tensorflow as tf
from image_utils import create_raw_image_loader
from model import OpenNsfwModel, InputType
class YahooNSFWClassifier:
    """Wraps Yahoo's open_nsfw model behind a simple classify() call.

    Uses the TF1 graph/session API: the graph and session are built once
    in __init__ and reused for every classification.
    """

    def __init__(self, weights_path):
        # Build the model into a dedicated session; weights are loaded from
        # the .npy file at weights_path by OpenNsfwModel.build().
        self.session = tf.Session()
        self.model = OpenNsfwModel()
        self.model.build(weights_path=weights_path)
        self.session.run(tf.global_variables_initializer())
        # Preprocessing callable turning a raw image into the model's
        # expected input tensor.
        self.fn_load_image = create_raw_image_loader()

    def classify(self, image):
        # Returns the raw prediction array; index [0][1] is presumably the
        # NSFW probability (see the __main__ usage below) -- TODO confirm
        # against OpenNsfwModel.
        image = self.fn_load_image(image)
        predictions = self.session.run(self.model.predictions, feed_dict={self.model.input: image})
        return predictions
if __name__ == "__main__":
from PIL import Image
classifier = YahooNSFWClassifier("data/open_nsfw-weights.npy")
print("NSFW score: %f" % classifier.classify(Image.open(sys.argv[1]))[0][1])
| 859 | 279 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2017-2020 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Subsytem handling localization-related data communication
"""
import collections
import logging
import struct
from cflib.crtp.crtpstack import CRTPPacket
from cflib.crtp.crtpstack import CRTPPort
from cflib.utils.callbacks import Caller
__author__ = 'Bitcraze AB'
__all__ = ['Localization', 'LocalizationPacket']
logger = logging.getLogger(__name__)
# A generic location packet contains type and data. When received the data
# may be decoded by the lib.
#   type:     one of the message-type constants on Localization
#   raw_data: the undecoded payload bytes
#   data:     decoded payload, or None when no decoder exists for the type
LocalizationPacket = collections.namedtuple('localizationPacket',
                                            ['type', 'raw_data', 'data'])
class Localization():
    """
    Handle localization-related data communication with the Crazyflie
    """

    # Implemented channels
    POSITION_CH = 0
    GENERIC_CH = 1

    # Location message types for generic channel
    RANGE_STREAM_REPORT = 0
    RANGE_STREAM_REPORT_FP16 = 1
    LPS_SHORT_LPP_PACKET = 2
    EMERGENCY_STOP = 3
    EMERGENCY_STOP_WATCHDOG = 4
    COMM_GNSS_NMEA = 6
    COMM_GNSS_PROPRIETARY = 7
    EXT_POSE = 8
    EXT_POSE_PACKED = 9

    def __init__(self, crazyflie=None):
        """
        Initialize the Extpos object.
        """
        self._cf = crazyflie
        # Fired with a LocalizationPacket for every incoming packet.
        self.receivedLocationPacket = Caller()
        self._cf.add_port_callback(CRTPPort.LOCALIZATION, self._incoming)

    def _incoming(self, packet):
        """
        Callback for data received from the copter.
        """
        if len(packet.data) < 1:
            # Message fix: the two concatenated halves were missing a
            # separating space and logged 'incorrectlength (...)'.
            logger.warning('Localization packet received with incorrect '
                           'length (length is {})'.format(len(packet.data)))
            return

        pk_type = struct.unpack('<B', packet.data[:1])[0]
        data = packet.data[1:]

        # Decoding the known packet types
        # TODO: more generic decoding scheme?
        decoded_data = None
        if pk_type == self.RANGE_STREAM_REPORT:
            # Payload is a stream of (anchor id:u8, distance:f32) pairs,
            # 5 bytes each.
            if len(data) % 5 != 0:
                # Typo fix: 'lenght' -> 'length'.
                logger.error('Wrong range stream report data length')
                return
            decoded_data = {}
            raw_data = data
            for i in range(int(len(data) / 5)):
                anchor_id, distance = struct.unpack('<Bf', raw_data[:5])
                decoded_data[anchor_id] = distance
                raw_data = raw_data[5:]

        pk = LocalizationPacket(pk_type, data, decoded_data)
        self.receivedLocationPacket.call(pk)

    def send_extpos(self, pos):
        """
        Send the current Crazyflie X, Y, Z position. This is going to be
        forwarded to the Crazyflie's position estimator.
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.POSITION_CH
        pk.data = struct.pack('<fff', pos[0], pos[1], pos[2])
        self._cf.send_packet(pk)

    def send_extpose(self, pos, quat):
        """
        Send the current Crazyflie pose (position [x, y, z] and
        attitude quaternion [qx, qy, qz, qw]). This is going to be forwarded
        to the Crazyflie's position estimator.
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.GENERIC_CH
        pk.data = struct.pack('<Bfffffff',
                              self.EXT_POSE,
                              pos[0], pos[1], pos[2],
                              quat[0], quat[1], quat[2], quat[3])
        self._cf.send_packet(pk)

    def send_short_lpp_packet(self, dest_id, data):
        """
        Send ultra-wide-band LPP packet to dest_id
        """
        pk = CRTPPacket()
        pk.port = CRTPPort.LOCALIZATION
        pk.channel = self.GENERIC_CH
        pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data
        self._cf.send_packet(pk)
| 4,823 | 1,637 |
class BadCallError(ValueError):
    """Raised when a method is invoked the wrong way."""

    def __init__(self, message=None, documentation_web=None):
        if message is None:
            # Implicit string concatenation was missing the space between
            # 'for' and 'more', producing '...documentation formore...'.
            message = ('Wrong way of invoking this method. Check the online documentation '
                       'for more information.')
        if documentation_web is not None:
            # Swap the trailing '.' for ': <url>'.
            message = message[:-1] + ': {}'.format(documentation_web)
        super().__init__(message)
class NotImplementedError(NotImplementedError):
    """Raised for features that are not implemented yet.

    NOTE(review): this intentionally(?) shadows the builtin
    NotImplementedError within this module -- confirm that is desired.
    """

    def __init__(self, message=None, issues_web=None):
        if message is None:
            if issues_web is not None:
                # Implicit concatenation was missing the space before '{}'
                # ('...issue in{} asking...'); also fixed the 'implemeted'
                # typo in the user-visible text.
                message = ('It has not been implemented yet. Write a new issue in '
                           '{} asking for it.'.format(issues_web))
        super().__init__(message)
class LibraryNotFound(NotImplementedError):
    """Raised when a required python library is missing."""

    def __init__(self, library):
        super().__init__('The python library {} was not found.'.format(library))
class InputArgumentError(NotImplementedError):
    """Raised when a method or class receives an invalid argument value."""

    def __init__(self, argument, method, documentation_web=None):
        # Implicit concatenation was missing the space before 'Check'
        # ('..."{}".Check the online...').
        message = ('Invalid value for input argument "{}" in method or class "{}". '
                   'Check the online documentation for more information.'.format(argument, method))
        if documentation_web is not None:
            message = message[:-1] + ': {}'.format(documentation_web)
        super().__init__(message)
| 1,396 | 369 |
# import the necessary packages
import cv2
import sys
import os
import numpy as np
import argparse
sys.path.append(os.path.abspath('./modules/'))
import detection
import helpers
# NOTE(review): indentation of the debug sections below was reconstructed
# from the data flow (embossed/embossed_edges are only defined under
# --debug) -- confirm the block extents against the original file.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--debug", type=bool, default=False,
                help="experiment with different image filters")
ap.add_argument("-i", "--imagepath", type=str,
                default=None,
                help="path to test image")
args = ap.parse_args()

# Fall back to a random bundled test image when no path is given.
if(args.imagepath is None):
    image_path = helpers.get_random_test_image()
else:
    image_path = args.imagepath

image = cv2.imread(image_path, 1)
ball = detection.GolfBallDetection(image)
if ball:
    # Bounding box of the detected ball.
    (x, y, w, h) = ball
    x_plus_w = x+w
    y_plus_h = y+h
    # Crop the ball region for the filter experiments below.
    section = image.copy()[y:y+h, x:x+w]
    cv2.imshow("Crop to ball", section)
    # generating the kernels
    kernel1 = np.array([[0, -1, -1],  # kernel for embossing bottom left side
                        [1, 0, -1],
                        [1, 1, 0]])
    # you can generate kernels for embossing top as well
    gray = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    if(args.debug):
        cv2.imshow("Gray", gray)
    if(args.debug):
        # Offset by 128 so negative emboss responses stay visible.
        embossdepth = np.ones((h, w), np.uint8) * 128
        embossed = cv2.add(cv2.filter2D(gray, -1, kernel1), embossdepth)  # emboss on bottom left side
        cv2.imshow("Embossed", embossed)
    if(args.debug):
        # Inverted Otsu threshold of the crop.
        _, binary = cv2.threshold(cv2.cvtColor(section, cv2.COLOR_BGR2GRAY), 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        binary = 255 - binary
        cv2.imshow("Binary", binary)
    if(args.debug):
        edges = cv2.Canny(binary, 100, 200)
        cv2.imshow("Edges", edges)
    if(args.debug):
        embossed_edges = cv2.Canny(embossed, 100, 200)
        cv2.imshow("Embossed Edges", embossed_edges)
        # Keep roundish contours with enough area as candidate dimples.
        contours, heirarchy = cv2.findContours(embossed_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        im2 = cv2.cvtColor(section, cv2.COLOR_BGR2GRAY)
        dimples = []
        for contour in contours:
            approx = cv2.approxPolyDP(contour, 0.01*cv2.arcLength(contour, True), True)
            area = cv2.contourArea(contour)
            if ((len(approx) > 8) & (area > 30)):
                dimples.append(contour)
        cv2.drawContours(im2, dimples, -1, (0, 255, 0), 3)
        cv2.imshow("Contours", im2)
    # Prefer drawing a fitted circle; fall back to the bounding box.
    circle = detection.get_ball_circle(image, x, y, w, h)
    if circle is not None:
        detection.draw_circle_around_ball(image, circle, (x, y))
    else:
        detection.draw_boundaries_and_label(image, (x, y), (w, h), (0, 255, 0), 'Ball')
else:
    print('[INFO] Failed to find ball')
cv2.imshow("Detecting features", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
from __future__ import division, with_statement, absolute_import
__copyright__ = "Copyright (C) 2012-16 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
import numpy as np
from pytools import ImmutableRecord, memoize_method
from loopy.diagnostic import LoopyError
import logging
logger = logging.getLogger(__name__)
from pytools.persistent_dict import PersistentDict
from loopy.tools import LoopyKeyBuilder
from loopy.version import DATA_MODEL_VERSION
# {{{ object array argument packing
class _PackingInfo(ImmutableRecord):
    """
    .. attribute:: name
    .. attribute:: sep_shape
    .. attribute:: subscripts_and_names

        A list of type ``[(index, unpacked_name), ...]``.

    .. attribute:: is_written

        True if the argument is written by the kernel (set by
        SeparateArrayPackingController.__init__).
    """
class SeparateArrayPackingController(object):
    """For argument arrays with axes tagged to be implemented as separate
    arrays, this class provides preprocessing of the incoming arguments so that
    all sub-arrays may be passed in one object array (under the original,
    un-split argument name) and are unpacked into separate arrays before being
    passed to the kernel.

    It also repacks outgoing arrays of this type back into an object array.
    """

    def __init__(self, kernel):
        # map from arg name to _PackingInfo; only args that actually have
        # separate-array axes get an entry.
        self.packing_info = {}

        from loopy.kernel.array import ArrayBase
        for arg in kernel.args:
            if not isinstance(arg, ArrayBase):
                continue

            if arg.shape is None or arg.dim_tags is None:
                continue

            subscripts_and_names = arg.subscripts_and_names()

            if subscripts_and_names is None:
                continue

            self.packing_info[arg.name] = _PackingInfo(
                    name=arg.name,
                    sep_shape=arg.sep_shape(),
                    subscripts_and_names=subscripts_and_names,
                    is_written=arg.name in kernel.get_written_variables())

    def unpack(self, kernel_kwargs):
        """Replace each packed object array in *kernel_kwargs* by its
        individual sub-arrays (no-op when nothing needs packing)."""
        if not self.packing_info:
            return kernel_kwargs

        kernel_kwargs = kernel_kwargs.copy()

        for packing_info in six.itervalues(self.packing_info):
            arg_name = packing_info.name
            if packing_info.name in kernel_kwargs:
                arg = kernel_kwargs[arg_name]
                for index, unpacked_name in packing_info.subscripts_and_names:
                    assert unpacked_name not in kernel_kwargs
                    kernel_kwargs[unpacked_name] = arg[index]
                del kernel_kwargs[arg_name]

        return kernel_kwargs

    def pack(self, outputs):
        """Collect written sub-arrays back into object arrays under the
        original argument names."""
        if not self.packing_info:
            return outputs

        for packing_info in six.itervalues(self.packing_info):
            if not packing_info.is_written:
                continue

            # np.object was removed in NumPy 1.24; the builtin `object`
            # dtype is the supported spelling and behaves identically.
            result = outputs[packing_info.name] = \
                    np.zeros(packing_info.sep_shape, dtype=object)

            for index, unpacked_name in packing_info.subscripts_and_names:
                result[index] = outputs.pop(unpacked_name)

        return outputs
# }}}
# {{{ KernelExecutorBase
# Process-persistent cache of fully typed-and-scheduled kernels, keyed per
# executor type / kernel / argument dtypes (see KernelExecutorBase); the
# DATA_MODEL_VERSION suffix invalidates entries when the data model changes.
typed_and_scheduled_cache = PersistentDict(
        "loopy-typed-and-scheduled-cache-v1-"+DATA_MODEL_VERSION,
        key_builder=LoopyKeyBuilder())
class KernelExecutorBase(object):
    """An object connecting a kernel to a :class:`pyopencl.Context`
    for execution.

    .. automethod:: __init__
    .. automethod:: __call__
    """

    def __init__(self, kernel):
        """
        :arg kernel: a loopy.LoopKernel
        """
        self.kernel = kernel

        # Handles args implemented as separate arrays (see
        # SeparateArrayPackingController above).
        self.packing_controller = SeparateArrayPackingController(kernel)

        # Names of all args the kernel writes -- these are the outputs.
        self.output_names = tuple(arg.name for arg in self.kernel.args
                if arg.name in self.kernel.get_written_variables())

        # If any arg dtype is unknown, types must be gathered from the
        # actual arguments at call time (see arg_to_dtype_set).
        self.has_runtime_typed_args = any(
                arg.dtype is None
                for arg in kernel.args)

    def get_typed_and_scheduled_kernel_uncached(self, arg_to_dtype_set):
        """Apply *arg_to_dtype_set*, run type inference, and (if the kernel
        has no schedule yet) preprocess and schedule it."""
        from loopy.kernel.tools import add_dtypes

        kernel = self.kernel

        if arg_to_dtype_set:
            var_to_dtype = {}
            for var, dtype in arg_to_dtype_set:
                # Translate implementation arg names back to kernel arg
                # names where possible.
                try:
                    dest_name = kernel.impl_arg_to_arg[var].name
                except KeyError:
                    dest_name = var

                try:
                    var_to_dtype[dest_name] = dtype
                except KeyError:
                    raise LoopyError("cannot set type for '%s': "
                            "no known variable/argument with that name"
                            % var)

            kernel = add_dtypes(kernel, var_to_dtype)

            from loopy.type_inference import infer_unknown_types
            kernel = infer_unknown_types(kernel, expect_completion=True)

        if kernel.schedule is None:
            from loopy.preprocess import preprocess_kernel
            kernel = preprocess_kernel(kernel)

            from loopy.schedule import get_one_scheduled_kernel
            kernel = get_one_scheduled_kernel(kernel)

        return kernel

    @memoize_method
    def get_typed_and_scheduled_kernel(self, arg_to_dtype_set):
        """Memoized and (when caching is enabled) disk-cached wrapper
        around get_typed_and_scheduled_kernel_uncached."""
        from loopy import CACHING_ENABLED

        # Include the executor type so subclasses do not collide on keys.
        cache_key = (type(self).__name__, self.kernel, arg_to_dtype_set)

        if CACHING_ENABLED:
            try:
                return typed_and_scheduled_cache[cache_key]
            except KeyError:
                pass

        logger.debug("%s: typed-and-scheduled cache miss" % self.kernel.name)

        kernel = self.get_typed_and_scheduled_kernel_uncached(arg_to_dtype_set)

        if CACHING_ENABLED:
            typed_and_scheduled_cache[cache_key] = kernel

        return kernel

    def arg_to_dtype_set(self, kwargs):
        """Build a hashable frozenset of (arg name, NumpyType) pairs from
        the call-time *kwargs*; None when all arg dtypes are already known."""
        if not self.has_runtime_typed_args:
            return None

        from loopy.types import NumpyType
        target = self.kernel.target

        impl_arg_to_arg = self.kernel.impl_arg_to_arg
        arg_to_dtype = {}
        for arg_name, val in six.iteritems(kwargs):
            arg = impl_arg_to_arg.get(arg_name, None)

            if arg is None:
                # offsets, strides and such
                continue

            if arg.dtype is None and val is not None:
                try:
                    dtype = val.dtype
                except AttributeError:
                    # Not array-like; leave this one to type inference.
                    pass
                else:
                    arg_to_dtype[arg_name] = NumpyType(dtype, target)

        return frozenset(six.iteritems(arg_to_dtype))
# }}}
# vim: foldmethod=marker
| 7,538 | 2,210 |
#!/usr/bin/python
import rebound
from rebound.interruptible_pool import InterruptiblePool
# Import other modules
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from matplotlib.colors import LogNorm
# Runs one simulation.
def simulation(par):
    """Integrate a Sun-Jupiter-Saturn system for 500 years.

    :param par: tuple (saturn_a, saturn_e) -- Saturn's semi-major axis [AU]
        and eccentricity
    :return: [MEGNO, Lyapunov timescale in years]
    """
    saturn_a, saturn_e = par
    sim = rebound.Simulation()
    sim.integrator = "whfast"
    sim.integrator_whfast_safe_mode = 0
    sim.dt = 5.
    # These parameters are only approximately those of Jupiter and Saturn.
    sim.add(m=1.)
    sim.add(m=0.000954, a=5.204, anom=0.600, omega=0.257, e=0.048)
    sim.add(m=0.000285, a=saturn_a, anom=0.871, omega=1.616, e=saturn_e)
    sim.move_to_com()
    sim.init_megno()
    sim.integrate(5e2*2.*np.pi)  # 500 years, in units where 2*pi = 1 year
    megno = sim.calculate_megno()
    lyapunov_years = 1./(sim.calculate_lyapunov()*2.*np.pi)
    return [megno, lyapunov_years]
def updatePlot(first=False):
    """Rebuild the MEGNO and Lyapunov-timescale maps from `resd`.

    Reads the module-level dict `resd` ((a, e) -> [megno, lyap]) and the
    module-level figure objects. Pass first=True on the initial call so
    the colorbars are only created once.
    """
    # This constructs a 2d array.
    # The current implementation is slow, but simple.
    # list() is required: np.array(dict.keys()) builds a useless 0-d object
    # array under Python 3 (dict views are not sequences).
    keys = np.array(list(resd.keys()))
    x1 = np.unique(keys.T[0])
    x2 = np.unique(keys.T[1])
    res = np.empty((len(x2), len(x1), 2))
    for i, _x1 in enumerate(x1):
        for j, _x2 in enumerate(x2):
            res[j][i] = resd[(_x1, _x2)]
    # Clip arrays
    megno = np.clip(res[:, :, 0], 1.8, 4.)
    lyaptimescale = np.clip(np.absolute(res[:, :, 1]), 1e1, 4e3)
    # Plot MEGNO
    im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
    # Plot Lyapunov timescale
    im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=4e3, norm=LogNorm(), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
    if first:
        cb1 = plt.colorbar(im1, ax=axarr[0])
        cb1.solids.set_rasterized(True)
        cb1.set_label("MEGNO $\\langle Y \\rangle$")
        cb2 = plt.colorbar(im2, ax=axarr[1])
        cb2.solids.set_rasterized(True)
        cb2.set_label("Lyapunov timescale [years]")
    plt.draw()
pool = InterruptiblePool() # Number of threads default to the number of CPUs on the system
def runSim(p):
    # Runs `simulation` for every (a, e) pair in p on the worker pool and
    # stores each result in the module-level `resd` dict keyed by the pair.
    # NaNs from failed integrations are coerced to 0 by nan_to_num.
    print("Running %d simulations." % len(p))
    res = np.nan_to_num(np.array(pool.map(simulation, p)))
    for i, r in enumerate(res):
        resd[p[i]] = r
# Setup grid and run many simulations in parallel
a = np.array([7.,10.]) # range of saturn semi-major axis in AU
e = np.array([0.,0.5]) # range of saturn eccentricity
# Setup plots
f, axarr = plt.subplots(2,figsize=(10,8))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
ax.set_xlim(extent[0],extent[1])
ax.set_ylim(extent[2],extent[3])
ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
ax.set_ylabel("$e_{\mathrm{Saturn}}$")
# Results are stored in this dictionary
resd = {}
# Initial parameters (2x2 grid)
parameters = [(_a, _e) for _a in a for _e in e]
# Run and plot first simulations
runSim(parameters)
updatePlot(first=True)
# Eight levels of refinement
for i in xrange(8):
_a = np.linspace((a[0]+a[1])/2.,a[-1],len(a))[:-1]
a = np.sort(np.concatenate((a,_a)))
parameters = [(__a, _e) for __a in _a for _e in e]
runSim(parameters)
updatePlot()
_e = np.linspace((e[0]+e[1])/2.,e[-1],len(e))[:-1]
e = np.sort(np.concatenate((e,_e)))
parameters = [(_a, __e) for _a in a for __e in _e]
runSim(parameters)
updatePlot()
raw_input('Press enter...')
| 3,419 | 1,406 |
def split_digits(num):
    """Return (units, tens, hundreds, thousands) of an integer 0-9999."""
    u = num // 1 % 10
    d = num // 10 % 10
    # Bug fix: the original used % 100 and % 1000 here, so e.g. 1234
    # reported Centena as 12 instead of 2. Each decimal place is % 10.
    c = num // 100 % 10
    m = num // 1000 % 10
    return u, d, c, m


if __name__ == '__main__':
    # Guarding the prompt keeps the module importable (and testable).
    num = int(input('Digite um número de 0 a 9999:\n'))
    u, d, c, m = split_digits(num)
    print(f'Analisando o número {num}')
    print(f'Unidade:{u}')
    print(f'Dezena:{d}')
    print(f'Centena:{c}')
    print(f'Milhar:{m}')
| 257 | 138 |
import random
nums = [str(n)+"\n" for n in range(1, 51)]
random.shuffle(nums)
print(nums)
nums.pop()
with open("missing_number.txt", "w") as f:
f.writelines(nums)
| 167 | 74 |
import argparse
import ast
import sys

import numpy as np
import pandas as pd
def f1(predictions, gold):
    """
    F1 (a.k.a. DICE) operating on two lists of offsets (e.g., character).
    >>> assert f1([0, 1, 4, 5], [0, 1, 6]) == 0.5714285714285714
    :param predictions: a list of predicted offsets
    :param gold: a list of offsets serving as the ground truth
    :return: a score between 0 and 1
    """
    # Degenerate cases: empty gold is perfect only for empty predictions.
    if not gold:
        return 1. if not predictions else 0.
    if not predictions:
        return 0.
    pred_set = set(predictions)
    gold_set = set(gold)
    overlap = len(pred_set & gold_set)
    return 2 * overlap / float(len(pred_set) + len(gold_set))
def evaluate(pred_file, test_file):
    '''
    Score predictions against the gold spans and print the mean F1.

    pred_file: path to the prediction file (one python-literal list of
        offsets per line)
    test_file: path to the test csv file with a `spans` column
    '''
    test_df = pd.read_csv(test_file)
    # Security: ast.literal_eval only accepts literals, unlike eval() which
    # would execute arbitrary code found in either input file.
    gold_spans = test_df.spans.apply(ast.literal_eval).to_list()
    # `with` closes the handle (the original leaked it).
    with open(pred_file) as fh:
        pred_spans = [ast.literal_eval(line.strip()) for line in fh]
    if len(gold_spans) != len(pred_spans):
        print('Error: the number of predictions does not match the number of test examples!')
        sys.exit(1)
    scores = [f1(pred, gold) for pred, gold in zip(pred_spans, gold_spans)]
    print('F1 score: ', np.mean(scores))
def main():
    """CLI entry point: parse the two file paths and run the evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--prediction_file",
                        required=True,
                        help="path to the line-by-line file containing system predictions.")
    parser.add_argument("--test_file",
                        required=True,
                        help="path to the csv file with gold spans.")
    cli_args = parser.parse_args()
    evaluate(cli_args.prediction_file, cli_args.test_file)


if __name__ == "__main__":
    main()
class TrieNode:
    def __init__(self):
        # child character -> TrieNode
        self.children = {}
        # True when a complete word ends at this node
        self.is_word = False
        # every added word whose path passes through this node
        # (lets prefix queries return matches without walking the subtree)
        self.word_list = []
class Trie:
    """Prefix tree where every node remembers the words passing through it."""

    def __init__(self):
        self.root = TrieNode()

    def add(self, word):
        """Insert word, recording it on every node along its path."""
        current = self.root
        for letter in word:
            current = current.children.setdefault(letter, TrieNode())
            current.word_list.append(word)
        current.is_word = True

    def find(self, word):
        """Return the node reached by word, or None if the path breaks."""
        current = self.root
        for letter in word:
            if letter not in current.children:
                return None
            current = current.children[letter]
        return current

    def searchWord(self, word):
        """True when word was added as a complete word."""
        node = self.find(word)
        return node is not None and node.is_word

    def searchPrefix(self, prefix):
        """All added words starting with prefix (empty list when none)."""
        node = self.find(prefix)
        return node.word_list if node is not None else []
class Solution:
    """
    @param: words: a list of words without duplicates
    @return: all word squares
    """

    def wordSquares(self, words):
        # `not words` already covers the empty list; the original's extra
        # `len(words) == 0` check was redundant.
        if not words:
            return []
        trie = Trie()
        for word in words:
            trie.add(word)
        results = []
        # Any word may start a square; extend it row by row via DFS.
        for word in words:
            self.dfs(trie, [word], results)
        return results

    def dfs(self, trie, word_square, results):
        """Extend the partial square `word_square`, appending completions to results."""
        idx, n = len(word_square), len(word_square[0])
        if idx == n:
            results.append(list(word_square))  # snapshot: square is complete
            return
        # Pruning: every future row must have at least one candidate word with
        # the prefix formed by its column in the rows chosen so far.
        for row_index in range(idx, n):
            prefix = "".join(word_square[i][row_index] for i in range(idx))
            if not trie.searchPrefix(prefix):
                return
        # Candidates for row `idx` must share the prefix in column `idx`.
        prefix = "".join(word_square[i][idx] for i in range(idx))
        for word in trie.searchPrefix(prefix):
            word_square.append(word)
            self.dfs(trie, word_square, results)
            word_square.pop()  # backtrack
| 1,963 | 586 |
from pyschism.io.bctides import Bctides
__all__ = ['Bctides']
| 63 | 27 |
from reverse_image_search_bot import bot

# Script entry point: start the bot's main loop.
if __name__ == '__main__':
    bot.main()
| 83 | 29 |
from net import Net
import os
import sys
import time
import torch.optim as optim
print(os.getcwd())
sys.path.append(os.getcwd())
# from ml-sandbox.src.utils.utils import display_formatted_time
import gc

# Drop a previously-defined Training class (useful when re-running this cell
# in a notebook).  The original `del Training` ran before the class was ever
# defined, which raises NameError on a fresh run -- guard it.
if 'Training' in globals():
    del Training
gc.collect()
class Training:
    """Minimal SGD training loop around the project's `Net` model."""

    def __init__(self, dev):
        # torch device ("cuda"/"cpu") that the model and every batch move to
        self.device = dev

    def train(self, lr, momentum, epochs=1, train_loader=None, log_interval=100):
        """Train `Net` with SGD and cross-entropy loss.

        lr / momentum: optimizer hyper-parameters.
        epochs: passes over the data (new keyword; the original read an
            undefined global `epochs`).
        train_loader: iterable of (inputs, labels) batches (new keyword; the
            original read an undefined global `train_loader`).
        log_interval: batches between progress prints (new keyword; the
            original read an undefined global `log_interval`).
        """
        # `nn` was referenced but never imported at module level.
        import torch.nn as nn

        if train_loader is None:
            raise ValueError("train() requires a train_loader")
        since = time.time()
        print(self.device)
        model = Net().to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(model.parameters(),
                              lr=lr,
                              momentum=momentum)
        for epoch in range(epochs):
            running_loss = 0.0
            for batch_idx, (inputs, labels) in enumerate(train_loader, 0):
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if batch_idx % log_interval == (log_interval - 1):
                    print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tRunningLoss: {:.3f}".format(
                        epoch, batch_idx * len(inputs), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item(), running_loss / log_interval
                    ))
                    running_loss = 0.0
        # `display_formatted_time` was never imported (its import is commented
        # out at the top of the file); report elapsed seconds directly.
        print("Training took {:.1f}s".format(time.time() - since))
| 1,444 | 463 |
"""
This script allows the admin to generate a new Fernet key file and re-encrypt
any '.encrypted' file, should the old key file be compromised.
Run it with ``python -m tools.gen_flask_secret``
"""
import os
from webapp.utils.file_encryption import FernetVault
from webapp.constants import SECRET_KEYFILE, FLASK_SECRET_FILE
from webapp.utils.super_logger import logger
if __name__ == '__main__':
    # Rotation re-encrypts with the key stored in SECRET_KEYFILE, so refuse
    # to proceed unless that key file already exists.
    if not os.path.exists(SECRET_KEYFILE):
        logger.info('Tools', 'Error: You must have the original key file before you can change to a new one.')
        exit(-1)
    # generate a new random Flask secret key and save it, encrypted, to disk
    vault = FernetVault(SECRET_KEYFILE)
    NEW_FLASK_SECRET = os.urandom(24)
    vault.write_file(NEW_FLASK_SECRET, FLASK_SECRET_FILE)
| 751 | 259 |
# -*- coding: utf-8 -*-
'''Graphite output plugins module
'''
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
with_statement,
)
import logging
from queue import Empty
from statsd.client import StatsClient
from metricol.inputs import MetricInput
from metricol.outputs import MetricOutput
LOG = logging.getLogger(__name__)
class Statsite(MetricOutput):
    '''Pushes queued metrics to a statsite/statsd daemon via StatsClient.
    '''

    options = ['host', 'port']

    def __init__(self, section, queue):
        super(Statsite, self).__init__(section, queue)
        self.client = None

    def prepare_things(self):
        super(Statsite, self).prepare_things()
        self.client = StatsClient(
            host=self.cfg['host'],
            port=int(self.cfg['port']),
            maxudpsize=1024)

    def do_things(self):
        # Drain the queue without blocking; stop as soon as it runs dry.
        # Metric tuples are (key, value, type, <ignored>).
        while True:
            try:
                key, value, metric_type, _ = self.queue.get(block=False)
            except Empty:
                break
            if metric_type == MetricInput.METRIC_TYPE_GAUGE:
                self.client.gauge(key, value)
            elif metric_type == MetricInput.METRIC_TYPE_COUNTER:
                self.client.incr(key, count=value)
            elif metric_type == MetricInput.METRIC_TYPE_TIMER:
                self.client.timing(key, value)
| 1,321 | 418 |
from qqbot import Main; Main()  # launch the QQ bot immediately when this script runs
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
def url_list(base_url, no_of_pages):
    """Return `base_url` with page numbers 1..no_of_pages appended.

    :param base_url: URL prefix ending in a page-number query parameter
    :param no_of_pages: number of consecutive pages, starting at 1
    :return: list of page URLs (empty when no_of_pages is 0)

    The original generated page numbers via ``np.linspace`` floats and
    round-tripped them through ``int``; a plain ``range`` yields the same
    strings directly.
    """
    return [base_url + str(page) for page in range(1, no_of_pages + 1)]
def quote_list(url):
    """Scrape one Goodreads quotes page into a DataFrame with Quote/Genre columns."""
    response = requests.get(url)
    soup = BeautifulSoup(response.text, features='lxml')
    quotes = []
    genres = []
    for item in soup.find_all('div', {'class': 'quote mediumText'}):
        raw_text = item.find('div', {'class': 'quoteText'}).text
        # The quote body precedes the em-dash that introduces the author.
        quotes.append(raw_text.split('―')[0].strip())
        genre_links = item.find('div', {'class': 'greyText smallText left'}).find_all('a')
        genres.append(','.join(link.text for link in genre_links))
    return pd.DataFrame({'Quote': quotes, 'Genre': genres})
def make_quote_df(base_url, no_of_pages):
    """Scrape `no_of_pages` consecutive pages rooted at `base_url` into one DataFrame."""
    frames = []
    for page_no, page_url in enumerate(url_list(base_url, no_of_pages), start=1):
        print(f'About to make dataframe: {page_no}')
        frames.append(quote_list(page_url))
    return pd.concat(frames, ignore_index=True)
# NOTE(review): each make_quote_df call below issues 100 HTTP requests to
# Goodreads (~1200 requests for the whole script) and the CSVs are written
# into the current working directory.
# =============================================================================
# Love category of quotes
# =============================================================================
romance_urls = 'https://www.goodreads.com/quotes/tag/romance?page='
romance_df = make_quote_df(romance_urls, 100)
love_urls = 'https://www.goodreads.com/quotes/tag/love?page='
love_df = make_quote_df(love_urls, 100)
# =============================================================================
# Wisdom category of quotes
# =============================================================================
wisdom_urls = 'https://www.goodreads.com/quotes/tag/wisdom?page='
wisdom_df = make_quote_df(wisdom_urls, 100)
truth_urls = 'https://www.goodreads.com/quotes/tag/truth?page='
truth_df = make_quote_df(truth_urls, 100)
# =============================================================================
# Religion category of quotes
# =============================================================================
god_urls = 'https://www.goodreads.com/quotes/tag/god?page='
god_df = make_quote_df(god_urls, 100)
faith_urls = 'https://www.goodreads.com/quotes/tag/faith?page='
faith_df = make_quote_df(faith_urls, 100)
# =============================================================================
# Witty and clever category of quotes
# =============================================================================
humor_urls = 'https://www.goodreads.com/quotes/tag/humor?page='
humor_df = make_quote_df(humor_urls, 100)
writing_urls = 'https://www.goodreads.com/quotes/tag/writing?page='
writing_df = make_quote_df(writing_urls, 100)
# =============================================================================
# Dark and contemplative category of quotes
# =============================================================================
death_urls = 'https://www.goodreads.com/quotes/tag/death?page='
death_df = make_quote_df(death_urls, 100)
time_urls = 'https://www.goodreads.com/quotes/tag/time?page='
time_df = make_quote_df(time_urls, 100)
# =============================================================================
# Intellectual category of quotes
# =============================================================================
knowledge_urls = 'https://www.goodreads.com/quotes/tag/knowledge?page='
knowledge_df = make_quote_df(knowledge_urls, 100)
science_urls = 'https://www.goodreads.com/quotes/tag/science?page='
science_df = make_quote_df(science_urls, 100)
# =============================================================================
# Giving each df the column category, with respective category
# =============================================================================
CATEGORY = 'Category'
romance_df[CATEGORY] = 'Romance'
love_df[CATEGORY] = 'Love'
wisdom_df[CATEGORY] = 'Wisdom'
truth_df[CATEGORY] = 'Truth'
god_df[CATEGORY] = 'God'
faith_df[CATEGORY] = 'Faith'
humor_df[CATEGORY] = 'Humor'
writing_df[CATEGORY] = 'Writing'
death_df[CATEGORY] = 'Death'
time_df[CATEGORY] = 'Time'
knowledge_df[CATEGORY] = 'Knowledge'
science_df[CATEGORY] = 'Science'
# =============================================================================
# Converting every df into a CSV for future use
# =============================================================================
romance_df.to_csv('Romance_Quotes.csv')
love_df.to_csv('Love_Quotes.csv')
wisdom_df.to_csv('Wisdom_Quotes.csv')
truth_df.to_csv('Truth_Quotes.csv')
god_df.to_csv('God_Quotes.csv')
faith_df.to_csv('Faith_Quotes.csv')
humor_df.to_csv('Humor_Quotes.csv')
writing_df.to_csv('Writing_Quotes.csv')
death_df.to_csv('Death_Quotes.csv')
time_df.to_csv('Time_Quotes.csv')
knowledge_df.to_csv('Knowledge_Quotes.csv')
science_df.to_csv('Science_Quotes.csv')
| 5,300 | 1,888 |
import os
import json
from flask import Flask
app = Flask(__name__)
@app.route('/api/entries')
def get_tap_entries():
    """Return visible beer entries, nulling image names that are missing on disk."""
    with open('/usr/src/app/data/beers.json') as f:
        beer_data = json.load(f)
    entries = []
    for entry in beer_data:
        if not entry.get('visible', False):
            continue
        image_name = entry.get('image', False)
        # Drop dangling image references so the frontend does not 404.
        if image_name and not os.path.exists(f"/usr/src/app/data/images/{image_name}"):
            entry['image'] = None
        entries.append(entry)
    return {
        'entries': entries
    }
@app.route('/api/header-info')
def get_header_info():
    """Static header metadata; brewery name and temperature are not configured."""
    header_info = {
        'breweryName': '',
        'kegeratorTemp': None,
    }
    return {'headerInfo': header_info}
| 791 | 250 |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 05:31:09 2020
@author: Srikar Balusu
"""
import json
import pandas as pd
import numpy as np
import re
import random
import sqlite3
import datetime
import calendar
from dateutil.relativedelta import *
# Entity lookup tables used to populate the question template below.
with open('lookup1.json') as json_file:
    data = json.load(json_file)
with open('uniquelookup.json') as json_file:
    data2 = json.load(json_file)
with open('state_dict.json') as json_file:
    state_dict = json.load(json_file)
# Forecast database queried in the generation loop.
conn = sqlite3.connect('testQ.db')
c = conn.cursor()
question_template = "What state will have the (Value Entity) total forecasted number of deaths (Time Entity)?"
question_template_id = 'db4q5'
output = {}        # question index -> list holding one generated-question record
question_key = {}  # question text -> True; used to de-duplicate questions
entities = ['Value Entity', 'Time Entity']
time_values = ['in the next (x) days', 'in the next (x) weeks', 'in the next week', 'tomorrow', 'day after tomorrow']
count = 1
def queryEndDate(query, time_entity):
    """Resolve `time_entity` to a concrete date and substitute it into `query`.

    Returns (query with "given date" replaced by the forecast's
    target_week_end_date -- always a Saturday -- and the human-readable
    phrase with any "(x)" placeholder filled in).  `time_entity` must be one
    of the five phrases in `time_values`.
    """
    today = datetime.date.today()
    output = time_entity
    if time_entity == 'in the next (x) days':
        num_day = random.randint(1, 20)
        future_date = today + datetime.timedelta(days=num_day)
        output = output.replace("(x)", str(num_day))
    elif time_entity == 'in the next (x) weeks':
        num_week = random.randint(2, 3)
        future_date = today + datetime.timedelta(days=num_week * 7)
        output = output.replace("(x)", str(num_week))
    elif time_entity == 'in the next week':
        future_date = today + datetime.timedelta(days=7)
    elif time_entity == 'tomorrow':
        future_date = today + datetime.timedelta(days=1)
    elif time_entity == 'day after tomorrow':
        future_date = today + datetime.timedelta(days=2)
    # Forecast rows are keyed by week-ending Saturdays: snap forward to the
    # next Saturday (Sunday jumps 6 days; Mon-Fri jump 5 - weekday()).
    weekday = future_date.weekday()
    if weekday == 5:
        target = future_date
    elif weekday == 6:
        target = future_date + datetime.timedelta(days=6)
    else:
        target = future_date + datetime.timedelta(days=5 - weekday)
    return query.replace("given date", str(target)), output
# Generate 249 unique (question, SQL) pairs by sampling value/time entities;
# `count` only advances when the query returned data and the question is new.
while count < 250:
    output[count] = []
    populated_entities = []
    time_entity = random.choice(time_values)
    val = random.choice(data['Value Entity'])
    # Fill an ordinal placeholder like "(x)th" with a random rank 1-5 and fix
    # the English suffix (1st/2nd/3rd).
    if val.find("(x)") >= 0:
        order = random.randint(1,5)
        val = val.replace("(x)", str(order))
        if order == 2:
            val = val.replace("th", "nd")
        if order == 3:
            val = val.replace("th", "rd")
        if order == 1:
            val = val.replace("th", "st")
    else:
        order = 1
    # "most"/"highest" phrases sort descending; everything else ascending.
    if val.find("most") >= 0 or val.find("highest") >=0 or val.find("Highest") >=0:
        ascending = False
    else:
        ascending = True
    sql_template = "Select location_name, Max(point) from db4forecaststate WHERE target_week_end_date = 'given date' and location_name != 'National' group by location_name order by Max(point) asc/desc limit X,1"
    query = sql_template
    query, time_e = queryEndDate(query,time_entity)
    if ascending == False:
        query = query.replace("asc/desc", "desc")
        query = query.replace("X", str(order-1))
    else:
        query = query.replace("asc/desc", "asc")
        query = query.replace("X", str(order-1))
    real_question = question_template.replace("(Time Entity)", time_e)
    real_question = real_question.replace("(Value Entity)", val)
    populated_entities.append(val)
    populated_entities.append(time_e)
    c.execute(query)
    result = c.fetchall()
    # Skip empty results and duplicate questions without advancing `count`.
    if len(result) == 0 or result[0][0] == None:
        continue
    elif real_question in question_key.keys():
        continue
    else:
        question_key[real_question] = True
        output[count].append({'question_template_id' : question_template_id, 'question_template' : question_template,
                          'entities' : entities, 'question' : real_question,
                          'populated_entities': populated_entities, 'query_template' : sql_template, 'query' : query, 'database': 'database 4'})
        print(count)
        print(real_question)
        print(query)
        print(result)
        count = count + 1
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
# Demo dataset fetched from a public gist at import time (requires network).
df = pd.read_csv(f'https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/'
                 f'raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')
def generate_simple_table(dataframe: pd.DataFrame, max_rows=10) -> html.Table:
    """
    Simple function to return a simple HTML table for a Dash dashboard.
    :param dataframe: A Pandas DataFrame to plot.
    :param max_rows: The maximum number of rows to plot: defaults to 10
    :return: A HTML table object.
    """
    header = html.Thead(
        # fixed: header cells must be Th elements inside one Tr; the original
        # nested Tr inside Tr, producing an invalid table header
        html.Tr([html.Th(col) for col in dataframe.columns])
    )
    body = html.Tbody([
        # fixed: one Td per column; the original passed a generator of raw
        # values to a single Td, collapsing each row into one cell
        html.Tr([html.Td(dataframe.iloc[i][col]) for col in dataframe.columns])
        for i in range(min(len(dataframe), max_rows))
    ])
    return html.Table([header, body])
# Standard Dash demo stylesheet.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
    html.H4(children="US Agriculture Exports (2011)"),
    generate_simple_table(dataframe=df, max_rows=10)
])
if __name__ == '__main__':
    # Development server only; use a production WSGI server when deploying.
    app.run_server(debug=True, host='127.0.0.1', port=8088)
#!/usr/bin/env python
'''
Test the cmislib package created by Apache Chemistry project.
Version is currently at 0.5.1 and has very good reviews
from the community.
Created: 10/04/2015
'''
__author__ = 'M@Campbell'
import unittest
from unittest import skipIf
import os
from base64 import b64encode
from ooiservices.app import create_app
@skipIf(os.getenv('TRAVIS'), 'Skip if testing from Travis CI.')
class AlfrescoTestCase(unittest.TestCase):
    """Integration tests for the cmislib CMIS client against a live Alfresco
    server (Python 2 syntax: bare ``print`` statements)."""

    def setUp(self):
        self.app = create_app('TESTING_CONFIG')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
        self.basedir = os.path.abspath(os.path.dirname(__file__))
        # set the vars for the connection
        # SECURITY(review): live credentials and repository id are hardcoded
        # in source control -- move them to config/env and rotate the password.
        self.cmisUrl = \
            'https://alfresco.oceanobservatories.org/alfresco/s/api/cmis'
        self.cmisUsername = 'ooinet'
        self.cmisPassword = '75commonLIKEbrown76'
        self.cmisId = 'c161bc66-4f7e-4a4f-b5f2-aac9fbf1d3cd'

    def tearDown(self):
        self.app_context.pop()

    def get_api_headers(self, username, password):
        # Basic-auth + JSON headers for direct API calls.
        return {
            'Authorization': 'Basic ' + b64encode(
                (username + ':' + password).encode('utf-8')).decode('utf-8'),
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

    def test_cmislib_loaded(self):
        '''
        Make sure the cmislib library is properly loaded.
        '''
        from cmislib.model import CmisClient
        CmisClient

    def test_cmislib_connection(self):
        from cmislib.model import CmisClient
        # create the connection object
        client = CmisClient(self.cmisUrl, self.cmisUsername, self.cmisPassword)
        # check to make sure the object was created with the correct url
        # NOTE(review): assertEquals is the deprecated alias of assertEqual.
        self.assertEquals(client.repositoryUrl, self.cmisUrl)
        # use the client to connect to the repository
        repo = client.getRepository(self.cmisId)
        # make sure the repo information is referencing the correct repository
        self.assertEqual(repo.info['repositoryId'], self.cmisId)

    def test_cmislib_CRD(self):
        from cmislib.model import CmisClient
        client = CmisClient(self.cmisUrl, self.cmisUsername, self.cmisPassword)
        repo = client.getRepository(self.cmisId)
        # for tests, lets make sure the test folder isn't still there
        # NOTE(review): bare except silently swallows every error here; it is
        # intentional best-effort cleanup, but a narrower except would be safer.
        try:
            print ". . ."
            someObject = repo.getObjectByPath('/testFolder')
            someObject.deleteTree()
        except:
            print "\tno existing folders..."
        # create a new dir in the root folder
        print "\ttesting folder creation..."
        root = repo.rootFolder
        someFolder = root.createFolder('testFolder')
        # create a test file and drop it in the test folder.
        print "\ttesting file creation..."
        someFile = open(self.basedir + '/mock_data/test.txt', 'r')
        someFolder.createDocument('Test Document', contentFile=someFile)
        # test read by using a full-text search.
        print "\ttesting full-text search (read)..."
        repo.query("select * from cmis:document where contains('test')")
        # Then obliterate the folder and all it's children, mercilessly.
        print "\ttesting delete..."
        someFolder.deleteTree()
| 3,339 | 999 |
version https://git-lfs.github.com/spec/v1
oid sha256:834d7828e80c45d2160d52eeeefc97a2432c512b24b89422f604edf52f3d667a
size 895
| 128 | 93 |
from __future__ import print_function
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import config as cf
import numpy as np
import torchvision
import torchvision.transforms as transforms
#import ipdb
import os
import sys
import time
import argparse
import datetime
import scipy.ndimage as ndimage
from networks import *
from torch.autograd import Variable
from itertools import starmap
import random
# Command-line interface for the training script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning_rate')
parser.add_argument('--net_type', default='wide-resnet', type=str, help='model')
parser.add_argument('--depth', default=28, type=int, help='depth of model')
parser.add_argument('--widen_factor', default=10, type=int, help='width of model')
parser.add_argument('--dropout', default=0.3, type=float, help='dropout_rate')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset = [cifar10/cifar100]')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')
args = parser.parse_args()
# Hyper Parameter settings
sim_learning = False  # when True, a pretrained "robust" network is loaded below
#use_noise = True
use_cuda = torch.cuda.is_available()
best_acc = 0
#sig = 10
reg_strength = 1  # NOTE(review): defined but not used anywhere in this view
start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, cf.batch_size, cf.optim_type
# Data Uplaod
print('\n[Phase 1] : Data Preparation')
torch.manual_seed(2809)
# Pool of Gaussian-blur augmentations; all but sigma=0 (identity) are disabled.
gaussian_transforms = [
    transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=0)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=1)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=2)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=5)),
    # transforms.Lambda(lambda x: ndimage.gaussian_filter(x, sigma=10))
]
# Training pipeline with the (currently identity) blur applied before ToTensor.
transform_train_noise = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
transform_train_clean = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test_noise = transforms.Compose([
    transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
transform_test = transforms.Compose([
    #transforms.RandomChoice(gaussian_transforms),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
if(args.dataset == 'cifar10'):
    print("| Preparing CIFAR-10 dataset...")
    sys.stdout.write("| ")
    # NOTE(review): `transform_train` is not defined anywhere in this file
    # (only transform_train_noise / transform_train_clean exist), so this
    # branch raises NameError; the loaders below also expect trainset_noise /
    # trainset_clean / testset_noise, which this branch never creates.
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
    num_classes = 10
elif(args.dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset...")
    sys.stdout.write("| ")
    trainset_noise = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_noise)
    trainset_clean = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train_clean)
    testset_noise = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test_noise)
    num_classes = 100
    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test)
    num_classes = 100  # duplicate assignment (harmless)
trainloader_noise = torch.utils.data.DataLoader(trainset_noise, batch_size=batch_size, shuffle=True, num_workers=2)
trainloader_clean = torch.utils.data.DataLoader(trainset_clean, batch_size=batch_size, shuffle=True, num_workers=2)
testloader_noise = torch.utils.data.DataLoader(testset_noise, batch_size=100, shuffle=False, num_workers=2)
testloader_clean = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
# Return network & file name
def getNetwork(args):
    """Build the requested model and a filename stem for its checkpoints.

    args must carry net_type, depth, widen_factor and dropout; `num_classes`
    comes from module scope.  Exits the process on an unknown net_type.
    """
    if (args.net_type == 'lenet'):
        net = LeNet(num_classes)
        file_name = 'lenet'
    elif (args.net_type == 'vggnet'):
        net = VGG(args.depth, num_classes)
        file_name = 'vgg-'+str(args.depth)
    elif (args.net_type == 'resnet'):
        net = ResNet_2Read(args.depth, num_classes)
        file_name = 'resnet-'+str(args.depth)
    elif (args.net_type == 'wide-resnet'):
        net = Wide_ResNet(args.depth, args.widen_factor, args.dropout, num_classes)
        file_name = 'wide-resnet-'+str(args.depth)+'x'+str(args.widen_factor)
    else:
        # fixed: the message was missing its closing bracket, and the failure
        # path exited with status 0 (success) -- exit non-zero on bad input.
        print('Error : Network should be either [LeNet / VGGNet / ResNet / Wide_ResNet]')
        sys.exit(1)
    return net, file_name
if (sim_learning):
    # Optional "similarity learning" mode: load a pretrained robust network.
    checkpoint_gauss = torch.load("./checkpoint/cifar100/resnet-50_2readout_3.t7")
    robustNet = checkpoint_gauss['net']
    robustNet = torch.nn.DataParallel(robustNet, device_ids=range(torch.cuda.device_count()))
# Test only option
if (args.testOnly):
    print('\n[Test Phase] : Model setup')
    assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
    _, file_name = getNetwork(args)
    checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_readout_match.t7')
    net = checkpoint['net']
    if use_cuda:
        net.cuda()
        net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
        cudnn.benchmark = True
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    # NOTE(review): Variable(..., volatile=True) is legacy pre-0.4 PyTorch;
    # modern code would use `with torch.no_grad():` instead.
    for batch_idx, (inputs, targets) in enumerate(testloader_noise):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs, compute_similarity=False)
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
    acc = 100.*correct/total
    #variance = batch_var.mean()
    print("| Test Result (Noise Readout)\tAcc@1: %.2f%%" %(acc))
    test_loss = 0
    correct = 0
    total = 0
    # NOTE(review): this second pass also iterates testloader_noise while
    # labeled "Clean Readout" -- confirm whether testloader_clean was intended.
    for batch_idx, (inputs, targets) in enumerate(testloader_noise):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs, img_type="clean")
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
    acc = 100.*correct/total
    #variance = batch_var.mean()
    print("| Test Result (Clean Readout)\tAcc@1: %.2f%%" %(acc))
    # std = 0.
    # for images, _ in testloader:
    #     batch_samples = images.size(0)
    #     images = images.view(batch_samples,images.size(1), -1)
    #     std += images.std(2).sum(0)
    # std /= len(testloader.dataset)
    #print("| Standard Deviation of noise / Standard Deviation of Pixels: %.2f" %(sig/std))
    sys.exit(0)
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
    # Load checkpoint
    print('| Resuming from checkpoint...')
    assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
    _, file_name = getNetwork(args)
    checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'_2readout_3.t7')
    net = checkpoint['net']
    # NOTE(review): best_acc is forced to 100.0 here, which prevents test()
    # from ever saving a "best" checkpoint after resuming -- confirm intended.
    best_acc = 100.0
    #start_epoch = checkpoint['epoch']
    start_epoch = 200
else:
    print('| Building net type [' + args.net_type + ']...')
    net, file_name = getNetwork(args)
    net.apply(conv_init)
if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()  # classification loss for both readouts
w_loss = nn.MSELoss()              # matching loss between the two readouts
# Similarity Loss Computation
# Training
similarities = {}
accs = []  # per-epoch clean-readout validation accuracy, saved after training
def train(epoch):
    """One epoch of joint training on paired noisy/clean batches.

    Reads module-level globals: net, criterion, w_loss, trainloader_noise,
    trainloader_clean, trainset_noise, use_cuda, args, cf, batch_size,
    num_epochs.  Only ``net.module.linear_clean`` parameters are optimized.
    """
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    # Fresh optimizer each epoch so the epoch-dependent LR schedule applies.
    optimizer = optim.SGD(net.module.linear_clean.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)
    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, ((inputs1, targets1), (inputs2, targets2)) in enumerate(zip(trainloader_noise, trainloader_clean)):
        if use_cuda:
            # inputs, targets = inputs.cuda(), targets.cuda()
            inputs1, targets1 = inputs1.cuda(), targets1.cuda() # GPU settings
            inputs2, targets2 = inputs2.cuda(), targets2.cuda()
        optimizer.zero_grad()
        # Three separate backward passes accumulate gradients from the noisy
        # readout loss, the clean readout loss, and the readout-matching MSE
        # before a single optimizer step.
        outputs_n = net(inputs1, img_type="noise", compute_similarity=False)
        l1 = criterion(outputs_n, targets1)
        l1.backward(retain_graph=True)
        outputs_c = net(inputs2, img_type="clean", compute_similarity=False)
        l2 = criterion(outputs_c, targets2)
        l2.backward(retain_graph=True)
        l3 = w_loss(outputs_n, outputs_c)
        l3.backward(retain_graph=True)
        optimizer.step() # Optimizer update
        loss = l1 + l2 + l3
        train_loss += loss.item()
        _, predicted = torch.max(outputs_c.data, 1)
        total += targets2.size(0)
        correct += predicted.eq(targets2.data).cpu().sum()
        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\t Loss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(trainset_noise)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush()
def test(epoch):
    """Validate both readouts, checkpoint on best noisy-readout accuracy.

    Reads globals: net, criterion, use_cuda, args, file_name, accs;
    mutates global best_acc and appends to accs.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct1 = 0
    total1 = 0
    correct2 = 0
    total2 = 0
    for batch_idx, (inputs1, targets1) in enumerate(testloader_noise):
        if use_cuda:
            inputs1, targets1 = inputs1.cuda(), targets1.cuda()
        outputs_n = net(inputs1, img_type="noise", compute_similarity=False)
        loss = criterion(outputs_n, targets1)
        test_loss += loss.item()
        _, predicted1 = torch.max(outputs_n.data, 1)
        total1 += targets1.size(0)
        correct1 += predicted1.eq(targets1.data).cpu().sum()
    acc = 100.*correct1/total1
    # NOTE(review): this loop also iterates testloader_noise although it is
    # reported as the "Clean" loss -- confirm whether testloader_clean was
    # intended here.
    for batch_idx, (inputs2, targets2) in enumerate(testloader_noise):
        if use_cuda:
            inputs2, targets2 = inputs2.cuda(), targets2.cuda()
        outputs_c = net(inputs2, img_type="clean", compute_similarity=False)
        loss2 = criterion(outputs_c, targets2)
        _, predicted2 = torch.max(outputs_c.data, 1)
        total2 += targets2.size(0)
        correct2 += predicted2.eq(targets2.data).cpu().sum()
    acc2 = 100.*correct2/total2
    print("\n| Validation Epoch #%d\t\t\tLoss (Noise): %.4f Acc@1: %.2f%%" %(epoch, loss.item(), acc))
    print("\n| Validation Epoch #%d\t\t\tLoss (Clean): %.4f Acc@1: %.2f%%" %(epoch, loss2.item(), acc2))
    # Save checkpoint when best model
    if acc > best_acc:
        print('| Saving Best model...\t\t\tTop1 = %.2f%%' %(acc))
        state = {
                'net':net.module if use_cuda else net,
                'acc':acc,
                'epoch':epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        save_point = './checkpoint/'+args.dataset+os.sep
        if not os.path.isdir(save_point):
            os.mkdir(save_point)
        torch.save(state, save_point+file_name+'.t7')
        best_acc = acc
    accs.append(acc)
    #net.train()
# Driver: run train/test for num_epochs epochs, then save the final model.
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))
print('| Optimizer = ' + str(optim_type))
elapsed_time = 0
for epoch in range(start_epoch, start_epoch+num_epochs):
    start_time = time.time()
    train(epoch)
    test(epoch)
    epoch_time = time.time() - start_time
    elapsed_time += epoch_time
    print('| Elapsed time : %d:%02d:%02d'  %(cf.get_hms(elapsed_time)))
# Per-epoch accuracies for later analysis/plotting.
np.save('epoch_accs', accs)
print('\n[Phase 4] : Testing model')
print('* Test results : Acc@1 = %.2f%%' %(best_acc))
print('| Saving model...')
state = {
        'net':net.module if use_cuda else net,
        #'acc':acc,
        #'epoch':epoch,
}
if not os.path.isdir('checkpoint'):
    os.mkdir('checkpoint')
save_point = './checkpoint/'+args.dataset+os.sep
if not os.path.isdir(save_point):
    os.mkdir(save_point)
torch.save(state, save_point+file_name+'robust_readout_matching_basicblock.t7')
| 12,804 | 4,583 |
# Copyright (c) ASAPP Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fire
import os
import sys
import time
from tqdm.auto import tqdm
import re
import json
from colorama import Fore
def get_num_updates(folder):
    """Parse the most recent `num_updates` value from a hydra training log.

    Scans the last 50 lines of ``<folder>/hydra_train.log`` for the pattern
    ``num_updates": "<int>",`` and returns the integer from the last matching
    line, or 0 when the log is missing/unreadable or contains no match.
    """
    # Pre-compiled once per call; replaces the original's manual find/slice
    # parsing and its bare `except:` clauses with explicit, narrow handling.
    pattern = re.compile(r'num_updates": "(\d+)",')
    try:
        with open(os.path.join(folder, 'hydra_train.log')) as f:
            lines = [line.strip() for line in f][-50:]
    except (OSError, UnicodeError):  # missing file/dir, permissions, bad bytes
        return 0
    num_updates = 0
    for line in lines:
        match = pattern.search(line)
        if match:
            num_updates = int(match.group(1))
    return num_updates
def print_folder(folder, total_updates):
    """Print `folder` with its update count: green when finished, red otherwise."""
    num_updates = get_num_updates(folder)
    finished = num_updates >= total_updates
    color = Fore.GREEN if finished else Fore.RED
    print(f"{folder}\t{color}{num_updates}{Fore.RESET}")
def main(root="exp-bu", total_updates=100_000):
    """List experiment dirs under `root`, grouped by whether eval.log exists."""
    exp_dirs = set()
    match_names = set()  # retained from the original; never populated
    for dirname, dirs, files in tqdm(os.walk(root)):
        # A directory containing a `checkpoints` subfolder is an experiment.
        if 'checkpoints' in dirs:
            exp_dirs.add(dirname)
    evaled = []
    not_evaled = []
    for folder in sorted(exp_dirs):
        bucket = evaled if os.path.exists(f"{folder}/eval.log") else not_evaled
        bucket.append(folder)
    print("Evaluated:")
    for folder in evaled:
        print_folder(folder, total_updates)
    print("\nNot evaluated:")
    for folder in not_evaled:
        print_folder(folder, total_updates)


if __name__ == "__main__":
    fire.Fire(main)
| 1,843 | 601 |
import unittest
from nate import (
H1,
Body,
Br,
DangerousHtml,
Div,
Head,
Hr,
Html,
Input,
Meta,
P,
Title,
Table,
Thead,
Tbody,
Th,
Tr,
Td,
Fragment,
)
from .common import article_1, article_2, html, section
class TagsTest(unittest.TestCase):
    def test_basic(self) -> None:
        # Renders a complete document tree and compares against the exact
        # serialized HTML (void elements self-close, no whitespace inserted).
        basic = Html(
            lang="en",
            children=[
                Head(
                    children=[
                        Meta(charset="utf-8"),
                        Meta(
                            name="viewport",
                            content="width=device-width, initial-scale=1",
                        ),
                        Title(children="Basic"),
                    ]
                ),
                Body(
                    children=[
                        H1("Hello world"),
                        P(
                            children=[
                                "First line",
                                Br(),
                                "Second line",
                            ]
                        ),
                        Hr(),
                        P("The end."),
                    ],
                ),
            ],
        )
        basic_expected = """<!DOCTYPE html>\
<html lang="en">\
<head>\
<meta charset="utf-8"/>\
<meta name="viewport" content="width=device-width, initial-scale=1"/>\
<title>Basic</title>\
</head>\
<body>\
<h1>Hello world</h1>\
<p>First line<br/>Second line</p><hr/>\
<p>The end.</p>\
</body>\
</html>\
"""
        self.assertEqual(basic.to_html(), basic_expected)
def test_iterables(self) -> None:
politicians = [
{"first_name": "Theodor", "last_name": "Heuss", "party": "FDP"},
{"first_name": "Heinrich", "last_name": "Lübke", "party": "CDU"},
{"first_name": "Gustav", "last_name": "Heinemann", "party": "SPD"},
{"first_name": "Walter", "last_name": "Scheel", "party": "FDP"},
{"first_name": "Karl", "last_name": "Carstens", "party": "CDU"},
{
"first_name": "Richard",
"last_name": "von Weizsäcker",
"party": "CDU",
},
{"first_name": "Roman", "last_name": "Herzog", "party": "CDU"},
{"first_name": "Johannes", "last_name": "Rau", "party": "SPD"},
]
table = Table(
[
Thead(Th([Tr("First Name"), Tr("Last Name"), Tr("Party")])),
Tbody(
map(
lambda politician: Tr(
[
Td(politician["first_name"]),
Td(politician["last_name"]),
Td(politician["party"]),
]
),
politicians,
),
),
]
)
table_expected = """<table>\
<thead><th><tr>First Name</tr><tr>Last Name</tr><tr>Party</tr></th></thead>\
<tbody>\
<tr><td>Theodor</td><td>Heuss</td><td>FDP</td></tr>\
<tr><td>Heinrich</td><td>Lübke</td><td>CDU</td></tr>\
<tr><td>Gustav</td><td>Heinemann</td><td>SPD</td></tr>\
<tr><td>Walter</td><td>Scheel</td><td>FDP</td></tr>\
<tr><td>Karl</td><td>Carstens</td><td>CDU</td></tr>\
<tr><td>Richard</td><td>von Weizsäcker</td><td>CDU</td></tr>\
<tr><td>Roman</td><td>Herzog</td><td>CDU</td></tr>\
<tr><td>Johannes</td><td>Rau</td><td>SPD</td></tr>\
</tbody>\
</table>\
"""
self.assertEqual(table.to_html(), table_expected)
p = P("Test", class_=iter(["bg-red", "blue"]))
p_expected = '<p class="bg-red blue">Test</p>'
self.assertEqual(p.to_html(), p_expected)
def test_xss(self) -> None:
p = P("<script>alert('XSS');</script>")
p_expected =\
"<p><script>alert('XSS');</script></p>"
self.assertEqual(p.to_html(), p_expected)
div = Div([DangerousHtml("<script>alert('XSS');</script>")])
div_expected = "<div><script>alert('XSS');</script></div>"
self.assertEqual(div.to_html(), div_expected)
text_input = Input(value="<script>alert('XSS');</script>")
text_input_expected =\
'<input value="<script>alert('XSS');</\
script>"/>'
self.assertEqual(text_input.to_html(), text_input_expected)
def test_article(self) -> None:
article_1_expected = """\
<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\
<p class="f5 f4-l lh-copy athelas">\
The tech giant says it is ready to begin planning a quantum computer, a \
powerful cpu machine that relies on subatomic particles instead of \
transistors.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \
dimly lit room with a computer interface terminal."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\
<time class="f6 db gray">Nov. 21, 2016</time>\
</article>"""
article_2_expected = """<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
A whale takes up residence in a large body of water\
</h1>\
<p class="f5 f4-l lh-copy athelas">\
This giant of a whale says it is ready to begin planning a new swim later \
this afternoon. A powerful mammal that relies on fish and plankton instead of \
hamburgers.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \
whale's tale coming crashing out of the water."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\
<time class="f6 db gray">Nov. 19, 2016</time>\
</article>"""
self.assertEqual(article_1.to_html(), article_1_expected)
self.assertEqual(article_2.to_html(), article_2_expected)
def test_section(self) -> None:
expected = """<section class="mw7 center">\
<h2 class="athelas ph3 ph0-l">News</h2>\
<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\
<p class="f5 f4-l lh-copy athelas">\
The tech giant says it is ready to begin planning a quantum computer, a \
powerful cpu machine that relies on subatomic particles instead of \
transistors.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \
dimly lit room with a computer interface terminal."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\
<time class="f6 db gray">Nov. 21, 2016</time>\
</article>\
<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
A whale takes up residence in a large body of water\
</h1>\
<p class="f5 f4-l lh-copy athelas">\
This giant of a whale says it is ready to begin planning a new swim later \
this afternoon. A powerful mammal that relies on fish and plankton instead of \
hamburgers.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \
whale's tale coming crashing out of the water."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\
<time class="f6 db gray">Nov. 19, 2016</time>\
</article>\
</section>"""
self.assertEqual(section.to_html(), expected)
def test_html(self) -> None:
actual = html.to_html()
expected = """<!DOCTYPE html>\
<html lang="en">\
<head>\
<meta charset="utf-8"/>\
<meta name="viewport" content="width=device-width, initial-scale=1"/>\
<title>Example Title</title>\
<link rel="stylesheet" href="https://unpkg.com/tachyons@4.12.0/css/tachyons.\
min.css"/>\
</head>\
<body class="sans-serif">\
<section class="mw7 center">\
<h2 class="athelas ph3 ph0-l">News</h2>\
<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
Tech Giant Invests Huge Money to Build a Computer Out of Science Fiction</h1>\
<p class="f5 f4-l lh-copy athelas">\
The tech giant says it is ready to begin planning a quantum computer, a \
powerful cpu machine that relies on subatomic particles instead of \
transistors.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="https://mrmrs.github.io/photos/cpu.jpg" alt="Photo of a \
dimly lit room with a computer interface terminal."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Robin Darnell</span></p>\
<time class="f6 db gray">Nov. 21, 2016</time>\
</article>\
<article class="pv4 bt bb b--black-10 ph3 ph0-l">\
<div class="flex flex-column flex-row-ns">\
<div class="w-100 w-60-ns pr3-ns order-2 order-1-ns">\
<h1 class="f3 athelas mt0 lh-title">\
A whale takes up residence in a large body of water</h1>\
<p class="f5 f4-l lh-copy athelas">\
This giant of a whale says it is ready to begin planning a new swim later \
this afternoon. A powerful mammal that relies on fish and plankton instead of \
hamburgers.\
</p>\
</div>\
<div class="pl3-ns order-1 order-2-ns mb4 mb0-ns w-100 w-40-ns">\
<img class="db" src="http://mrmrs.github.io/photos/whale.jpg" alt="Photo of a \
whale's tale coming crashing out of the water."/>\
</div>\
</div>\
<p class="f6 lh-copy gray mv0">By <span class="ttu">Katherine Grant</span></p>\
<time class="f6 db gray">Nov. 19, 2016</time>\
</article>\
</section>\
</body>\
</html>\
"""
self.assertEqual(actual, expected)
def test_fragment(self) -> None:
fragment = Div(
children=[
"before fragment",
Fragment(
children=[
Div(children="before nested fragment in div"),
Fragment(
children="nested fragment",
),
Fragment(
children=Div(children="nested fragment in div"),
),
"after nested fragment",
],
),
"after fragment",
],
)
actual = fragment.to_html()
expected = """<div>\
before fragment\
<div>before nested fragment in div</div>\
nested fragment\
<div>nested fragment in div</div>\
after nested fragment\
after fragment\
</div>\
"""
self.assertEqual(actual, expected)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 11,356 | 4,025 |
# -*- coding: utf-8 -*-
"""
TCP endpoint constants for a ventilator/worker/sink pipeline.

Each endpoint comes in two spellings: a bind address (``tcp://*:PORT``,
used by the process that owns the port) and a connect address
(``tcp://localhost:PORT``, ``LH`` = localhost, used by peers on the same
host).  NOTE(review): the ventilator/sink naming suggests a ZeroMQ-style
pipeline -- confirm against the code that uses these constants.

Created on Fri Feb 26 09:11:06 2016
@author: eikes
"""
# ventilator -> worker channel (port 4555)
ADD_VENTI_WORKER = 'tcp://*:4555'
ADD_VENTI_LH_WORKER = 'tcp://localhost:4555'
# sink result-collection channel (port 4556)
ADD_SINK_RECEIVE = 'tcp://*:4556'
ADD_SINK_LH_RECEIVE = 'tcp://localhost:4556'
# sink publish channel (port 4557)
ADD_SINK_PUBLISH = 'tcp://*:4557'
ADD_SINK_LH_PUBLISH = 'tcp://localhost:4557'
# ventilator receive channel (port 4558)
ADD_VENTI_RECEIVE = 'tcp://*:4558'
ADD_VENTI_LH_RECEIVE = 'tcp://localhost:4558'
# log controller channel (port 4560)
ADD_LOG_CONTROLLER = 'tcp://*:4560'
ADD_LOG_LH_CONTROLLER = 'tcp://localhost:4560'
# logging channel (port 4559)
ADD_LOGGING = 'tcp://*:4559'
ADD_LH_LOGGING = 'tcp://localhost:4559'
| 554 | 311 |
import importlib
import sys

from ancli import make_cli

# Usage: <prog> module.path:function [args for the wrapped function...]
try:
    entry_point = sys.argv[1]
except IndexError:
    print('Error: no entry point name provided!')
    sys.exit(1)

try:
    module_path, function_name = entry_point.split(':')
except ValueError:
    print('Error: entry point name should have format a.b.c:function')
    sys.exit(1)

# fix: a missing/broken module previously crashed with a raw traceback,
# unlike every other failure mode; report it consistently instead.
try:
    mod = importlib.import_module(module_path)
except ImportError:
    print(f'Error: module \'{module_path}\' cannot be imported')
    sys.exit(1)

try:
    func = getattr(mod, function_name)
except AttributeError:
    print(f'Error: function \'{function_name}\' is not found')
    sys.exit(1)

# Drop the entry-point argument so the wrapped function parses its own CLI.
sys.argv = [sys.argv[0]] + sys.argv[2:]
make_cli(func)
| 593 | 208 |
from functools import lru_cache
from urllib.parse import unquote_to_bytes
from zope.interface import implementer
from zope.interface.interfaces import IInterface
from pyramid.encode import url_quote
from pyramid.exceptions import URLDecodeError
from pyramid.interfaces import (
VH_ROOT_KEY,
IRequestFactory,
IResourceURL,
ITraverser,
)
from pyramid.location import lineage
from pyramid.threadlocal import get_current_registry
from pyramid.util import ascii_, is_nonstr_iter, text_
# Characters exempt from %-quoting inside a single path segment (mirrors
# webob's set); PATH_SAFE additionally leaves '/' unquoted for whole paths.
PATH_SEGMENT_SAFE = "~!$&'()*+,;=:@"  # from webob
PATH_SAFE = PATH_SEGMENT_SAFE + "/"
def find_root(resource):
    """Return the root of the resource tree that *resource* belongs to.

    ``resource`` must be :term:`location`-aware.  Note that the root is
    also available on the request object as ``request.root``.
    """
    for node in lineage(resource):
        if node.__parent__ is None:
            return node
    return resource
def find_resource(resource, path):
    """Return the resource at *path* within the tree containing *resource*.

    ``path`` may be a string (as produced by
    :func:`pyramid.traversal.resource_path`) or a tuple (as produced by
    :func:`pyramid.traversal.resource_path_tuple`); this function is the
    logical inverse of both.  A string path beginning with ``/`` -- or a
    tuple whose first element is the empty string -- is absolute and is
    resolved from the tree root; anything else is resolved relative to
    ``resource``.  An empty path returns ``resource`` itself.

    String path segments must be UTF-8 encoded and URL-quoted (e.g.
    ``/path/to%20the/La%20Pe%C3%B1a``) and the text may contain only
    ASCII characters; tuple segments require no quoting.  ``resource``
    must be :term:`location`-aware.  A :exc:`KeyError` is raised when the
    path cannot be resolved to a resource.
    """
    if isinstance(path, str):
        # traversal machinery operates on ascii-only text
        path = ascii_(path)
    info = traverse(resource, path)
    leftover = info['view_name']
    found = info['context']
    if leftover:
        # a non-empty view_name means traversal stopped short of the path
        raise KeyError('%r has no subelement %s' % (found, leftover))
    return found

find_model = find_resource  # b/w compat (forever)
def find_interface(resource, class_or_interface):
    """Return the first resource in the :term:`lineage` of *resource* that
    matches ``class_or_interface``.

    When ``class_or_interface`` is an :term:`interface`, a match is any
    resource providing it; when it is a class, a match is any instance of
    that class (or a subclass).  ``None`` is returned when nothing in the
    lineage matches.  ``resource`` must be :term:`location`-aware.
    """
    if IInterface.providedBy(class_or_interface):
        matches = class_or_interface.providedBy
    else:
        def matches(candidate):
            return isinstance(candidate, class_or_interface)
    for node in lineage(resource):
        if matches(node):
            return node
def resource_path(resource, *elements):
    """Return the absolute physical path of *resource* as a string.

    The path reflects the resource's position in the tree, e.g.
    ``/foo/bar``, and always begins with ``/``.  Any extra ``elements``
    are appended as additional segments (``('a', 'b')`` yields
    ``/foo/bar/a/b``).  Each segment is UTF-8 encoded and URL-quoted,
    e.g. ``/path/to%20the/La%20Pe%C3%B1a``.  The result can be resolved
    back to a resource with :func:`pyramid.traversal.find_resource`.

    ``resource`` must be :term:`location`-aware.  Segments are each
    resource's ``__name__`` with no conversion or safety checking, so a
    non-string ``__name__`` will break path generation; the root's
    ``__name__`` must be ``None`` or ``''`` for the path to start with a
    single leading ``/``.
    """
    # delegate to a helper that caches the relatively expensive join
    parts = resource_path_tuple(resource, *elements)
    return _join_path_tuple(parts)

model_path = resource_path  # b/w compat (forever)
def traverse(resource, path):
    """Traverse from *resource* along *path* and describe the result.

    ``path`` may be a string (such as ``request.environ['PATH_INFO']`` or
    the output of :func:`pyramid.traversal.resource_path`) or a tuple
    (the output of :func:`pyramid.traversal.resource_path_tuple`).  A
    string beginning with ``/`` -- or a tuple beginning with ``''`` -- is
    absolute and traversal starts at the tree root; otherwise traversal
    starts at ``resource``.  An empty path resolves to ``resource``
    itself.  String segments must be UTF-8 encoded and URL-quoted; tuple
    segments need no quoting or encoding.  During traversal each segment
    is URL-unquoted and UTF-8 decoded, raising
    :exc:`pyramid.exceptions.URLDecodeError` on failure; empty and ``.``
    segments are ignored and ``..`` removes the previous segment.

    Returns a dictionary with these keys:

    - ``context``: the resource found by traversal (or ``resource``
      itself for an empty path).
    - ``root``: the resource at which traversal began.
    - ``view_name``: the first segment that did not resolve to a
      resource, or ``''`` when the whole path resolved.
    - ``subpath``: the segments following the ``view_name``, if any.
    - ``traversed``: the segments walked from the root to ``context``
      (``None`` for URL-dispatch contexts).
    - ``virtual_root``: the virtual root of the tree being traversed;
      the physical root unless virtual hosting is in effect.
    - ``virtual_root_path``: the segments walked to reach the virtual
      root (``None`` for URL-dispatch contexts).

    A :exc:`KeyError` is raised when the path cannot be resolved.
    """
    if is_nonstr_iter(path):
        # tuple form: re-join into a urlencoded utf-8 PATH_INFO-style
        # string, since the same traverser also consumes PATH_INFO from
        # real user agents (which always send strings)
        path = _join_path_tuple(tuple(path)) if path else ''
    # callers should hand us ascii-encodeable text; encode eagerly so any
    # failure surfaces here rather than deep inside traversal_path
    path = ascii_(path)
    if path.startswith('/'):
        resource = find_root(resource)
    registry = get_current_registry()
    factory = registry.queryUtility(IRequestFactory)
    if factory is None:
        from pyramid.request import Request  # avoid circdep
        factory = Request
    request = factory.blank(path)
    request.registry = registry
    traverser = registry.queryAdapter(resource, ITraverser)
    if traverser is None:
        traverser = ResourceTreeTraverser(resource)
    return traverser(request)
def resource_path_tuple(resource, *elements):
    """Return the absolute physical path of *resource* as a tuple.

    The tuple mirrors the resource's position in the tree, e.g.
    ``('', 'foo', 'bar')``; the first element is always the empty string,
    marking the path as absolute.  Extra ``elements`` are appended, so
    ``('a', 'b')`` yields ``('', 'foo', 'bar', 'a', 'b')``.  The result
    can be resolved back to a resource with
    :func:`pyramid.traversal.find_resource`.

    ``resource`` must be :term:`location`-aware.  Segments are each
    resource's ``__name__`` verbatim -- no conversion or safety checking
    is performed -- and the root's ``__name__`` must be ``None`` or ``''``
    for the leading element to be the empty string.
    """
    return tuple(_resource_path_list(resource, *elements))

model_path_tuple = resource_path_tuple  # b/w compat (forever)
def _resource_path_list(resource, *elements):
    """Shared implementation behind ``resource_path`` and
    ``resource_path_tuple``: lineage names root-first, then any extras."""
    segments = [node.__name__ or '' for node in lineage(resource)][::-1]
    segments.extend(elements)
    return segments

_model_path_list = _resource_path_list  # b/w compat, not an API
def virtual_root(resource, request):
    """Return the :term:`virtual root` of the resource tree for the
    current :term:`request`.

    When ``resource`` is a context obtained via :term:`traversal` and the
    ``HTTP_X_VHM_ROOT`` key is present in the WSGI environment, its value
    is treated as a virtual root path and the matching resource is found
    via :func:`pyramid.traversal.find_resource` and returned.  Without
    virtual hosting, the physical :term:`root` is returned.  Contexts
    obtained via :term:`URL dispatch` do not support virtual rooting
    (each is its own physical and virtual root), so such a ``resource``
    is effectively returned unchanged.
    """
    try:
        registry = request.registry
    except AttributeError:
        registry = get_current_registry()
    adapter = registry.queryMultiAdapter((resource, request), IResourceURL)
    if adapter is None:
        adapter = ResourceURL(resource, request)
    vpath = adapter.virtual_path
    ppath = adapter.physical_path
    if ppath != vpath and ppath.endswith(vpath):
        # virtual hosting in effect: the prefix before the virtual path
        # is the virtual root's physical path
        return find_resource(resource, ppath[: -len(vpath)])
    try:
        return request.root
    except AttributeError:
        return find_root(resource)
def traversal_path(path):
    """Variant of :func:`pyramid.traversal.traversal_path_info` for paths
    that are still URL-encoded.

    A string argument must be directly encodeable to ASCII ('/foo' works,
    a string with non-ASCII characters does not); otherwise a
    :exc:`UnicodeEncodeError` is raised.
    """
    if isinstance(path, str):
        path = path.encode('ascii')  # must not contain non-ascii chars
    # unquote exactly the way a PEP 3333 server would; yields a native str
    wsgi_path = unquote_bytes_to_wsgi(path)
    return traversal_path_info(wsgi_path)  # tuple of text segments
@lru_cache(1000)
def traversal_path_info(path):
    """Given``path``, return a tuple representing that path which can be
    used to traverse a resource tree. ``path`` is assumed to be an
    already-URL-decoded ``str`` type as if it had come to us from an upstream
    WSGI server as the ``PATH_INFO`` environ variable.

    The ``path`` is first decoded from its WSGI representation to text.
    Per the :pep:`3333` spec, ``path`` is first encoded to bytes using the
    Latin-1 encoding; the resulting set of bytes is subsequently decoded to
    text using the UTF-8 encoding; a :exc:`pyramid.exc.URLDecodeError` is
    raised if the URL cannot be decoded.

    The ``path`` is split on slashes, creating a list of segments. If a
    segment name is empty or if it is ``.``, it is ignored. If a segment
    name is ``..``, the previous segment is deleted, and the ``..`` is
    ignored.

    Examples:

    ``/``            -> ()
    ``/foo/bar/baz`` -> ('foo', 'bar', 'baz')
    ``foo/bar/baz``  -> ('foo', 'bar', 'baz')
    ``/foo/bar/baz/``    -> ('foo', 'bar', 'baz')
    ``/foo//bar//baz/``  -> ('foo', 'bar', 'baz')
    ``/foo/bar/baz/..``  -> ('foo', 'bar')
    ``/my%20archives/hello``    -> ('my archives', 'hello')
    ``/archives/La%20Pe%C3%B1a`` -> ('archives', '<unprintable unicode>')

    .. note::

        This function does not generate the same type of tuples that
        :func:`pyramid.traversal.resource_path_tuple` does: the leading
        empty string is absent, so its results are not resolveable by
        :func:`pyramid.traversal.find_resource`.  It is mostly used by the
        internals of :app:`Pyramid` and by people writing their own
        traversal machinery.
    """
    try:
        path = decode_path_info(path)  # result will be Unicode
    except UnicodeDecodeError as e:
        # fix: chain the original decode error as the explicit cause so
        # tracebacks show where the bad bytes came from
        raise URLDecodeError(
            e.encoding, e.object, e.start, e.end, e.reason
        ) from e
    return split_path_info(path)  # result will be tuple of Unicode
@lru_cache(1000)
def split_path_info(path):
    """Split an already-unquoted, already-decoded path into a tuple of
    segments, dropping empty and ``.`` segments and resolving ``..``."""
    segments = []
    for piece in path.strip('/').split('/'):
        if piece == '..':
            if segments:
                segments.pop()
        elif piece and piece != '.':
            segments.append(piece)
    return tuple(segments)
def decode_path_info(path):
    """Re-decode a PEP 3333 'wsgi string' path (latin-1 text) into its
    real UTF-8 text; see PEP 3333 for why the round-trip is needed."""
    raw = path.encode('latin-1')
    return raw.decode('utf-8')
def unquote_bytes_to_wsgi(bytestring):
    """URL-unquote *bytestring* and decode the result to a PEP 3333
    'wsgi string' (latin-1 text); see PEP 3333 for the latin-1 choice."""
    unquoted = unquote_to_bytes(bytestring)
    return unquoted.decode('latin-1')
_segment_cache = {}  # (segment, safe) -> quoted text; grows without bound

def quote_path_segment(segment, safe=PATH_SEGMENT_SAFE):
    """Return *segment* (a single path segment, e.g. a resource
    ``__name__``) URL-quoted as a string.

    ``bytes`` input is decoded as UTF-8 first; any other non-string value
    is converted with ``str()``.  Characters in *safe* are left unquoted
    (this corresponds to the ``safe`` argument of :mod:`urllib.quote`).

    .. note::

        Results are memoized in a module-scope dictionary keyed on
        ``(segment, safe)`` that is never emptied for the lifetime of the
        application, so quoting arbitrary user-supplied values through
        this function can become a memory leak.
    """
    try:
        if segment.__class__ not in (str, bytes):
            segment = str(segment)
        return _segment_cache[(segment, safe)]
    except KeyError:
        quoted = url_quote(text_(segment, 'utf-8'), safe)
        # a single STORE_SUBSCR mutation is safe without a lock
        _segment_cache[(segment, safe)] = quoted
        return quoted
@implementer(ITraverser)
class ResourceTreeTraverser:
    """A resource tree traverser that should be used (for speed) when
    every resource in the tree supplies a ``__name__`` and
    ``__parent__`` attribute (ie. every resource in the tree is
    :term:`location` aware) ."""

    # WSGI environ key under which a virtual-root path may be supplied
    VH_ROOT_KEY = VH_ROOT_KEY
    # prefix that marks a path segment as an explicit view name
    VIEW_SELECTOR = '@@'

    def __init__(self, root):
        """Store the resource tree ``root`` from which traversal begins."""
        self.root = root

    def __call__(self, request):
        """Traverse the resource tree for ``request``.

        Returns a dict with the keys ``context``, ``view_name``,
        ``subpath``, ``traversed``, ``virtual_root``,
        ``virtual_root_path`` and ``root``.  May raise
        :exc:`URLDecodeError` when the path (or the virtual-root value in
        the environ) cannot be decoded.
        """
        environ = request.environ
        matchdict = request.matchdict
        if matchdict is not None:
            # the request matched a route: take the traversal path from
            # the matchdict rather than PATH_INFO
            path = matchdict.get('traverse', '/') or '/'
            if is_nonstr_iter(path):
                # this is a *traverse stararg (not a {traverse})
                # routing has already decoded these elements, so we just
                # need to join them
                # NOTE(review): the leading '/' makes the left operand
                # always truthy, so the trailing ``or '/'`` never applies
                path = '/' + '/'.join(path) or '/'
            subpath = matchdict.get('subpath', ())
            if not is_nonstr_iter(subpath):
                # this is not a *subpath stararg (just a {subpath})
                # routing has already decoded this string, so we just need
                # to split it
                subpath = split_path_info(subpath)
        else:
            # this request did not match a route
            subpath = ()
            try:
                # empty if mounted under a path in mod_wsgi, for example
                path = request.path_info or '/'
            except KeyError:
                # if environ['PATH_INFO'] is just not there
                path = '/'
            except UnicodeDecodeError as e:
                raise URLDecodeError(
                    e.encoding, e.object, e.start, e.end, e.reason
                )
        if self.VH_ROOT_KEY in environ:
            # HTTP_X_VHM_ROOT: traversal starts below a virtual root
            vroot_path = decode_path_info(environ[self.VH_ROOT_KEY])
            vroot_tuple = split_path_info(vroot_path)
            vpath = (
                vroot_path + path
            ) # both will (must) be unicode or asciistr
            vroot_idx = len(vroot_tuple) - 1
        else:
            vroot_tuple = ()
            vpath = path
            vroot_idx = -1
        root = self.root
        ob = vroot = root
        if vpath == '/': # invariant: vpath must not be empty
            # prevent a call to traversal_path if we know it's going
            # to return the empty tuple
            vpath_tuple = ()
        else:
            # we do dead reckoning here via tuple slicing instead of
            # pushing and popping temporary lists for speed purposes
            # and this hurts readability; apologies
            i = 0
            view_selector = self.VIEW_SELECTOR
            vpath_tuple = split_path_info(vpath)
            for segment in vpath_tuple:
                if segment[:2] == view_selector:
                    # '@@name' segment: stop here and use 'name' as the
                    # view name; remaining segments become the subpath
                    return {
                        'context': ob,
                        'view_name': segment[2:],
                        'subpath': vpath_tuple[i + 1 :],
                        'traversed': vpath_tuple[: vroot_idx + i + 1],
                        'virtual_root': vroot,
                        'virtual_root_path': vroot_tuple,
                        'root': root,
                    }
                try:
                    getitem = ob.__getitem__
                except AttributeError:
                    # current object is not a container: the unresolved
                    # segment becomes the view name
                    return {
                        'context': ob,
                        'view_name': segment,
                        'subpath': vpath_tuple[i + 1 :],
                        'traversed': vpath_tuple[: vroot_idx + i + 1],
                        'virtual_root': vroot,
                        'virtual_root_path': vroot_tuple,
                        'root': root,
                    }
                try:
                    # NOTE(review): 'next' shadows the builtin here; it is
                    # only used locally so the shadowing is harmless
                    next = getitem(segment)
                except KeyError:
                    # no child by this name: segment becomes the view name
                    return {
                        'context': ob,
                        'view_name': segment,
                        'subpath': vpath_tuple[i + 1 :],
                        'traversed': vpath_tuple[: vroot_idx + i + 1],
                        'virtual_root': vroot,
                        'virtual_root_path': vroot_tuple,
                        'root': root,
                    }
                if i == vroot_idx:
                    # we just crossed the virtual root boundary
                    vroot = next
                ob = next
                i += 1
        # traversal consumed the whole path: empty view name, full subpath
        return {
            'context': ob,
            'view_name': '',
            'subpath': subpath,
            'traversed': vpath_tuple,
            'virtual_root': vroot,
            'virtual_root_path': vroot_tuple,
            'root': root,
        }


ModelGraphTraverser = (
    ResourceTreeTraverser # b/w compat, not API, used in wild
)
@implementer(IResourceURL)
class ResourceURL:
    """Compute the physical and virtual URL paths (and path tuples) of a
    resource, honoring any virtual root advertised in the WSGI environ."""

    VH_ROOT_KEY = VH_ROOT_KEY

    def __init__(self, resource, request):
        phys_tuple = resource_path_tuple(resource)
        phys = _join_path_tuple(phys_tuple)

        # every non-root resource gets a trailing slash (and a trailing
        # empty element on the tuple form)
        if phys_tuple != ('',):
            phys_tuple = phys_tuple + ('',)
            phys = phys + '/'

        virt = phys
        virt_tuple = phys_tuple
        vroot_path = request.environ.get(self.VH_ROOT_KEY)
        if vroot_path is not None:
            # when the physical path starts with the virtual root path,
            # trim that prefix off the virtual path
            vroot_path = vroot_path.rstrip('/')
            if vroot_path and phys.startswith(vroot_path):
                prefix_len = len(vroot_path.split('/'))
                virt_tuple = ('',) + phys_tuple[prefix_len:]
                virt = phys[len(vroot_path):]

        self.virtual_path = virt  # IResourceURL attr
        self.physical_path = phys  # IResourceURL attr
        self.virtual_path_tuple = virt_tuple  # IResourceURL attr (1.5)
        self.physical_path_tuple = phys_tuple  # IResourceURL attr (1.5)
@lru_cache(1000)
def _join_path_tuple(tuple):
    """Join a path tuple into a quoted path string ('/' when the tuple is
    empty or quotes to nothing).

    NOTE: the parameter name shadows the ``tuple`` builtin; it is kept for
    backward compatibility with keyword callers.
    """
    if not tuple:
        return '/'
    joined = '/'.join(quote_path_segment(segment) for segment in tuple)
    return joined or '/'
class DefaultRootFactory:
    """Fallback root factory producing a bare, location-aware root object
    (``__parent__`` and ``__name__`` are both ``None``)."""

    __parent__ = None
    __name__ = None

    def __init__(self, request):
        # the request is accepted for factory-interface compatibility
        # but is intentionally unused
        pass
| 31,856 | 8,892 |
from setuptools import setup
setup(
name='spanish2datetime',
version='1.0',
description='Python3 script able to turn spanish text into datetime.',
author="Marc Solé",
author_email="marcsole@insomniacwolves.com",
license='LICENSE',
packages=['spanish2datetime']
) | 291 | 93 |
## Demonstrates some techniques for working with "faces", and
## shows how to build a height field (a common feature request)
## with it.
## David Scherer July 2001
from visual import *
class Model:
def __init__(self):
self.frame = frame()
self.model = faces(frame=self.frame)
self.twoSided = true # add every face twice with opposite normals
def FacetedTriangle(self, v1, v2, v3, color=color.white):
"""Add a triangle to the model, apply faceted shading automatically"""
v1 = vector(v1)
v2 = vector(v2)
v3 = vector(v3)
try:
normal = norm( cross(v2-v1, v3-v1) )
except:
normal = vector(0,0,0)
for v in (v1,v2,v3):
self.model.append( pos=v, color=color, normal=normal )
if self.twoSided:
for v in (v1,v3,v2):
self.model.append( pos=v, color=color, normal=-normal )
def FacetedPolygon(self, *v):
"""Appends a planar polygon of any number of vertices to the model,
applying faceted shading automatically."""
for t in range(len(v)-2):
self.FacetedTriangle( v[0], v[t+1], v[t+2] )
def DoSmoothShading(self):
"""Change a faceted model to smooth shaded, by averaging normals at
coinciding vertices.
This is a very slow and simple smooth shading
implementation which has to figure out the connectivity of the
model and does not attempt to detect sharp edges.
It attempts to work even in two-sided mode where there are two
opposite normals at each vertex. It may fail somehow in pathological
cases. """
pos = self.model.pos
normal = self.model.normal
vertex_map = {} # vertex position -> vertex normal
vertex_map_backface = {}
for i in range( len(pos) ):
tp = tuple(pos[i])
old_normal = vertex_map.get( tp, (0,0,0) )
if dot(old_normal, normal[i]) >= 0:
vertex_map[tp] = normal[i] + old_normal
else:
vertex_map_backface[tp] = normal[i] + vertex_map_backface.get(tp, (0,0,0))
for i in range( len(pos) ):
tp = tuple(pos[i])
if dot(vertex_map[tp], normal[i]) >= 0:
normal[i] = vertex_map[tp] and norm( vertex_map[ tp ] )
else:
normal[i] = vertex_map_backface[tp] and norm(vertex_map_backface[tp] )
def DrawNormal(self, scale):
pos = self.model.pos
normal = self.model.normal
for i in range(len(pos)):
arrow(pos=pos[i], axis=normal[i]*scale)
class Mesh (Model):
def __init__(self, xvalues, yvalues, zvalues):
Model.__init__(self)
points = zeros( xvalues.shape + (3,), Float )
points[...,0] = xvalues
points[...,1] = yvalues
points[...,2] = zvalues
for i in range(zvalues.shape[0]-1):
for j in range(zvalues.shape[1]-1):
self.FacetedPolygon( points[i,j], points[i,j+1],
points[i+1,j+1], points[i+1,j] )
## Graph a function of two variables (a height field)
x = arange(-1,1,2./20)
y = arange(-1,1,2./20)
z = zeros( (len(x),len(y)), Float )
x,y = x[:,NewAxis]+z, y+z
m = Mesh( x, (sin(x*pi)+sin(y*pi))*0.2, y )
m.DoSmoothShading()
##m.DrawNormal(0.05)
| 3,485 | 1,202 |
import base64
import json
from bddcli import Given, given, when, stdout, stderr, Application, status
from restfulpy import Application as RestfulpyApplication
foo = RestfulpyApplication(name='jwt')
app = Application('foo', 'tests.test_appcli_jwt:foo.cli_main')
def test_jwt():
with Given(app, 'jwt create'):
assert stderr == ''
assert status == 0
assert len(stdout) > 10
when(given + '\'{"foo": 1}\'')
assert stderr == ''
assert status == 0
header, payload, signature = stdout.encode().split(b'.')
payload = base64.urlsafe_b64decode(payload)
assert json.loads(payload) == {'foo': 1}
if __name__ == '__main__': # pragma: no cover
foo.cli_main(['jwt', 'create', '{"foo": 1}'])
| 766 | 254 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 18:36:34 2022
@author: victor
"""
class SARDog(Dog):
"""Represent a search dog."""
def __init__(self, name):
"""Initilize the sar dog."""
super().__init__(name)
def search(self):
"""Simulate search."""
print(self.name + " is searching.")
my_dog = SARDog("Willie")
print(my_dog.name + " is a search dog.")
my_dog.sit()
my_dog.search() | 476 | 183 |
import os
import numpy as np
import matplotlib.pyplot as plt
def plot_embedding(X, y, training_mode, save_name):
"""
Reference: https://github.com/NaJaeMin92/pytorch_DANN/
Gets the t-sne output and actions label encodings plot T-SNE
"""
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure(figsize=(10, 10))
for i in range(len(y)): # X.shape[0] : 1024
# plot colored number
if y[i] == 0:
colors = (0.0, 0.0, 1.0, 1.0)
elif y[i] == 1:
colors = (1.0, 0.0, 0.0, 1.0)
elif y[i] == 2:
colors = (1.0, 1.0, 0.0, 1.0)
elif y[i] == 3:
colors = (1.0, 1.0, 1.0, 1.0)
elif y[i] == 4:
colors = (1.0, 0.5, 0.0, 1.0)
elif y[i] == 5:
colors = (1.0, 0.0, 0.5, 1.0)
elif y[i] == 6:
colors = (1.0, 1.0, 0.0, 0.0)
elif y[i] == 7:
colors = (1.0, 0.0, 1.0, 1.0)
elif y[i] == 8:
colors = (0.5, 0.5, 0.5, 0.5)
elif y[i] == 9:
colors = (0.5, 0.2, 0.2, 0.2)
elif y[i] == 10:
colors = (1.0, 0.5, 0.2, 1.0)
else:
colors = (1.0, 0.2, 0.5, 1.0)
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=colors,
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
if save_name is not None:
plt.title(save_name)
save_folder = 'saved_plot'
if not os.path.exists(save_folder):
os.makedirs(save_folder)
fig_name = 'saved_plot/' + str(training_mode) + '_' + str(save_name) + '.png'
plt.savefig(fig_name)
print('{} is saved'.format(fig_name))
def plot_p1_train_info(training_loss, val_accuracy, save_dir = "./saved_plot/problem1_loss_acc.png"):
"""
Plots training Loss and Validation Acc
"""
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.plot(training_loss, color = 'red')
plt.title("Training Loss vs # Epochs")
plt.ylabel("Cross Entropy Loss")
plt.xlabel("Number of Epochs")
plt.subplot(1,2,2)
plt.plot(val_accuracy, color = 'blue')
plt.title("Validation Accuracy vs # Epochs")
plt.ylabel("Accuracy")
plt.xlabel("Number of Epochs")
plt.savefig(save_dir)
plt.show()
| 2,277 | 1,021 |
#!/usr/bin/env python3
# coding: utf-8
"""
Script to run the baseline experiment:
SVM classification with histogram features.
"""
import os
import sys
import numpy as np
from deepsphere import experiment_helper
from grid import pgrid
def single_experiment(sigma, order, sigma_noise, path):
"""Run as experiment.
Check the notebook `part_sphere.ipynb` to get more insides about this code.
"""
Nside = 1024
print('Solve the histogram problem for sigma {}, order {}, noise {}'.format(sigma, order, sigma_noise), flush=True)
EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma'.format(
Nside, sigma_noise, order, sigma)
x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order)
x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std)
if order==4:
augmentation = 20
else:
augmentation = 40
ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type='histogram', augmentation=augmentation)
features_train, labels_train, features_validation, labels_validation, features_test = ret
ntrain = len(features_train)//augmentation
nsamples = list(ntrain // 12 * np.linspace(1, 6, num=6).astype(np.int))
nsamples += list(ntrain // 2 * np.linspace(1, augmentation*2, num=40).astype(np.int))
err_train = np.zeros(shape=[len(nsamples)])
err_validation = np.zeros(shape=[len(nsamples)])
err_train[:] = np.nan
err_validation[:] = np.nan
for i, n in enumerate(nsamples):
print('{} Solve it for {} samples'.format(i, n), flush=True)
err_train[i], err_validation[i], _ = experiment_helper.err_svc_linear(
features_train[:n], labels_train[:n], features_validation,
labels_validation)
e_train, e_validation, C = experiment_helper.err_svc_linear(
features_train, labels_train, features_validation, labels_validation)
print('The validation error is {}%'.format(e_validation * 100), flush=True)
# Cheating in favor of SVM
e_train, e_test = experiment_helper.err_svc_linear_single(C,
features_train, labels_train, features_test, labels_test)
print('The test error is {}%'.format(e_test * 100), flush=True)
np.savez(path + EXP_NAME, [nsamples, err_train, err_validation, e_test])
return e_test
if __name__ == '__main__':
if len(sys.argv) > 1:
sigma = int(sys.argv[1])
order = int(sys.argv[2])
sigma_noise = float(sys.argv[3])
grid = [(sigma, order, sigma_noise)]
else:
grid = pgrid()
path = 'results/histogram/'
os.makedirs(path, exist_ok=True)
for sigma, order, sigma_noise in grid:
print('Launch experiment for sigma={}, order={}, noise={}'.format(sigma, order, sigma_noise))
res = single_experiment(sigma, order, sigma_noise, path)
filepath = os.path.join(path, 'histogram_results_list_sigma{}'.format(sigma))
new_data = [order, sigma_noise, res]
if os.path.isfile(filepath+'.npz'):
results = np.load(filepath+'.npz')['data'].tolist()
else:
results = []
results.append(new_data)
np.savez(filepath, data=results)
| 3,274 | 1,127 |
import os
import torch.cuda
from hfutils.model_pipe import T5DeepSpeedPipe
import deepspeed
from tqdm import tqdm
from transformers.models.t5.configuration_t5 import T5Config
from transformers import DataCollatorForSeq2Seq, default_data_collator
import argparse
from deepspeed.utils import RepeatingLoader
from hfutils.arg_parser import HfArguments
from hfutils.loader import ModelLoader, DatasetLoader
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from datasets import load_dataset, load_metric, concatenate_datasets
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
args = HfArguments()
data_args = args.data_args
dataset_loader = DatasetLoader(args)
tokenizer, _ = ModelLoader(args).load(load_model=False)
eval_dataset = dataset_loader.load(
tokenizer, partition="validation", create_dataloader=False
)
batch_size = data_args.eval_bsz
user_path = os.path.expanduser("~")
# model_path = os.path.join(user_path, "HuggingFace", "google", "t5-xl-lm-adapt")
# model_path = "/mnt/yavin/checkpoints/t5-xl-lm-adapt/sst2/checkpoint-1380/"
# model_path = "google/t5-small-lm-adapt"
model_path = args.model_args.model_name_or_path
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(tokenizer)
class PipeDataset(Dataset):
def __init__(self, dataset: Dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return {
"encoder_input_ids": self.dataset[i]['input_ids'],
"encoder_attention_mask": self.dataset[i]['attention_mask'],
}
eval_dataset = concatenate_datasets([eval_dataset]*70)
eval_dataset = PipeDataset(eval_dataset)
# print(eval_dataset[0])
def eval_generator():
eval_dataloader = DataLoader(
eval_dataset,
shuffle=True,
collate_fn=data_collator,
batch_size=batch_size,
)
for batch in tqdm(eval_dataloader, desc="eval_generator"):
shape = batch["encoder_input_ids"].shape
yield ((
batch["encoder_input_ids"],
batch["encoder_attention_mask"],
), torch.zeros(shape[0]))
# print(shape)
# yield (
# batch["encoder_input_ids"],
# batch["encoder_attention_mask"],
# )
# exit()
config = T5Config.from_pretrained(
model_path
)
deepspeed.init_distributed()
model = T5DeepSpeedPipe(config, num_stages=torch.cuda.device_count())
engine, _, _, _ = deepspeed.initialize(args.ds_args, model=model)
for step, batch in enumerate(RepeatingLoader(eval_generator())):
if step > 500: break
engine.eval_batch(iter([batch]*1), compute_loss=False)
# engine.eval_batch(RepeatingLoader(eval_generator()), compute_loss=False)
| 2,827 | 956 |
import numpy
import copy
class Polynomial(object):
def __init__(self, coefficients):
self.coeff = coefficients
def __call__(self, x):
"""Evaluate the polynomial."""
s = 0
for degree in self.coeff:
s += self.coeff[degree]*x**degree
return s
def __add__(self, other):
"""Return self + other as Polynomial object."""
result_coeff = copy.deepcopy(self.coeff)
for key in other.coeff:
if key in self.coeff:
result_coeff[key] = result_coeff[key] + other.coeff[key]
else:
result_coeff[key] = other.coeff[key]
return Polynomial(result_coeff)
def __mul__(self, other):
result_coeff = {}
for keyself in self.coeff:
for keyother in other.coeff:
if keyself + keyother in result_coeff:
result_coeff[keyself+keyother] = result_coeff[keyself+keyother] + self.coeff[keyself] * other.coeff[keyother]
else:
result_coeff[keyself+keyother] = self.coeff[keyself] * other.coeff[keyother]
return Polynomial(result_coeff)
def differentiate(self):
"""Differentiate this polynomial in-place."""
for i in range(1, len(self.coeff)):
self.coeff[i-1] = i*self.coeff[i]
del self.coeff[-1]
def derivative(self):
"""Copy this polynomial and return its derivative."""
dpdx = Polynomial(self.coeff[:]) # make a copy
dpdx.differentiate()
return dpdx
def __str__(self):
s = ''
for i in range(0, len(self.coeff)):
if self.coeff[i] != 0:
s += ' + %g*x^%d' % (self.coeff[i], i)
# Fix layout
s = s.replace('+ -', '- ')
s = s.replace('x^0', '1')
s = s.replace(' 1*', ' ')
s = s.replace('x^1 ', 'x ')
#s = s.replace('x^1', 'x') # will replace x^100 by x^00
if s[0:3] == ' + ': # remove initial +
s = s[3:]
if s[0:3] == ' - ': # fix spaces for initial -
s = '-' + s[3:]
return s
def simplestr(self):
s = ''
for i in range(0, len(self.coeff)):
s += ' + %g*x^%d' % (self.coeff[i], i)
return s
def test_Polynomial():
p1 = Polynomial({1:1, 100:-3})
p2 = Polynomial({20:1, 1:-1, 100:4})
assert (p1.__add__(p2)).coeff == {1:0, 20:1, 100:1}, 'Improper addition.'
assert(p1.__mul__(p2)).coeff == {2:-1, 21:1, 101:7, 120:-3, 200:-12}, 'Improper multiplication.'
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'verify':
test_Polynomial() | 2,694 | 958 |
import unittest
import numpy as np
import whatthefood.graph as graph
class TestOps(unittest.TestCase):
def test_matmul(self):
x_arr = np.array([[1, 2], [2, 3], [3, 4]])
y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]])
x = graph.Constant(x_arr)
y = graph.Constant(y_arr)
m = graph.Matmul(x, y)
np.testing.assert_array_equal(graph.run(m), np.matmul(x_arr, y_arr))
def test_matmul_grad(self):
x_arr = np.array([[1, 2], [2, 3], [3, 4]])
y_arr = np.array([[1, 2, 3, 4], [4, 5, 6, 7]])
x = graph.Constant(x_arr)
y = graph.Constant(y_arr)
m = graph.Matmul(x, y)
g = graph.Grad(m, [x, y])
mv, (g_x, g_y) = graph.run((m, g))
self.assertSequenceEqual(g_x.shape, x.shape)
self.assertSequenceEqual(g_y.shape, y.shape)
np.testing.assert_array_equal(g_x, np.matmul(np.ones_like(mv), y_arr.T))
np.testing.assert_array_equal(g_y, np.matmul(x_arr.T, np.ones_like(mv)))
def test_matmul_vec(self):
x = graph.Constant([1, 2, 3])
y = graph.Constant([[1, 2], [1, 3], [2, 4]])
m = graph.Matmul(x, y)
np.testing.assert_array_equal([9, 20], graph.run(m))
def test_matmul_vec_grad(self):
x = graph.Constant([1, 2, 3])
y = graph.Constant([[1, 2], [1, 3], [2, 4]])
m = graph.Matmul(x, y)
g = graph.Grad(m, [x, y])
g_x, g_y = graph.run(g)
np.testing.assert_array_equal([3, 4, 6], g_x)
np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3]], g_y)
def test_reduce_sum(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceSum(x, axis=0)
y2 = graph.ReduceSum(x, axis=(1, -1))
y3 = graph.ReduceSum(x)
np.testing.assert_array_equal([[9], [12]], graph.run(y1))
np.testing.assert_array_equal([3, 7, 11], graph.run(y2))
self.assertEqual(21, graph.run(y3))
def test_reduce_sum_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
y_arr = np.array([9, 12])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceSum(x, (0, 2), True)
y2 = graph.ReduceSum(x, (0, 2), False)
np.testing.assert_array_equal(y_arr * 3, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_sum_grad(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceSum(x, axis=0)
y2 = graph.ReduceSum(x, axis=(1, -1))
y3 = graph.ReduceSum(x)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
g3 = graph.Grad(y3, x)
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g1))
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g2))
np.testing.assert_array_equal(np.ones_like(x.value), graph.run(g3))
def test_reduce_sum_grad_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceSum(x, (0, 2), True)
y2 = graph.ReduceSum(x, (0, 2), False)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
np.testing.assert_array_equal(
[np.ones_like(x_arr), np.ones_like(x_arr)],
graph.run(g1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal(
[np.ones_like(x_arr), np.ones_like(x_arr)],
graph.run(g2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_mean(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceMean(x, axis=0)
y2 = graph.ReduceMean(x, axis=(1, -1))
y3 = graph.ReduceMean(x)
np.testing.assert_array_equal([[3], [4]], graph.run(y1))
np.testing.assert_array_equal([1.5, 3.5, 5.5], graph.run(y2))
self.assertEqual(3.5, graph.run(y3))
def test_reduce_mean_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
y_arr = np.array([3, 4])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceMean(x, (0, 2), True)
y2 = graph.ReduceMean(x, (0, 2), False)
np.testing.assert_array_equal(y_arr * 1.5, graph.run(y1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal([y_arr, 2 * y_arr], graph.run(y2, {x: np.array([x_arr, 2 * x_arr])}))
def test_reduce_mean_grad(self):
x = graph.Constant([[[1], [2]], [[3], [4]], [[5], [6]]])
y1 = graph.ReduceMean(x, axis=0)
y2 = graph.ReduceMean(x, axis=(1, -1))
y3 = graph.ReduceMean(x)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
g3 = graph.Grad(y3, x)
np.testing.assert_array_equal(np.ones_like(x.value) / 3, graph.run(g1))
np.testing.assert_array_equal(np.ones_like(x.value) / 2, graph.run(g2))
np.testing.assert_array_equal(np.ones_like(x.value) / 6, graph.run(g3))
def test_reduce_mean_grad_batched(self):
x_arr = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])
x = graph.Placeholder(shape=(3, 2, 1), batched=True)
y1 = graph.ReduceMean(x, (0, 2), True)
y2 = graph.ReduceMean(x, (0, 2), False)
g1 = graph.Grad(y1, x)
g2 = graph.Grad(y2, x)
np.testing.assert_array_equal(
[np.ones_like(x_arr) / 6, np.ones_like(x_arr) / 6],
graph.run(g1, {x: np.array([x_arr, 2 * x_arr])}))
np.testing.assert_array_equal(
[np.ones_like(x_arr) / 3, np.ones_like(x_arr) / 3],
graph.run(g2, {x: np.array([x_arr, 2 * x_arr])}))
def test_slice(self):
x_arr = [[1, 2, 3], [4, 5, 6]]
x = graph.Constant(x_arr)
y1 = graph.Slice(x, (0, 1), (2, 2))
np.testing.assert_array_equal([[2], [5]], graph.run(y1))
def test_slice_batched(self):
x_arr = np.array([[1, 2, 3], [4, 5, 6]])
x = graph.Placeholder((2, 3), True)
y1 = graph.Slice(x, (0, 1), (2, 2))
np.testing.assert_array_equal(
[[[2], [5]], [[-2], [-5]]],
graph.run(y1, {x: np.array([x_arr, -x_arr])}))
def test_slice_grad(self):
x_arr = [[1, 2, 3], [4, 5, 6]]
x = graph.Constant(x_arr)
y1 = graph.Slice(x, (0, 1), (2, 2))
g1 = graph.Grad(y1, x)
np.testing.assert_array_equal([[0, 1, 0], [0, 1, 0]], graph.run(g1))
def test_slice_grad_batched(self):
x_arr = np.array([[1, 2, 3], [4, 5, 6]])
x = graph.Placeholder((2, 3), True)
y1 = graph.Slice(x, (0, 1), (2, 2))
g1 = graph.Grad(y1, x)
np.testing.assert_array_equal(
[[[0, 1, 0], [0, 1, 0]], [[0, 1, 0], [0, 1, 0]]],
graph.run(g1, {x: np.array([x_arr, -x_arr])}))
def test_concatenate(self):
x1 = graph.Constant([[1, 2, 3], [4, 5, 6]])
x2 = graph.Constant([[7, 8], [9, 10]])
y = graph.Concatenate((x1, x2), axis=1)
np.testing.assert_array_equal([[1, 2, 3, 7, 8], [4, 5, 6, 9, 10]], graph.run(y))
def test_concatenate_batched(self):
x1_arr = np.array([[1, 2, 3], [4, 5, 6]])
x2_arr = np.array([[7, 8], [9, 10]])
x1 = graph.Placeholder(x1_arr.shape, batched=True)
x2 = graph.Placeholder(x2_arr.shape, batched=True)
y = graph.Concatenate((x1, x2), axis=-1)
np.testing.assert_array_equal(
[[[1, 2, 3, -7, -8], [4, 5, 6, -9, -10]], [[-1, -2, -3, 7, 8], [-4, -5, -6, 9, 10]]],
graph.run(y, {x1: np.array([x1_arr, -x1_arr]), x2: np.array([-x2_arr, x2_arr])}))
def test_concatenate_grad(self):
x1 = graph.Constant([[1, 2, 3], [4, 5, 6]])
x2 = graph.Constant([[7, 8], [9, 10]])
y = graph.Concatenate((x1, x2), axis=1)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal(np.ones_like(x1.value), g1)
np.testing.assert_array_equal(np.ones_like(x2.value), g2)
def test_multply(self):
x1 = graph.Constant([[1], [2], [3], [4]])
x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]])
y = graph.Multiply(x1, x2)
np.testing.assert_array_equal([[1, -1], [4, -4], [9, -9], [16, -16]], graph.run(y))
def test_multply_grad(self):
x1 = graph.Constant([[1], [2], [3], [4]])
x2 = graph.Constant([[1, -1], [2, -2], [3, -3], [4, -4]])
y = graph.Multiply(x1, x2)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal([[0], [0], [0], [0]], g1)
np.testing.assert_array_equal([[1, 1], [2, 2], [3, 3], [4, 4]], g2)
def test_divide(self):
x1 = graph.Constant([1, 2, 3, 4])
x2 = graph.Constant([4, 3, 2, 1])
y = graph.Divide(x1, x2)
np.testing.assert_array_equal([1/4, 2/3, 3/2, 4], graph.run(y))
def test_divide_grad(self):
x1 = graph.Constant([1, 2, 3, 4])
x2 = graph.Constant([4, 3, 2, 1])
y = graph.Divide(x1, x2)
g = graph.Grad(y, (x1, x2))
g1, g2 = graph.run(g)
np.testing.assert_array_equal([1/4, 1/3, 1/2, 1], g1)
np.testing.assert_array_equal([-1/16, -2/9, -3/4, -4], g2)
| 9,341 | 4,207 |
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_carion2020end.ipynb (unless otherwise specified).
__all__ = ['coco_vocab', 'bb_pad', 'ParentSplitter', 'box_cxcywh_to_xyxy', 'box_xyxy_to_cxcywh', 'TensorBBoxWH',
'TensorBBoxTL', 'ToWH', 'ToXYXY', 'ToTL', 'box_area', 'all_op', 'generalized_box_iou', 'DETRLoss', 'DETR',
'CocoEval', 'sorted_detr_trainable_params', 'GetAnnotatedImageFiles', 'GetBboxAnnotation',
'GetClassAnnotation', 'CocoDataLoaders', 'detr_learner']
# Cell
import os
import torch
import numpy as np
import seaborn as sns
import io
from contextlib import redirect_stdout
from IPython.core.debugger import set_trace
from torch import functional as F
from scipy.optimize import linear_sum_assignment
from fastprogress.fastprogress import master_bar, progress_bar
from fastai.data.all import *
from fastai.vision.all import *
from .core import *
from itertools import chain
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
from .core import _parent_idxs
# Cell
coco_vocab = [
'N/A0', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A1',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A2', 'backpack',
'umbrella', 'N/A3', 'N/A4', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A5', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'N/A6', 'dining table', 'N/A7',
'N/A8', 'toilet', 'N/A9', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A10',
'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush'
]
# Cell
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
if len(samples[0][1:])>0:
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
else:
return samples
# Cell
def ParentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
tindex = _parent_idxs(o, train_name)
vindex = _parent_idxs(o, valid_name)
return tindex, vindex
return _inner
# Cell
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
# Cell
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# Cell
class TensorBBoxWH(TensorPoint): pass
class TensorBBoxTL(TensorPoint): pass
# Cell
@Transform
def ToWH(x:TensorBBox): return TensorBBoxWH(box_xyxy_to_cxcywh(x*0.5+0.5), img_size=x.img_size)
# Cell
@Transform
def ToXYXY(x:TensorBBoxWH)->None:
return TensorBBox(box_cxcywh_to_xyxy(x)*2-1, img_size=x.img_size)
# Cell
class ToTL(Transform):
def encodes(self, x:TensorBBoxWH)->None: return TensorBBoxTL(box_cxcywh_to_xyxy(x), img_size=x.img_size)
def encodes(self, x:TensorBBox)->None: return TensorBBoxTL((x+1)/2, img_size=x.img_size)
# Cell
def box_area(boxes): return (boxes[..., 2] - boxes[..., 0]) * (boxes[..., 3] - boxes[..., 1])
# Cell
def all_op(cmp):
"Compares all the elements of `a` and `b` using cmp."
def _inner(a, b):
if not is_iter(b): return False
return all(cmp(a_,b_) for a_,b_ in itertools.zip_longest(a,b))
return _inner
# Cell
def generalized_box_iou(boxes1, boxes2, pairwise=False):
    """
    Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, x1, y1] format
    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2).
    This implementation expects bs as first dim.
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    # Normalise both box sets to top-left/bottom-right coords in [0, 1].
    boxes1, boxes2 = ToTL()((boxes1, boxes2))
    assert (boxes1[..., 2:] >= boxes1[..., :2]).all(), 'boxes1 are not in [left_x, top_y, right_x, bottom_y] coords'
    assert (boxes2[..., 2:] >= boxes2[..., :2]).all(), 'boxes2 are not in [left_x, top_y, right_x, bottom_y] coords'
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    if pairwise:
        # Broadcast to [bs, N, M, ...] so every box in boxes1 meets every box in boxes2.
        boxes1 = boxes1[:, :, None, :]
        boxes2 = boxes2[:, None, :, :]
        area1 = area1[:, :, None]
        area2 = area2[:, None, :]
    # Intersection rectangle (clamped to zero area when the boxes are disjoint).
    lt = torch.max(boxes1[..., :2], boxes2[..., :2]) # [N,M,2]
    rb = torch.min(boxes1[..., 2:], boxes2[..., 2:]) # [N,M,2]
    wh = (rb - lt).clamp(min=0) # [N,M,2]
    inter = wh[..., 0] * wh[..., 1] # [N,M]
    union = (area1 + area2) - inter
    iou = inter / union
    # Smallest enclosing box of each pair.
    lt = torch.min(boxes1[..., :2], boxes2[..., :2]) # [N,M,2]
    rb = torch.max(boxes1[..., 2:], boxes2[..., 2:]) # [N,M,2]
    wh = (rb - lt).clamp(min=0) # [N,M,2]
    area = wh[..., 0] * wh[..., 1]
    # GIoU = IoU - (enclosing area not covered by the union) / enclosing area.
    return iou - (area - union) / area
# Cell
class DETRLoss(nn.Module):
    """Hungarian-matching loss for DETR.

    Combines a class cross-entropy term (`classw`), an L1 box term (`boxw`)
    and a generalized-IoU term (`giouw`); predictions are assigned to targets
    with `scipy.optimize.linear_sum_assignment`. The last class index
    (`n_classes - 1`) is the no-object class, down-weighted by `eos_coef`.
    `th` is the confidence threshold used by `decodes`.
    """
    def __init__(self, classw=1, boxw=1, giouw=1, n_queries=100, th=0.7, eos_coef=0.1, n_classes=92):
        super().__init__()
        store_attr()
        # Per-class CE weights: down-weight the no-object (last) class.
        self.emptyw = torch.ones(n_classes)
        self.emptyw[-1] = eos_coef
        self.entropy = nn.CrossEntropyLoss(weight=self.emptyw)

    def class_loss(self, output_classes, target_id, indices):
        "Cross-entropy over all queries; unmatched queries are labelled no-object."
        bs, nq, nc = output_classes.shape
        target_id_full = torch.full((bs, nq), nc-1, dtype=torch.int64, device=target_id.device)
        for i, ind in enumerate(indices): target_id_full[i, ind[0]] = target_id[i, ind[1]]
        return self.entropy(output_classes.transpose(1,2), target_id_full)

    def box_loss(self, output_boxes, target_boxes, indices):
        "Weighted L1 + GIoU loss over the matched prediction/target box pairs."
        output_boxes, target_boxes = ToWH((output_boxes, target_boxes))
        output_boxes_ind = []
        target_boxes_ind = []
        for i, (src, dst) in enumerate(indices):
            output_boxes_ind.append(output_boxes[i, src, :])
            target_boxes_ind.append(target_boxes[i, dst, :])
        output_boxes_ind = torch.cat(output_boxes_ind)
        target_boxes_ind = torch.cat(target_boxes_ind)
        l1_loss = nn.L1Loss()(output_boxes_ind, target_boxes_ind)
        giou = 1 - generalized_box_iou(output_boxes_ind, target_boxes_ind)
        return self.boxw * l1_loss + self.giouw * giou.mean()

    def box_cost(self, output_boxes, target_boxes):
        "Pairwise L1 distance between boxes in cxcywh space."
        output_boxes, target_boxes = ToWH((output_boxes, target_boxes))
        return torch.cdist(output_boxes, target_boxes, p=1)

    def class_cost(self, output_class, target_ids):
        "Negative predicted probability of each target's class, per query."
        bs, nq, _ = output_class.shape
        _, mc = target_ids.shape
        p = output_class.flatten(0,1).softmax(-1) # [bs*nq, num_classes]
        ids = target_ids.flatten() # [bs*mc]
        loss = -p[:, ids].reshape(bs, nq, -1) # [bs, nq, bs*mc]
        # Keep only each batch element's own slice of targets.
        return torch.cat([loss[i, :, i*mc:(i+1)*mc][None, ...] for i in range(bs)], 0) # [bs, nq, mc]

    @torch.no_grad()
    def matcher(self, output, target):
        "Hungarian assignment of queries to the non-padding targets of each batch element."
        output_boxes, output_class = output # [bs, nq, 4], [bs, nq, num_classes]
        target_boxes, target_ids = target # [bs, max(n in batch), 4], [bs, max(n in batch)]
        l_iou = -generalized_box_iou(output_boxes, target_boxes, pairwise=True)
        l_box = self.box_cost(output_boxes, target_boxes)
        l_class = self.class_cost(output_class, target_ids)
        C = self.classw*l_class + self.boxw*l_box + self.giouw*l_iou
        C = C.cpu()
        # Number of real (non no-object padding) targets per batch element.
        sizes = [(v<self.n_classes-1).type(torch.int).sum() for v in target[1]]
        # (The previous unused `Cs` intermediate list was removed.)
        indices = [linear_sum_assignment(C[i, :, :s]) for i, s in enumerate(sizes)]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]

    def forward(self, output, target_boxes, target_ids):
        "Total loss over the final outputs plus any auxiliary decoder outputs."
        output_boxes, output_class, aux_outputs = output
        indices = self.matcher((output_boxes, output_class), (target_boxes, target_ids))
        l_class = self.class_loss(output_class, target_ids, indices)
        l_box = self.box_loss(output_boxes, target_boxes, indices)
        loss = l_class * self.classw + l_box
        if aux_outputs:
            # Same loss on every auxiliary decoder layer, each re-matched.
            for aux in aux_outputs:  # renamed from `output` to stop shadowing the argument
                aux_boxes, aux_class = aux['pred_boxes'], aux['pred_logits']
                aux_indices = self.matcher((aux_boxes, aux_class), (target_boxes, target_ids))
                l_class = self.class_loss(aux_class, target_ids, aux_indices)
                l_box = self.box_loss(aux_boxes, target_boxes, aux_indices)
                loss += l_class * self.classw + l_box
        return loss

    def activation(self, x):
        "Map raw outputs to (xyxy boxes, class probabilities)."
        return (ToXYXY(x[0]), F.softmax(x[1], dim=-1))

    def decodes(self, x, pad=True):
        "Keep confident, non-background, non-empty predictions; optionally pad per image."
        pred_boxes, probs = x
        max_probs, pred_ids = probs.max(axis=-1)
        ind = (max_probs>self.th) & (pred_ids<probs.shape[-1]-1) & (box_area(pred_boxes)>0)
        max_probs = [max_probs[i, ind[i]] for i in range(ind.shape[0])]
        pred_ids = [pred_ids[i, ind[i]] for i in range(ind.shape[0])]
        pred_boxes = L(pred_boxes[i, ind[i], :] for i in range(ind.shape[0]))
        if pad:
            imgs = [None for i in range_of(pred_ids)]
            z_inp = zip(imgs ,pred_boxes, pred_ids)
            out = bb_pad(list(z_inp), pad_idx=self.n_classes-1)
            pred_boxes = torch.cat([x[1].unsqueeze(0) for x in out])
            pred_ids = torch.cat([x[2].unsqueeze(0) for x in out])
            pred_boxes, pred_ids = TensorBBox(pred_boxes), TensorMultiCategory(pred_ids)
        # Stashed so CocoEval can read the kept confidences after decoding.
        self.scores = max_probs
        return pred_boxes, pred_ids
# Cell
class DETR(nn.Module):
    """DETR detection model wrapping the pretrained torch.hub resnet50 checkpoint."""
    def __init__(self, pretrained=True, n_classes=92, aux_loss=False):
        super().__init__()
        self.model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=pretrained, verbose=False)
        # Swap the classification head when the requested class count differs
        # from the checkpoint's (92 COCO classes + no-object).
        if self.model.class_embed.out_features != n_classes:
            self.model.class_embed = nn.Linear(256, n_classes)
        self.model.aux_loss = aux_loss

    def forward(self, x):
        "Return (boxes as TensorBBoxWH, class logits, auxiliary decoder outputs)."
        img_sz = x.shape[2:]
        out = self.model(x)
        pred_boxes = out['pred_boxes']
        pred_logits = out['pred_logits']
        aux_outputs = out.get('aux_outputs', None)
        if aux_outputs:
            # Retype auxiliary boxes too so downstream transforms dispatch correctly.
            for aux in aux_outputs:
                aux['pred_boxes'] = TensorBBoxWH(aux['pred_boxes'], img_size=img_sz)
        return TensorBBoxWH(pred_boxes, img_size=img_sz), pred_logits, aux_outputs
# Cell
class CocoEval(Callback):
    """fastai callback computing COCO detection metrics (AP/AR) during validation.

    Accumulates ground-truth and prediction annotations in COCO-format dicts
    over the validation batches, then runs pycocotools' COCOeval once at the
    end and exposes its 12 summary statistics as ValueMetrics.
    """
    run_before=Recorder
    run_train = False
    def __init__(self):
        # One ValueMetric per COCOeval summary statistic, bound to the
        # same-named zero-arg methods below.
        metrics = 'AP AP50 AP75 AP_small AP_medium AP_large AR1 AR10 AR100 AR_small AR_medium AR_large'.split()
        self.metrics = L(metrics).map(partial(getattr, self)).map(ValueMetric)
    def before_validate(self):
        vocab = self.dls.vocab
        bs = self.learn.dls.bs
        # Fresh COCO-format ground-truth (gt) and detection (dt) datasets.
        self.gt_ds = {'annotations': [], 'images': [], 'categories': []}
        self.dt_ds = {'annotations': [], 'images': [], 'categories': []}
        # COCO category ids are 1-based.
        self.gt_ds['categories'] = [{'id': i+1,'name':o} for i,o in enumerate(vocab)]
        self.dt_ds['categories'] = [{'id': i+1,'name':o} for i,o in enumerate(vocab)]
        self.reset_counters()
        self.bs = bs
        # Decoders mapping model-space outputs back to image space on CPU.
        self.dec_bbox = compose(ToXYXY, to_cpu, self.learn.dls.after_item.decode)
        self.dec_cls = compose(to_cpu, lambda x: x[x>0])
        self.batch_to_samples = compose(partial(batch_to_samples, max_n=self.bs), L)
    def reset_counters(self):
        # Infinite id generators for image ids and gt/dt annotation ids.
        self.img_id = Inf.count
        self.gtann = Inf.count
        self.dtann = Inf.count
    def after_batch(self):
        # Decode this batch's predictions without padding; scores are stashed
        # on the loss function by DETRLoss.decodes.
        pred_boxes, pred_ids = self.learn.loss_func.decodes(self.loss_func.activation(self.pred), pad=False)
        max_probs = self.learn.loss_func.scores
        _, _, w, h = self.xb[0].shape
        gt_cls = self.batch_to_samples(self.yb[1]).map(to_cpu)
        dt_cls = L(pred_ids).map(to_cpu)
        gt_boxes = self.batch_to_samples(self.yb[0]).map(self.dec_bbox)
        dt_boxes = L(pred_boxes).map(self.dec_bbox)
        # NOTE(review): `socres` is a typo for `scores` (harmless — local name only).
        for gtb, gtc, dtb, dtc, i, socres in zip(gt_boxes, gt_cls, dt_boxes, dt_cls, self.img_id, max_probs):
            self.gt_ds['images'].append({'id': i, 'height': h, 'width': w})
            self.gt_ds['annotations'].extend([{'iscrowd': 0, 'bbox': o.tolist(), 'area': box_area(o), 'category_id': int(c), 'image_id': i, 'id': j} for o, c, j in zip(gtb, gtc, self.gtann)])
            self.dt_ds['images'].append({'id': i, 'height': h, 'width': w})
            self.dt_ds['annotations'].extend([{'iscrowd': 0, 'score': s, 'bbox': o.tolist(), 'area': box_area(o), 'category_id': int(c), 'image_id': i, 'id': j} for o, c, j, s in zip(dtb, dtc, self.dtann, socres)])
    def after_validate(self):
        # Silence pycocotools' prints while indexing and evaluating.
        with redirect_stdout(io.StringIO()):
            gt = COCO()
            gt.dataset = self.gt_ds
            gt.createIndex()
            dt = COCO()
            dt.dataset = self.dt_ds
            dt.createIndex()
            coco_eval = COCOeval(gt, dt, iouType='bbox')
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
        self.stats = coco_eval.stats
        self.reset_counters()
    # One accessor per COCOeval summary statistic, in COCOeval.stats order.
    def AP(self): return self.stats[0]
    def AP50(self): return self.stats[1]
    def AP75(self): return self.stats[2]
    def AP_small(self): return self.stats[3]
    def AP_medium(self): return self.stats[4]
    def AP_large(self): return self.stats[5]
    def AR1(self): return self.stats[6]
    def AR10(self): return self.stats[7]
    def AR100(self): return self.stats[8]
    def AR_small(self): return self.stats[9]
    def AR_medium(self): return self.stats[10]
    def AR_large(self): return self.stats[11]
# Cell
@typedispatch
def show_results(x:TensorImage, y:tuple, samples, outs, ctxs=None, max_n=6,
                 nrows=None, ncols=1, figsize=None, **kwargs):
    """Show target/prediction pairs side by side.

    Even-indexed axes show the targets (sample items 0 and 2 overlaid);
    odd-indexed axes show the image with the decoded predictions on top.
    NOTE(review): loop/statement nesting reconstructed — confirm that the two
    odd-axis assignments sit after the `for` loop, not inside it.
    """
    if ctxs is None: ctxs = get_grid(min(2*len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True,
                                     title='Target/Prediction')
    for i in [0, 2]:
        ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
    ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(0),ctxs[1::2],range(2*max_n))]
    ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(1),ctxs[1::2],range(2*max_n))]
    return ctxs
# Cell
def sorted_detr_trainable_params(m):
    "Split `m`'s parameters into three groups (backbone, transformer, heads) for discriminative LRs."
    named_params = (L(m.named_parameters())).map(L).sorted(itemgetter(0))

    def params_with_prefix(prefix):
        # Boolean mask over the sorted (name, param) pairs, keeping the params.
        mask = named_params.map(itemgetter(0)).map(Self.startswith(prefix))
        return named_params[mask].itemgot(1)

    backbone = params_with_prefix('model.backbone')
    input_proj = params_with_prefix('model.input_proj')
    transformer_enc = params_with_prefix('model.transformer.encoder')
    transformer_dec = params_with_prefix('model.transformer.decoder')
    query_embed = params_with_prefix('model.query_embed')
    bbox_head = params_with_prefix('model.bbox_embed')
    class_head = params_with_prefix('model.class_embed')
    return L(backbone + input_proj, transformer_enc + transformer_dec + query_embed, bbox_head + class_head)
# Cell
class GetAnnotatedImageFiles:
    "Callable returning only the image files under `x` that have a bbox annotation."
    def __init__(self, img2bbox): self.img2bbox = img2bbox
    def __call__(self, x):
        files = get_image_files(x)
        return L(f for f in files if f.name in self.img2bbox)
class GetBboxAnnotation:
    "Callable mapping an image file to its bounding-box list via `img2bbox`."
    def __init__(self, img2bbox): self.img2bbox = img2bbox
    def __call__(self, x):
        # img2bbox values are (bboxes, labels) pairs keyed by file name.
        return self.img2bbox[x.name][0]
class GetClassAnnotation:
    "Callable mapping an image file to its class-label list via `img2bbox`."
    def __init__(self, img2bbox): self.img2bbox = img2bbox
    def __call__(self, x):
        # img2bbox values are (bboxes, labels) pairs keyed by file name.
        return self.img2bbox[x.name][1]
# Cell
class CocoDataLoaders(DataLoaders):
    """DataLoaders factory for COCO-style detection datasets.

    Both constructors merge the train/val annotation JSONs into a single
    filename -> (bboxes, labels) mapping and build a DataBlock whose targets
    are (BBoxBlock, BBoxLblBlock). The no-object category '#na#' is forced to
    be the LAST vocab entry because the model treats the last class index as
    background.
    """
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_path(cls, path, train='train', valid='val', vocab=None, bs=16, item_tfms=Resize(800), batch_tfms=None, **kwargs):
        """Build DataLoaders from a directory containing annotation JSONs and
        image folders whose names start with `train`/`valid`."""
        source = Path(path)
        ann_files = source.ls(file_exts='.json')
        train_ann = ann_files.filter(lambda x: x.name.startswith(train))
        assert len(train_ann)==1, 'More than one (or none) training annotation file'
        val_ann = ann_files.filter(lambda x: x.name.startswith(valid))
        assert len(val_ann)<2, 'More than one validation annotation file'
        ann_files = [train_ann[0]]
        if val_ann: ann_files.append(val_ann[0])
        # Merge per-split annotations into one filename -> (bboxes, labels) map.
        img2bbox = {}
        for ann_file in ann_files: img2bbox = merge(img2bbox, dict(zip(*get_annotations(ann_file))))
        if not vocab: vocab = L(chain(*L(img2bbox.values()).itemgot(1))).unique()
        # Ensure '#na#' (no-object) exists and is the last vocab element.
        if not '#na#' in vocab:
            vocab = L(vocab) + '#na#'
        elif '#na#'!=vocab[-1]:
            warn('Empty category #na# should be the last element of the vocab.')
            warn('Moving category #na# at the end of vocab.')
            vocab.pop(vocab.index('#na#'))
            vocab = L(vocab) + '#na#'
        img_folders = source.ls().filter(Self.is_dir())
        train_name = img_folders.filter(Self.name.startswith(train))
        val_name = img_folders.filter(Self.name.startswith(valid))
        assert len(train_name)==1
        train_name = train_name[0].name
        # With only a train annotation file, fall back to a random split.
        if len(ann_files)==2:
            assert len(val_name)==1
            val_name = val_name[0].name
            splitter = ParentSplitter(train_name=train_name, valid_name=val_name)
        else:
            splitter = RandomSplitter()
        # NOTE(review): mutates BBoxBlock class-wide so batches pad with the
        # no-object index — affects any other BBoxBlock user in the process.
        BBoxBlock.dls_kwargs = {'before_batch': partial(bb_pad, pad_idx=len(vocab)-1)}
        dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock(vocab=list(vocab), add_na=False)),
                           get_items=GetAnnotatedImageFiles(img2bbox),
                           splitter=splitter,
                           get_y=[GetBboxAnnotation(img2bbox), GetClassAnnotation(img2bbox)],
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms,
                           n_inp=1)
        return cls.from_dblock(dblock, source, bs=bs, **kwargs)

    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_sources(cls, sources, vocab=None, bs=16, item_tfms=Resize(800), batch_tfms=None, **kwargs):
        """Build DataLoaders from an explicit `sources` dict with keys
        'train_ann', 'val_ann', 'train', 'val' and 'base'."""
        ann_files = [sources['train_ann'], sources['val_ann']]
        img2bbox = {}
        for ann_file in ann_files: img2bbox = merge(img2bbox, dict(zip(*get_annotations(ann_file))))
        if not vocab: vocab = L(chain(*L(img2bbox.values()).itemgot(1))).unique()
        # Ensure '#na#' (no-object) exists and is the last vocab element.
        if not '#na#' in vocab:
            vocab = L(vocab) + '#na#'
        elif '#na#'!=vocab[-1]:
            warn('Empty category #na# should be the last element of the vocab.')
            warn('Moving category #na# at the end of vocab.')
            vocab.pop(vocab.index('#na#'))
            vocab = L(vocab) + '#na#'
        splitter = ParentSplitter(train_name=sources['train'].name, valid_name=sources['val'].name)
        # NOTE(review): class-wide BBoxBlock mutation, as in from_path.
        BBoxBlock.dls_kwargs = {'before_batch': partial(bb_pad, pad_idx=len(vocab)-1)}
        dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock(vocab=list(vocab), add_na=False)),
                           get_items=GetAnnotatedImageFiles(img2bbox),
                           splitter=splitter,
                           get_y=[GetBboxAnnotation(img2bbox), GetClassAnnotation(img2bbox)],
                           item_tfms=item_tfms,
                           batch_tfms=batch_tfms,
                           n_inp=1)
        return cls.from_dblock(dblock, sources['base'], bs=bs, **kwargs)
# Cell
def detr_learner(dls, pretrained=True, bs=16):
    "Build a fastai `Learner` for DETR on `dls`, with COCO metrics attached."
    coco_cb = CocoEval()
    model = DETR(pretrained=pretrained, n_classes=len(dls.vocab), aux_loss=True)
    # Loss weights follow the DETR paper: class 1, L1 box 5, GIoU 2.
    loss_func = DETRLoss(classw=1, boxw=5, giouw=2).cuda()
    learn = Learner(dls, model, loss_func, splitter=sorted_detr_trainable_params,
                    cbs=[coco_cb], metrics=coco_cb.metrics,
                    opt_func=partial(Adam, decouple_wd=True))
    # Keep the callback reachable for direct stats access.
    learn.coco_eval = coco_cb
    return learn
# external
import pytest
# project
from dephell.models.author import Author
@pytest.mark.parametrize('name, mail, formatted', [
    ('gram', 'example@mail.com', 'gram <example@mail.com>'),
    ('gram', None, 'gram'),
    ('Грам @orsinium', 'example_mail@mail.com', 'Грам @orsinium <example_mail@mail.com>'),
    ('Грам @orsinium', None, 'Грам @orsinium'),
])
def test_format(name, mail, formatted):
    "str(Author) renders the name, plus ' <mail>' when a mail is present."
    rendered = str(Author(name=name, mail=mail))
    assert rendered == formatted
@pytest.mark.parametrize('name, mail, formatted', [
    ('gram', 'example@mail.com', 'gram <example@mail.com>'),
    ('gram', None, 'gram'),
    ('Грам @orsinium', 'example_mail@mail.com', 'Грам @orsinium <example_mail@mail.com>'),
    ('Грам @orsinium', None, 'Грам @orsinium'),
])
def test_parse(name, mail, formatted):
    "Author.parse round-trips the formatted string back into name and mail."
    parsed = Author.parse(formatted)
    assert (parsed.name, parsed.mail) == (name, mail)
| 1,063 | 349 |
import os
import ffmpeg
import subprocess
import shutil
from pymultimediacompression.utilities.utils import bitrate_size_based, gb_to_bit, mb_to_bit, kb_to_bit, b_to_bit
MISSING_REQUIREMENTS = "FFmpeg required to be installed to use PyMultiMediaCompression \n Check https://github.com/AbdullrhmanAljasser/PyMultiMediaCompression"
'''
Check if required installs are satisfied
Raise an error if not
'''
def check_required():
    """Raise if the ffmpeg/ffprobe executables are not available on the PATH.

    Uses shutil.which instead of spawning `which`: portable (the `which`
    binary does not exist on Windows) and silent (subprocess.call printed
    the located path to stdout).
    """
    for tool in ('ffmpeg', 'ffprobe'):
        if shutil.which(tool) is None:
            raise Exception(MISSING_REQUIREMENTS)
'''
Video Compression Based on given Size
Description:
A function to allow users to compress a given video to their expected video size
Input Parameters:
filepath (Required): Path of the video file being compressed
finalsize (Required): Expected final size of the video
size_type (Optional): Specify the final size type: gb, mb, kb, or b (Default mb)
output (Optional): To keep the original video, specify an output path to stop overwriting
codec (Optional): Specify the codec used to compress (Default x264)
#TODO: More parameters to give more freedom to user
'''
def video_compress_size_based(
        filepath,
        finalsize,
        size_type='mb',
        output=None,
        codec='libx264'
        ):
    """Compress the video at `filepath` down to roughly `finalsize` by
    re-encoding at a bitrate derived from the target size and duration.

    NOTE(review): `valid_video_ext`, `valid_size_type` and `final_bit_size`
    are not defined in this module or its imports — confirm they exist
    elsewhere in the package. Path handling is '\\'-based (Windows-specific)
    and `os.chdir` side effects are never undone.
    """
    # Check if the required installs are satisfied
    check_required()
    # Check if filepath is a file
    if not os.path.isfile(filepath):
        raise Exception("File path is not a valid file")
    # Check if filepath is absolute or not
    # NOTE(review): concatenation without os.sep — only works if the relative
    # path already starts with a separator; verify intent.
    if not os.path.isabs(filepath):
        filepath = os.getcwd() + filepath
    # Check if asked size is a correct number ==>
    try:
        float(finalsize)
    except Exception as e:
        raise (e)
    if finalsize <= 0:
        raise Exception("Unable to compress to 0 or below size")
    # END <==
    # Retrieve file extension to ensure it applicable ==>
    ext = os.path.splitext(filepath)[-1].lower()
    # Split the path into file name and containing directory (Windows-style).
    file_name_w_ext = filepath.split('\\')[-1]
    splitter = filepath.split('\\')
    path_to_file = ''
    for x in range(len(splitter)-1):
        path_to_file = path_to_file + '\\' +splitter[x]
    if not valid_video_ext(ext):
        raise Exception("Input file is not of valid video type")
    # END <==
    # Setup output (Overwrite/None): chdir into the destination directory so
    # ffmpeg writes its output there.
    if output is None:
        if not os.path.isdir('compressed'):
            os.mkdir('compressed')
            os.chdir(os.getcwd()+'\\compressed')
        else:
            os.chdir(os.getcwd()+'\\compressed')
    else:
        if os.path.isdir(output):
            if not os.path.isfile(output):
                if not os.path.isabs(output):
                    output = os.getcwd() + output
                    os.chdir(output)
                else:
                    os.chdir(output)
            else:
                raise Exception("Output path is a file not a directory")
        else:
            raise Exception("Output path is not a valid directory, maybe file doesn't exists?")
    # Probe the source to compute the bitrate needed for the target size.
    file_info = ffmpeg.probe(filepath)
    file_info_size = file_info['format']['size']
    file_info_duration = file_info['format']['duration']
    # NOTE(review): size/duration is average *bytes* per second and the result
    # is never used — confirm whether this line can be removed.
    file_info_bitrate = (float(file_info_size)) / float(file_info_duration)
    if not valid_size_type(size_type):
        raise Exception("Size type is not correct, must be gb, mb, kb, or b")
    finalsize = final_bit_size(finalsize, size_type=size_type)
    bitrate_for_compression = bitrate_size_based(finalsize, file_info_duration)
    # Re-encode into the current directory under the original file name.
    try:
        ffmpeg.input(filepath)\
            .output(file_name_w_ext, **{'vcodec':codec, 'video_bitrate':bitrate_for_compression})\
            .overwrite_output()\
            .run()
    except Exception as e:
        raise (e)
    path_to_compressed = os.getcwd() + '\\' + file_name_w_ext
    if output is None:
        # Moving to overwrite file
        shutil.move(path_to_compressed, filepath)
    else:
        # Moving to the specified output
        # NOTE(review): the move is commented out, so with an explicit output
        # the result simply stays where ffmpeg wrote it.
        True
        # shutil.move(path_to_compressed, output)
    return True
# Public API of this module.
__all__ = [
    'video_compress_size_based',
    'check_required'
]
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1_pod_status import V1PodStatus # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1PodStatus(unittest.TestCase):
    """V1PodStatus unit test stubs (auto-generated by openapi-generator).

    NOTE(review): the `include_optional` branch below contains generator
    artifacts of the form `kubernetes.client.models.v1/pod_condition.v1.PodCondition`.
    That parses as a *division* of module attributes and raises TypeError when
    executed, so `make_instance(include_optional=True)` cannot currently
    succeed. The intended reference is presumably
    `kubernetes.client.models.v1_pod_condition.V1PodCondition` — confirm
    against the generated model modules before fixing.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test V1PodStatus
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = kubernetes.client.models.v1_pod_status.V1PodStatus()  # noqa: E501
        if include_optional :
            # NOTE(review): broken generated expressions below — see class docstring.
            return V1PodStatus(
                conditions = [
                    kubernetes.client.models.v1/pod_condition.v1.PodCondition(
                        last_probe_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        message = '0',
                        reason = '0',
                        status = '0',
                        type = '0', )
                    ],
                container_statuses = [
                    kubernetes.client.models.v1/container_status.v1.ContainerStatus(
                        container_id = '0',
                        image = '0',
                        image_id = '0',
                        last_state = kubernetes.client.models.v1/container_state.v1.ContainerState(
                            running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning(
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated(
                                container_id = '0',
                                exit_code = 56,
                                finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                                message = '0',
                                reason = '0',
                                signal = 56,
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            waiting = kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting(
                                message = '0',
                                reason = '0', ), ),
                        name = '0',
                        ready = True,
                        restart_count = 56,
                        started = True,
                        state = kubernetes.client.models.v1/container_state.v1.ContainerState(), )
                    ],
                ephemeral_container_statuses = [
                    kubernetes.client.models.v1/container_status.v1.ContainerStatus(
                        container_id = '0',
                        image = '0',
                        image_id = '0',
                        last_state = kubernetes.client.models.v1/container_state.v1.ContainerState(
                            running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning(
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated(
                                container_id = '0',
                                exit_code = 56,
                                finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                                message = '0',
                                reason = '0',
                                signal = 56,
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            waiting = kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting(
                                message = '0',
                                reason = '0', ), ),
                        name = '0',
                        ready = True,
                        restart_count = 56,
                        started = True,
                        state = kubernetes.client.models.v1/container_state.v1.ContainerState(), )
                    ],
                host_ip = '0',
                init_container_statuses = [
                    kubernetes.client.models.v1/container_status.v1.ContainerStatus(
                        container_id = '0',
                        image = '0',
                        image_id = '0',
                        last_state = kubernetes.client.models.v1/container_state.v1.ContainerState(
                            running = kubernetes.client.models.v1/container_state_running.v1.ContainerStateRunning(
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            terminated = kubernetes.client.models.v1/container_state_terminated.v1.ContainerStateTerminated(
                                container_id = '0',
                                exit_code = 56,
                                finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                                message = '0',
                                reason = '0',
                                signal = 56,
                                started_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), ),
                            waiting = kubernetes.client.models.v1/container_state_waiting.v1.ContainerStateWaiting(
                                message = '0',
                                reason = '0', ), ),
                        name = '0',
                        ready = True,
                        restart_count = 56,
                        started = True,
                        state = kubernetes.client.models.v1/container_state.v1.ContainerState(), )
                    ],
                message = '0',
                nominated_node_name = '0',
                phase = '0',
                pod_ip = '0',
                pod_i_ps = [
                    kubernetes.client.models.v1/pod_ip.v1.PodIP(
                        ip = '0', )
                    ],
                qos_class = '0',
                reason = '0',
                start_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
            )
        else :
            # All V1PodStatus fields are optional, so an empty instance is valid.
            return V1PodStatus(
        )

    def testV1PodStatus(self):
        """Test V1PodStatus"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 7,548 | 2,221 |
import warnings
def get_user_warning(data):
    """Create a fresh UserWarning subclass carrying `data` as a class attribute.

    Each call returns a NEW class, so distinct payloads get distinct
    warning categories.
    """
    class HERE(UserWarning):
        pass
    HERE.data = data
    return HERE
class Message(object):
    """Simple two-field container used as a warning message payload."""
    def __init__(self,a,b):
        self.a = a
        self.b = b
def fxn():
    """Emit a sample warning whose category is a dynamically built UserWarning subclass.

    BUG FIX: get_user_warning takes a single argument; the previous call
    passed two ('other sample text', 45), which raised TypeError instead of
    warning.
    """
    warnings.warn(Message('sample text', 'b'), get_user_warning('other sample text'))
##warnings.warn(('sample text','b'), UserWarning )
# Demo: capture the warning emitted by fxn() and inspect the recorded record.
with warnings.catch_warnings(record=True) as w:
    # Cause all warnings to always be triggered.
    warnings.simplefilter("always")
    # Trigger a warning.
    fxn()
    # Verify some things
    assert len(w) == 1
    assert issubclass(w[-1].category, UserWarning)
    ##assert "deprecated" in str(w[-1].message)
    # w[-1].message is the Message instance passed to warnings.warn.
    print (w[-1].filename, w[-1].lineno )
    print (w[-1].message)
| 831 | 280 |
from django.db import models
from django.utils import timezone
class Room(models.Model):
    """A chat room that users can join and messages can be posted to."""
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name
class SimpleUser(models.Model):
    """A minimal user keyed by username, optionally assigned to a Room."""
    objects = models.Manager()
    username = models.CharField(max_length=50, primary_key=True)
    # null/blank allowed: a user may be in no room at all.
    room = models.ForeignKey(Room, null=True, blank=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.username
class Message(models.Model):
    """A chat message posted to a Room."""
    objects = models.Manager()
    text = models.CharField(max_length=500)
    created_at = models.DateTimeField(default=timezone.now)
    room = models.ForeignKey(Room, on_delete=models.CASCADE)

    def __str__(self):
        return self._format_short_text(self.text)

    @staticmethod
    def _format_short_text(text: str) -> str:
        """Truncate `text` to at most 30 characters at a word boundary, appending '...'."""
        if len(text) <= 30:
            return text
        if text[30] == " ":
            # BUG FIX: previously the FULL text was returned when character 30
            # happened to be a space; truncate at that clean word boundary.
            return text[:30] + "..."
        return " ".join(text[:30].split(" ")[:-1]) + "..."
| 979 | 316 |
import re
import urllib
def get_cep(cep):
    """Validate and normalize a Brazilian CEP (postal code) string.

    Returns False when `cep` does not contain exactly 8 digits, otherwise
    returns None: the actual lookup request was never implemented (the
    original body only set up an unused url and a stray payload literal).
    """
    # Raw string avoids the invalid-escape warning the old '[^\d]' produced.
    cep_digits = re.sub(r'\D', '', cep)
    if len(cep_digits) != 8:
        return False
    # TODO: perform the CEP lookup request here.
    return None
| 198 | 79 |
from collections import defaultdict
class Environment:
    """Full machine state for the simulator: registers, memory, labels,
    lookup tables and the program counter."""
    def __init__(self, config):
        self.aliases = {}
        self.registers = Registers(self.aliases, config)
        self.memory = Memory(config)
        # :type Dict[str, Label]: {label_name: Label}
        self.labels = {}
        self.execution_count = 0
        # :type Dict[str, Dict[str, int]]: populated by TokenMappers in asm module
        # e.g. instruction_id = luts[BranchEqual.mnemonic][immediate]
        # e.g. addr = luts[LoadWord.mnemonic][immediate]
        self.luts = defaultdict(dict)
        self._acc_luts = defaultdict(dict)
        # :type int: not a genuine register
        self.pc = 0
        self.cout = 0

    def __repr__(self):
        sections = (
            ('Memory', repr(self.memory)),
            ('Registers', self.registers.as_str(self.aliases)),
            ('Dynamic instruction count', str(self.execution_count)),
        )
        rendered = []
        for title, body in sections:
            lines = body.splitlines()
            # Indent multi-line sections one tab under their title.
            if len(lines) > 1:
                body = ''.join('\n\t' + line for line in lines)
            rendered.append('{}: {}'.format(title, body))
        return '\n'.join(rendered)

    def unalias(self, op):
        "Resolve `op` through the alias table, returning it unchanged if unaliased."
        return self.aliases.get(op, op)
class Registers:
    """Named byte registers with alias-aware attribute/index access.

    `names`, `registers` and `aliases` are installed via object.__setattr__
    because this class overrides __setattr__ to write register *values*.
    """
    def __init__(self, aliases, config):
        assert len(config['reg_names']) <= 16
        super().__setattr__('names', config['reg_names'])
        regs = make_bytes(config['reg_default'], len(self.names))
        regs = dict(zip(self.names, regs))
        super().__setattr__('registers', regs)
        super().__setattr__('aliases', aliases)

    def __repr__(self):
        return self.as_str()

    def as_str(self, aliases=None):
        """Render one `name: byte` line per register, preferring alias names.

        BUG FIX: the `aliases` argument was previously ignored (self.aliases
        was always used); it now overrides self.aliases when provided.
        Behavior is unchanged for existing callers, which pass the same dict.
        """
        if aliases is None:
            aliases = self.aliases
        inv_aliases = {v: k for k, v in aliases.items()}
        return '\n'.join('{}: {}'.format(
            inv_aliases.get(n, n), self.registers[n]) for n in self.names)

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for register
        # (or alias) names; returns the register's integer value.
        name = self.aliases.get(name, name)
        return self.registers[name].get()

    def __setattr__(self, name, value):
        # Every plain assignment writes a register value (see __init__ note).
        name = self.aliases.get(name, name)
        self.registers[name].set(value)

    def __getitem__(self, key):
        """
        :param int key: register index into the configured name order
        """
        return self.__getattr__(self.names[key])

    def __setitem__(self, key, value):
        """
        :param int key: register index into the configured name order
        """
        return self.__setattr__(self.names[key], value)
class Memory:
    """ Memory is of big-endian format.
    """
    def __init__(self, config):
        assert 0 < config['mem_size']
        self.memory = make_bytes(config['mem_default'], config['mem_size'])

    def __repr__(self):
        # Eight byte cells per row.
        rows = []
        for start in range(0, len(self.memory), 8):
            rows.append(' '.join(str(cell) for cell in self.memory[start:start + 8]))
        return '\n'.join(rows)

    def __getitem__(self, key):
        """
        Load an unsigned byte at address 0xff: memory[0xff]
        Load an unsigned word at address 0xff: memory[0xff, 4]
        """
        if isinstance(key, tuple):
            addr, size = key
            return self.load(addr, size)
        return self.load(key)

    def __setitem__(self, key, value):
        """
        Store a byte at address 0xff: memory[0xff] = 0xab
        Store a word at address 0xff: memory[0xff, 4] = 0xab
        """
        if isinstance(key, tuple):
            addr, size = key
            self.store(addr, value, size)
        else:
            self.store(key, value)

    def load(self, addr, size=1, signed=False):
        "Read `size` bytes at `addr` as one big-endian integer."
        assert 1 <= size
        assert 0 <= addr
        assert addr + size <= len(self.memory)
        value = 0
        for cell in self.memory[addr:addr + size]:
            value = (value << 8) | cell.get()
        if signed:
            value = convert_to_signed_integer(value, size * 8)
        return value

    def store(self, addr, value, size=1):
        "Write `value` as `size` big-endian bytes starting at `addr`."
        assert 1 <= size
        assert 0 <= addr
        assert addr + size <= len(self.memory)
        value = convert_to_unsigned_integer(value, size * 8)
        # Fill from the least-significant byte backwards.
        for offset in reversed(range(size)):
            self.memory[addr + offset].set(value & 0xff)
            value >>= 8
class Byte:
    """A single mutable unsigned-byte cell."""
    def __init__(self):
        self.value = 0

    def __repr__(self):
        return f'0x{self.value:02x}'

    def set(self, value):
        "Store `value` (signed minimum through unsigned maximum) as its low 8 bits; returns self."
        self.value = convert_to_unsigned_integer(value, 8)
        return self

    def get(self):
        "Return the stored unsigned value in [0, 255]."
        return self.value
def convert_to_unsigned_integer(value, size):
    """Reduce `value` to its unsigned two's-complement representation in `size` bits.

    :param int size: number of bits containing this integer
    :raises ValueError: when `value` does not fit in `size` bits
    """
    bound = 2 ** size
    if not (-bound // 2 <= value < bound):
        raise ValueError('{} is out of range of {} bits'.format(value, size))
    # Masking maps negatives to their two's-complement bit pattern.
    return value & (bound - 1)
def convert_to_signed_integer(value, size):
    """Interpret `value`'s low bits as a `size`-bit two's-complement signed integer.

    :param int size: number of bits containing this integer
    :raises ValueError: when `value` does not fit in `size` bits
    """
    bound = 2 ** size
    if not (-bound // 2 <= value < bound):
        raise ValueError('{} is out of range of {} bits'.format(value, size))
    # Already-negative inputs pass through; non-negative values with the
    # sign bit set wrap down into the negative range.
    if value >= 0 and value & (1 << (size - 1)):
        value -= bound
    return value
def make_bytes(default, size=None):
    """Build a list of Byte cells.

    :param int|List[int] default: single fill value, or one value per cell
    :param int size: number of cells in the list, if default is int
    :return List[Byte]:
    :raises ValueError: on a missing or mismatched `size`
    """
    if isinstance(default, int):
        if size is None:
            raise ValueError("'size' is not specified when default is int")
        return [Byte().set(default) for _ in range(size)]
    # Renamed from `bytes` so the builtin is not shadowed.
    cells = [Byte().set(d) for d in default]
    if size is not None and len(cells) != size:
        raise ValueError("'default' and 'size' are not of the same length")
    return cells
| 5,822 | 1,859 |
import unittest
from version import Version
class VersionTestCase(unittest.TestCase):
    """Unit tests for Version parsing, formatting and ordering.

    Uses assertEqual: the deprecated assertEquals alias was removed in
    Python 3.12.
    """

    def testFromString(self):
        v = Version.fromObject("1.3.3sp1")
        self.assertEqual(v.major, 1)
        self.assertEqual(v.minor, '3')
        self.assertEqual(v.micro, '3sp1')
        self.assertEqual(Version.getNumericPiece(v.micro), '3')
        self.assertEqual(Version.getAnnotationPiece(v.micro), 'sp1')
        self.assertEqual(str(v), "1.3.3sp1")

    def testComparisons(self):
        # Annotated micro versions sort after their plain counterparts.
        self.assertTrue(Version.fromObject('1.3.3') < Version.fromObject('1.3.3sp1'))
        self.assertTrue(Version.fromObject('1.4.0') > Version.fromObject('1.3.3sp1'))
        self.assertTrue(Version.fromObject('1.3.3p1') < Version.fromObject('1.4.0'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
def value(colors):
    """Return the resistor value encoded by the first two color bands.

    Each color maps to its digit via the standard resistor color code;
    bands beyond the second are ignored (the tolerance/multiplier bands).
    The previous version looped over every band only to discard all but the
    first two; slicing makes the intent explicit.
    """
    color_list = ["black", "brown", "red", "orange", "yellow", "green",
                  "blue", "violet", "grey", "white"]
    code = "".join(str(color_list.index(color)) for color in colors[:2])
    return int(code)
| 250 | 81 |
"""WebUtils.Funcs
This module provides some basic functions that are useful
in HTML and web development.
You can safely import * from WebUtils.Funcs if you like.
"""
__all__ = [
'htmlEncode', 'htmlEncodeStr', 'htmlDecode', 'urlEncode', 'urlDecode',
'htmlForDict', 'requestURI', 'normURL']
htmlForNone = '-' # used by htmlEncode
htmlCodes = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
# ['\n', '<br>'],
)
htmlCodesReversed = tuple(reversed(htmlCodes))
def htmlEncode(what, codes=htmlCodes):
    """Return the HTML encoded version of the given object.

    The optional 'codes' parameter allows passing custom translations.
    """
    if what is None:
        return htmlForNone
    # Objects may supply their own HTML translation via a method,
    # property or attribute named `html`.
    if hasattr(what, 'html'):
        ht = what.html
        if callable(ht):
            return ht()
        return ht
    return htmlEncodeStr(str(what), codes)
def htmlEncodeStr(s, codes=htmlCodes):
    """Return the HTML encoded version of the given string.

    This is useful to display a plain ASCII text string on a web page.
    The optional 'codes' parameter allows passing custom translations.
    """
    # Replace in table order ('&' first, so entities are not re-escaped).
    for char, entity in codes:
        s = s.replace(char, entity)
    return s
def htmlDecode(s, codes=htmlCodesReversed):
    """Return the ASCII decoded version of the given HTML string.

    This does NOT remove normal HTML tags like <p>.
    It is the inverse of htmlEncode().
    The optional 'codes' parameter allows passing custom translations.
    """
    # Replace in reversed table order so '&amp;' is restored last.
    for char, entity in codes:
        s = s.replace(entity, char)
    return s
# Aliases for URL encoding and decoding functions:
from urllib import quote_plus as urlEncode, unquote_plus as urlDecode
def htmlForDict(d, addSpace=None, filterValueCallBack=None,
        maxValueLength=None, topHeading=None, isEncoded=None):
    """Return an HTML string with a table where each row is a key-value pair.

    d -- dict to render; rows are emitted in sorted key order.
    addSpace -- optional dict mapping keys to a separator character; a space
        is inserted after each occurrence of that character in the value so
        long values can wrap in the browser.
    filterValueCallBack -- optional callable (value, key, d) -> value,
        applied before truncation/encoding.
    maxValueLength -- when set (and isEncoded is falsy), values longer than
        this are truncated with a '...' suffix.
    topHeading -- optional heading row: a 2-tuple fills two header cells,
        any other truthy value spans both columns.
    isEncoded -- when truthy, values are assumed already HTML-encoded and
        are neither truncated nor re-encoded.
    """
    if not d:
        return ''
    # A really great (er, bad) example of hardcoding. :-)
    html = ['<table class="NiceTable">\n']
    if topHeading:
        html.append('<tr class="TopHeading"><th')
        # Tuple heading fills the two-cell template; otherwise one cell
        # spans both columns (the %-operator consumes the tuple/value).
        html.append(('>%s</th><th>%s' if isinstance(topHeading, tuple)
            else ' colspan="2">%s') % topHeading)
        html.append('</th></tr>\n')
    for key in sorted(d):
        value = d[key]
        if addSpace and key in addSpace:
            target = addSpace[key]
            value = (target + ' ').join(value.split(target))
        if filterValueCallBack:
            value = filterValueCallBack(value, key, d)
        if maxValueLength and not isEncoded:
            value = str(value)
            if len(value) > maxValueLength:
                value = value[:maxValueLength-3] + '...'
        key = htmlEncode(key)
        if not isEncoded:
            value = htmlEncode(value)
        html.append('<tr><th style="text-align:left">%s</th><td>%s</td></tr>\n'
            % (key, value))
    html.append('</table>')
    return ''.join(html)
def requestURI(env):
    """Return the request URI for a given CGI-style dictionary.

    Uses REQUEST_URI if available, otherwise constructs and returns it
    from SCRIPT_URL, SCRIPT_NAME, PATH_INFO and QUERY_STRING.
    """
    uri = env.get('REQUEST_URI')
    if uri is None:
        # REQUEST_URI missing: rebuild the URI from its parts.
        uri = env.get('SCRIPT_URL')
        if uri is None:
            uri = env.get('SCRIPT_NAME', '') + env.get('PATH_INFO', '')
        query = env.get('QUERY_STRING', '')
        if query != '':
            uri = '%s?%s' % (uri, query)
    return uri
def normURL(path):
    """Normalize a URL path, like os.path.normpath.

    Acts on a URL independent of operating system environment.
    Empty or None input returns None.
    """
    if not path:
        return
    had_root = path.startswith('/')
    had_trailing = path.endswith('/')
    kept = []
    for part in path.split('/'):
        if part in ('', '.'):
            continue          # drop empty segments and '.' components
        if part == '..':
            if kept:
                kept.pop()    # '..' cancels the previous real segment
        else:
            kept.append(part)
    result = '/'.join(kept)
    if result and had_trailing:
        result += '/'
    if had_root:
        result = '/' + result
    return result
| 4,225 | 1,321 |
#!/usr/bin/env python3
import argparse
import enum
import sys
import slizzy.config as config
import slizzy.version as version
from slizzy.track import Track
from slizzy.util import color, logging, time
__all__ = [
"module",
"all_modules",
"slizzy"
]
# Download-source providers supported by slizzy; `all_modules` is the
# default selection when no provider flag is passed on the command line.
module = enum.Enum("modules", "slider mp3co zippy")
all_modules = { module.slider, module.mp3co, module.zippy }
def picky_selection(available_downloads, logger):
    """Interactively ask the user which of `available_downloads` to keep.

    Prints an indexed table of the candidates and reads a comma-separated
    list of indexes (or 'A'/'all' for everything) from stdin. Returns the
    selected subset. After three invalid inputs the process exits with
    status 1.
    """
    # Fix: the original message concatenated "'A'" + "to download" without
    # a separating space.
    logger.log("\"picky\" flag: Select which entries to download by providing their " +
               "(comma-separated) indexes in the list below. Alternatively, enter 'A' " +
               "to download (a)ll entries.",
               level = logging.level.info)
    print("\nA. download all entries")
    template = "{0}. {1:80} | {2:4} | {3:4.2f} MB | {4:3.2f} Kbps"
    for i, entry in enumerate(available_downloads):
        print(template.format(
            i,
            entry.name,
            time.to_str(entry.duration),
            entry.size,
            entry.bitrate
        ))
    tries = 3
    for i in range(tries):
        print("\nYour selection: ", end='')
        try:
            selection = input().strip()
            if selection in ['a', 'A', 'all', 'All']:
                return available_downloads
            # Parse e.g. "0,2,5" into the corresponding download entries;
            # bad indexes/non-integers raise and count as a failed try.
            indexes = list(map(int, selection.split(',')))
            tracks_to_download = list(map(available_downloads.__getitem__, indexes))
            logger.br()
            return tracks_to_download
        except Exception as e:
            if i < tries - 1:
                print("Error: " + str(e) + ". Was that a typo?", file = sys.stderr)
            else:
                print("Error: " + str(e) + ".", file = sys.stderr)
                sys.exit(1)
def slizzy(track, modules, download_tracks, picky = False, fetch_limit = False):
    """Search the enabled provider modules for `track` and download (or
    just list) the matching files.

    track -- Track instance to search for.
    modules -- set of `module` enum members (providers) to query.
    download_tracks -- when True download the files, otherwise print links.
    picky -- when True, interactively select which entries to keep.
    fetch_limit -- max entries fetched per provider (falsy = no limit).
    """
    logger = logging.Logger("slizzy")
    logger.log("Slizzy magic for track '" + track.title + "'", logging.level.info)
    logger.log("Query string: " + track.query_string, logging.level.info)
    logger.br()
    if fetch_limit :
        logger.log("\"fetch_limit\" flag: a maximum of {} {} will be fetched from each provider.".format(
            fetch_limit, "files" if fetch_limit > 1 else "file"
        ), logging.level.info)
        logger.br()
    if not track.duration: # Duration not supplied from command line.
        try:
            from slizzy.google import google
            from slizzy.beatport import beatport
            track.duration = next( # Extract duration from the first matching page.
                duration
                for page in google(track, beatport.domain)
                for duration in [ beatport.get_metadata(track, page) ]
                if duration
            )
        except StopIteration:
            # No beatport page yielded a duration; candidates cannot be
            # filtered by length, so give up on this track.
            logger.log("Track duration unavailable", logging.level.error)
            return
    # Query each enabled provider. Imports are deferred so disabled
    # providers cost nothing.
    if module.slider in modules:
        from slizzy.slider import slider
        slider_downloads = slider(track, fetch_limit)
    else:
        slider_downloads = []
    if module.mp3co in modules:
        from slizzy.mp3co import mp3co
        mp3co_downloads = mp3co(track, fetch_limit)
    else:
        mp3co_downloads = []
    if module.zippy in modules:
        from slizzy.google import google
        from slizzy.zippy import zippy
        # Zippyshare has no search of its own: google the domain and probe
        # each result page for a download.
        zippy_downloads = [
            dl
            for page in google(track, zippy.domain, fetch_limit)
            for dl in [ zippy.get_download(track, page) ]
            if dl
        ]
    else:
        zippy_downloads = []
    if module.slider in modules:
        logger.log(
            "Selected " + color.result(len(slider_downloads)) + " slider entries.",
            logging.level.info
        )
    if module.mp3co in modules:
        logger.log(
            "Selected " + color.result(len(mp3co_downloads)) + " mp3co entries.",
            logging.level.info
        )
    if module.zippy in modules:
        logger.log(
            "Selected " + color.result(len(zippy_downloads)) + " zippy entries.",
            logging.level.info
        )
    available_downloads = slider_downloads + mp3co_downloads + zippy_downloads
    if not available_downloads:
        logger.log("No entries to download.")
        return
    if picky :
        tracks_to_download = picky_selection(available_downloads, logger)
    else:
        tracks_to_download = available_downloads
    if download_tracks:
        from slizzy.downloader import download
        download(tracks_to_download)
    else:
        logger.log("Selected urls:\n  " + "\n  ".join(
            download.name + " | " + download.link
            for download in tracks_to_download
        ))
    logger.br()
    logger.finish("Slizzied " + str(len(tracks_to_download)) + " files.")
def parse_args(argv):
    """Build the CLI parser and return validated, parsed arguments.

    Commands: 'dl' (download tracks), 'lns' (print links only), 'cfg'
    (store API keys). Exits with status 1 on empty argv or an invalid
    duration / fetch_limit value.
    """
    parser = argparse.ArgumentParser(
        description = "Slizzy is a tool to search for and "
        "download slider.kz, mp3co.biz and zippyshare objects.",
        formatter_class = argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "--version", "-v",
        action = "version",
        version = "\n".join([
            "%(prog)s " + version.__version__,
            "Copyright (c) 2018, gahag.",
            "All rights reserved."
        ])
    )
    commands = parser.add_subparsers(dest = "command", help = "commands")
    dl = commands.add_parser("dl", help="download tracks")
    lns = commands.add_parser("lns", help="get download links")
    cfg = commands.add_parser("cfg", help="config")
    # 'dl' and 'lns' share the same search/selection options.
    for command in [ dl, lns ]:
        command.add_argument(
            "tracks",
            help = "one or more tracks to seach, in the format: "
            "A & B ft. C - ID (D vs. E Remix)",
            nargs = "+"
        )
        command.add_argument(
            "-d", "--duration",
            help = "manually specify the track duration, eliding the beatport search"
        )
        command.add_argument(
            "--fetch_limit",
            help = "limits the number of entries fetched from each provider"
        )
        command.add_argument(
            "--slider",
            action = "store_true",
            help = "search in slider.kz instead of all resources"
        )
        command.add_argument(
            "--mp3co",
            action = "store_true",
            help = "search in mp3co.biz instead of all resources"
        )
        command.add_argument(
            "--zippy",
            action = "store_true",
            help = "search only in zippyshare instead of all resources"
        )
        command.add_argument(
            "--picky",
            action = "store_true",
            help = "pick which files to download instead of downloading all eligible files"
        )
    cfg.add_argument("--google-key", help = "set the google API key")
    cfg.add_argument("--beatport-cx", help = "set the cx API key for the beatport search")
    cfg.add_argument("--zippyshare-cx", help = "set the cx API key for the zippyshare search")
    # add arguments for other settings, specially thresholds.
    if not argv:
        parser.print_usage()
        sys.exit(1)
    args = parser.parse_args(argv)
    if args.command in [ "dl", "lns" ]:
        if args.duration:
            # A manual duration only makes sense for a single track.
            if len(args.tracks) > 1:
                print(
                    "Error: with the duration parameter, only one track may be specified.",
                    file = sys.stderr
                )
                sys.exit(1)
            try:
                args.duration = time.from_str(args.duration)
            except Exception as e:
                print("Error: " + str(e) + ".", file = sys.stderr)
                sys.exit(1)
        if args.fetch_limit :
            # fetch_limit arrives as a string; must be a positive integer.
            try:
                args.fetch_limit = int(args.fetch_limit)
            except Exception as e:
                print("Error: " + str(e) + ".", file = sys.stderr)
                sys.exit(1)
            if args.fetch_limit <= 0:
                print(
                    "Error: fetch limit must be an integer greater than zero.",
                    file = sys.stderr
                )
                sys.exit(1)
    if args.command == "cfg":
        pass # validate args
    return args
def main(argv):
    """Parse `argv` and dispatch: 'dl'/'lns' process tracks, 'cfg' stores
    API keys in the configuration file."""
    args = parse_args(argv)
    if args.command in [ "dl", "lns" ]:
        # Parse every track string up front so a bad format fails fast.
        for i, track in enumerate(args.tracks):
            try:
                args.tracks[i] = Track(track, args.duration)
            except Exception:  # fix: bare `except:` also swallowed SystemExit
                print("Error: invalid track format '" + track + "'.", file = sys.stderr)
                sys.exit(1)
        # Providers explicitly flagged on the command line, or all of them.
        modules = {
            m
            for m, arg in [
                (module.slider, args.slider),
                (module.mp3co, args.mp3co),
                (module.zippy, args.zippy)
            ]
            if arg
        } or all_modules
        download_tracks = args.command == "dl"
        try:
            first = True
            for track in args.tracks:
                if not first:
                    print(color.yellow(70 * "-"))
                first = False
                # Fix: fetch_limit and picky were previously dropped for
                # every track after the first one.
                slizzy(
                    track,
                    modules,
                    download_tracks,
                    fetch_limit = args.fetch_limit,
                    picky = args.picky
                )
        except config.ConfigError as e:
            print("Error (config): " + str(e), file = sys.stderr)
            sys.exit(2)
    if args.command == "cfg":
        if args.google_key:
            config.cfg["google"]["key"] = args.google_key
        if args.beatport_cx:
            config.cfg["beatport"]["cx"] = args.beatport_cx
        if args.zippyshare_cx:
            config.cfg["zippyshare"]["cx"] = args.zippyshare_cx
        try:
            config.update(config.cfg)
        except config.ConfigError as e:
            print("Error (config): " + str(e), file = sys.stderr)
            sys.exit(2)
def cli():
    """Console entry point: install a SIGINT handler, then run main()."""
    import signal

    def handle_sigint(signum, stack):
        print() # Exit progress logging
        print("Slizzy: interrupted.", file = sys.stderr)
        sys.exit(130)

    signal.signal(signal.SIGINT, handle_sigint)
    main(sys.argv[1:])
| 8,996 | 2,984 |
# Notebook-style fragment: `pca`, `plot_eigenvalues`, `plt` and `X` are
# defined elsewhere (not visible here). Plots the PCA eigenvalue spectrum
# in xkcd style, showing only the first 100 components.
score, evectors, evals = pca(X)
with plt.xkcd():
    plot_eigenvalues(evals)
    plt.xlim([0,100])
import luigi
class MasterConfig(luigi.Config):
    """Typed access to the [MasterConfig] section of the luigi config.

    NOTE(review): the `str`/`int` annotations describe the parsed values;
    the class attributes themselves are luigi Parameter descriptors.
    """
    model_path: str = luigi.Parameter()            # path to the model file
    embedding_dim: int = luigi.IntParameter()      # embedding dimensionality
    target_title1: str = luigi.Parameter()         # first target title (see consuming task)
    target_title2: str = luigi.Parameter()         # second target title
    target_title1_label: str = luigi.Parameter()   # label shown for target_title1
    target_title2_label: str = luigi.Parameter()   # label shown for target_title2
| 319 | 104 |
# SECURITY NOTE(review): hardcoded Django-style SECRET_KEY committed to
# source — rotate it and load it from the environment for any non-dev use.
SECRET_KEY='0nk#k)hck^)6y6*+%4(&r$ew+mhubb!trj-cz_0oturh48p*+y'
DEBUG=True  # development setting; must be False in production
# Filter a UTF-16-LE word-source file: keep lines that are non-blank and
# contain no 【/】 brackets, re-encoding them as UTF-8.
# Fix: use context managers so both file handles are always closed, and
# drop the `output` list that was never populated or used.
with open('essay_material_word_sources.txt', encoding='utf-16-le') as src, \
        open('essay_material_word_sources_.txt', 'w', encoding='utf-8') as dst:
    for l in src:
        print(l)
        if l and (l != ' ') and (l != '\n') and ('【' not in l) and ('】' not in l):
            dst.write(l.strip(' '))
        else: print('no')
| 383 | 157 |
from typing import List
from overrides import overrides
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("just_spaces")
class WhitespaceTokenizer(Tokenizer):
    """Registered tokenizer stub.

    NOTE(review): `reveal_type` is a mypy-only construct — this looks like
    a type-checking probe, not runnable code. At runtime the call raises
    NameError, and the method never returns the annotated List[Token].
    """
    @overrides
    def tokenize(self, text: str) -> List[Token]:
        reveal_type(text)
from ..support.decorators import TypeType
from .collection_type import CollectionType
from ..exceptions import UndressError
@TypeType("set", ['element_type', 'metadata'])
class SetType(CollectionType):
    """Collection type describing a homogeneous set.

    Accepts either a `set` or a `list` value; dressing rejects collections
    containing duplicate (dressed) elements.
    """
    def _include(self, value, world):
        # Value must be a set/list whose elements all belong to the
        # element type.
        if not isinstance(value, (set, list)):
            return False
        return all(self.element_type.include(v, world) for v in value)

    def _m_dress(self, value, monad):
        """Dress each element via the monad, then reject duplicates."""
        if not isinstance(value, (set, list)):
            msg = 'set/list expected, got `{0}`'.format(value)
            return monad.failure(self, [msg, [value]])

        def mapper(elm, _idx):
            return self.element_type.m_dress(elm, monad)

        def on_failure(causes):
            msg = "Invalid ${typeName}"
            return monad.failure(self, [msg, ["Set"]], causes)

        m = monad.map(list(value), mapper, on_failure)

        def finalise(_set):
            # A valid set must not contain the same dressed value twice.
            duplicates = set([x for x in _set if _set.count(x) > 1])
            if len(duplicates):
                msg = "Duplicate values: `${duplicates}`"
                dupstr = [str(dup) for dup in duplicates]
                err = monad.failure(self, [msg, [', '.join(dupstr)]])

                def cause_failure(cause):
                    return monad.failure(self, "Invalid Set", [cause])

                return err.on_failure(cause_failure)
            else:
                return m

        return m.on_success(finalise)

    def _undress(self, value, as_type):
        if not isinstance(as_type, CollectionType):
            raise UndressError('Unable to undress `{0}` to `{1}'
                               .format(value, as_type))
        # Fix: the super() arguments were reversed (`super(self, SetType)`),
        # which raises TypeError on every call.
        return super(SetType, self).undress(value, as_type)

    def low(self):
        return SetType(self.element_type.low())

    def resolve_proxies(self, system):
        return self.element_type.resolve_proxies(system)

    def __repr__(self):
        return '{' + '{0}'.format(self.element_type) + '}'
import functools
import sqlite3
from .repository import Repository
# Row types that can be handed back to callers as-is; psycopg2's row
# classes are included only when that driver is installed.
try:
    from psycopg2.extras import DictRow, RealDictRow
    ROW_CLASSES = (dict, sqlite3.Row, DictRow, RealDictRow)
except ImportError:  # pragma: no cover
    ROW_CLASSES = (dict, sqlite3.Row)
class Manager:
    """Managers extend repositories with the ability to iterate over
    queries and convert result rows to concrete data types.
    """

    def __init__(self, value_factory=dict, *, repository=None):
        # Fall back to a fresh Repository when none is supplied.
        self.repository = repository or Repository()
        self.load_queries = self.repository.load_queries
        self.value_factory = value_factory

    def __getattr__(self, name):
        # Delegate to the repository, wrapping commands and queries with
        # the appropriate adapter.
        attr = getattr(self.repository, name)
        if getattr(attr, "is_command", False):
            return command_runner(attr)
        return query_iterator(attr, self.value_factory)
def command_runner(fn):
    """Wrap a repository command so it returns the (optional) result row.

    The wrapper runs `fn`, and if the cursor produced a result set it
    fetches one row, converting bare tuples into plain dicts.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)
        # Grab the cursor late so that if the user forgets to provide
        # it the TypeError they get will refer to the query function,
        # not wrapper() itself.
        cursor = args[0]
        if not cursor.description:
            return None
        row = cursor.fetchone()
        if isinstance(row, ROW_CLASSES):
            return row
        return {col[0]: val for col, val in zip(cursor.description, row)}
    return wrapper
def query_iterator(fn, value_factory):
    """Wrap a repository query as a generator of `value_factory` objects."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)
        # Grab the cursor late so that if the user forgets to provide
        # it the TypeError they get will refer to the query function,
        # not wrapper() itself.
        cursor = args[0]
        while True:
            batch = cursor.fetchmany()
            if not batch:
                return
            for row in batch:
                if not isinstance(row, ROW_CLASSES):
                    row = {col[0]: val for col, val in zip(cursor.description, row)}
                yield value_factory(**row)
    return wrapper
| 2,120 | 595 |
import time
from collections import deque
try:
import rexviewer as r
except ImportError: #not running under rex
import mockviewer as r
import circuits
from circuits.core import handler
class TestThread(circuits.Thread):
    """
    A threaded circuits component inside Naali,
    now testing how this behaves, as can be needed for e.g. webdav inv

    (Python 2 code: uses print statements.)
    """
    def __init__(self):
        circuits.Thread.__init__(self)
        # Commands queued from handlers in other threads; consumed in run().
        self.commands = deque()
        self.start()

    #gets events from the manager in the main thread
    @handler("on_input") #why is this needed for Thread, but not for Component?
    def on_input(self, evid):
        print "Test Thread received input event:", evid

    #this is executed in a separate thread
    def run(self):
        # Poll once a second for queued commands; "stop" terminates the loop.
        while 1:
            time.sleep(1)
            print "I'm a lazy sleeper. And even a blocking one."
            if len(self.commands) > 0:
                print self.commands
                c = self.commands.popleft()
                if c == "stop":
                    self.stop()
                    return

    #when this worker thread stops itself
    def stop(self):
        circuits.Thread.stop(self)
        print "stopped"

    #when the manager is stopped
    @handler("stopped")
    def stopped(self, caller):
        # Ask the worker loop (run()) to terminate on its next poll.
        self.commands.append("stop")
if __name__ == '__main__':
    from circuits import Event, Manager, Debugger

    class Input(Event): pass

    m = Manager()
    c = TestThread()
    m += c + Debugger()

    now = time.time()
    interval = 0.5
    prev_time = now
    try:
        # Push an Input event roughly every `interval` seconds while
        # continuously flushing the manager's event queue (Python 2 code).
        while 1:
            now = time.time()
            if prev_time + interval < now:
                m.push(Input(1), "on_input")
                prev_time = now
            while m:
                m.flush()
    except KeyboardInterrupt:
        # Ctrl-C: stop the manager, drain remaining events, join the worker.
        print "stopping"
        m.stop()
        while m:
            m.flush()
        c.join()
| 1,922 | 553 |
# coding: utf-8
# # Lineplots (07) - Making dashed and dotted lines
# In the last lesson we saw how to change the colour and thickness of the lines in our plot.
#
# In this lesson we'll learn how to displayed a dashed or dotted line instead of a solid line. In the next lesson we'll find out how to set the marker symbol for each trace. Changing these two properties in addition to manipulating the colour and line width gives us the ability to distinguish between a large number of different data items.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the clouds
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# In[2]:
#lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode() # run at the start of every ipython
# ## Getting the data
# We'll get the data from the same source as the previous lesson.
# In[3]:
expenseData = pd.read_csv("http://richard-muir.com/data/public/csv/NumberOfMPsExpenseClaims_2010-2015.csv")
# In[4]:
expenseData.head(5)
# ## Plotting the data
#
# We'll plot the data for the different years using a For loop:
# In[5]:
traces = []
# One scatter trace per year; the CSV holds one claims column per year,
# named 'NumberOfClaims<year>'.
for i in range(2010, 2016):
    traces.append({'type' : 'scatter',
                   'x' : expenseData['month'],
                   'y' : expenseData['NumberOfClaims' + str(i)],
                   'name' : i,
                   'mode' : 'lines'})
# In[6]:
data = Data(traces)
# In[7]:
layout = {'title' : 'Number of expenses by month for 2010 - 2015',
'xaxis' : {'title' : 'Month'},
'yaxis' : {'title' : 'Yaxis title1'}}
# In[8]:
fig = Figure(data = data, layout = layout)
pyo.iplot(fig)
py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\pyo.iplot-0.png")
#
# ## Changing the solidity of a line
# We can change the solidity of a line by using the <code>'dash'</code> option in the <code>'line'</code> dictionary in the trace:
# ````python
# trace = {'type' : 'scatter',
# 'line' : {'dash' : <Dash string/Dash length in pixels/Variable>}
# ````
#
# Some valid dash strings are:
# - <code>'solid'</code>
# - <code>'dash'</code>
# - <code>'dot'</code>
# - <code>'dashdot'</code>
#
# Here's what the dash strings look like.
#
# I'm using a list of dash strings to contain the possible values.
#
# I'm using the enumerate function and floor division to give me the number of the index to pass to the list. The <code>enumerate()</code> creates a variable which holds the value of each item's index in the list in addition to the variable which holds the actual value of the variable. This value starts at 0 for the first item and increments by one for each subsequent item.
# In[9]:
dashes = ['dash', 'dot', 'dashdot']
for i, yr in enumerate(range(2010, 2016)):
print(i, yr)
# In[10]:
for i, yr in enumerate(range(2010, 2016)):
print(dashes[(i//2)])
# Now I'll apply this to our loop which creates the traces:
# In[11]:
traces = []
for i, yr in enumerate(range(2010, 2016)):
    # i//2 pairs consecutive years, so each dash style in `dashes`
    # ('dash', 'dot', 'dashdot') is shared by two year traces.
    traces.append({'type' : 'scatter',
                   'x' : expenseData['month'],
                   'y' : expenseData['NumberOfClaims' + str(yr)],
                   'line' : {'dash' : dashes[i//2]},
                   'name' : yr,
                   'mode' : 'lines'})
# Refreshing the Data and Figure objects to see the effect on the chart:
# In[12]:
data = Data(traces)
fig = Figure(data = data, layout = layout)
pyo.iplot(fig)
py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\pyo.iplot-1.png")
#
# Let's push this chart to the cloud. It's not quite production-ready, but it's good to record our progress!
# In[13]:
py.plot(fig, filename = "MP expenses by month 2010-2015 (Line solidity)", filopt="overwrite")
py.image.save_as(fig, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(02) Lineplots\Notebooks\images\Lineplots (07) - Making dashed and dotted lines\py.plot-0.png")
#
# ### Changing line solidity - what have we learnt?
#
# We've seen how to manipulate the value of the <code>'dash'</code> key in the <code>'line'</code> dictionary to change the solidity of the line. We can use a string such as <code>'dashdot'</code> or set the dash length to be an integer number of pixels. We also utilised the <code>enumerate()</code> function to help us choose an item from a list by index and value.
#
# In the next lesson we'll find out how to specify that Plotly shows the marker in addition to the line. We'll also see how to change the marker symbol.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| 5,113 | 1,758 |
"""
SpyDrNet
========
SpyDrNet is an EDA tool for analyzing and transforming netlists.
See https://byuccl.github.io/spydrnet for more details.
"""
import importlib
import pkgutil
import pathlib
import sys
import os
# Auto-discover installed plugins: any importable top-level module whose
# name starts with 'spydrnet_' is imported and registered here.
discovered_plugins = {
    name: importlib.import_module(name)
    for finder, name, ispkg
    in pkgutil.iter_modules()
    if name.startswith('spydrnet_')
}
print("Installed Plugins", discovered_plugins.keys())
def get_active_plugins():
    """Return {name: module} for the plugins enabled in ~/.spydrnet.

    If the config file exists, only the installed plugins listed in it are
    returned (missing ones are reported). Otherwise the file is created
    listing — and activating — every discovered plugin.
    """
    active_plugins = {}
    config_file = os.path.join(pathlib.Path.home(), ".spydrnet")
    if os.path.isfile(config_file):
        # Fix: close the config file deterministically instead of leaking
        # the anonymous handle from open(...).read().
        with open(config_file, "r") as fp:
            requested = fp.read().split()
        for plugin in requested:
            if discovered_plugins.get(plugin, None):
                active_plugins.update({plugin: discovered_plugins[plugin]})
            else:
                print("Plugin %s is not installed " % plugin)
    else:
        with open(config_file, "w") as fp:
            fp.write("\n".join(discovered_plugins.keys()))
        active_plugins.update(discovered_plugins)
    return active_plugins
print("Active Plugins", get_active_plugins().keys())
# Release data
from spydrnet import release
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
(release.authors['Keller'] + release.authors['Skouson'] +
release.authors['Wirthlin'])
__license__ = release.license
__date__ = release.date
__version__ = release.version
__release__ = release.release
from spydrnet.ir import *
from spydrnet.util.hierarchical_reference import HRef
OUT = Port.Direction.OUT
IN = Port.Direction.IN
INOUT = Port.Direction.INOUT
UNDEFINED = Port.Direction.UNDEFINED
from spydrnet.util.selection import INSIDE, OUTSIDE, BOTH, ALL
from spydrnet.testing.test import run as test
from spydrnet.parsers import parse
from spydrnet.composers import compose
from spydrnet.plugins import namespace_manager
from spydrnet.util import get_netlists, get_libraries, get_definitions, get_ports, get_cables, get_instances,\
get_wires, get_pins
from spydrnet.util import get_hinstances, get_hports, get_hpins, get_hcables, get_hwires
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
import glob
example_netlist_names = list()
for filename in glob.glob(os.path.join(base_dir, 'support_files', 'EDIF_netlists', "*")):
basename = os.path.basename(filename)
example_netlist_names.append(basename[:basename.index('.')])
example_netlist_names.sort()
# logger for the module
import logging
import sys
LOG_FORMAT = "%(levelname)5s %(filename)s:%(lineno)s (%(threadName)10s) - %(message)s"
logger = logging.getLogger('spydrnet_logs')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(stream_handler)
def enable_file_logging(LOG_LEVEL=None, filename=""):
    """Attach a FileHandler writing to '_<filename>_spydrnet.log'.

    LOG_LEVEL is a logging level name such as "INFO" or "DEBUG" (defaults
    to "INFO"). Returns the handler so callers can later remove it.
    """
    LOG_LEVEL = LOG_LEVEL or "INFO"
    file_handler = logging.FileHandler("_" + filename + "_spydrnet.log", mode='w')
    file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
    # Fix: resolve the level by name instead of eval()'ing a format string —
    # eval on a caller-supplied string can execute arbitrary code.
    file_handler.setLevel(getattr(logging, LOG_LEVEL))
    logger.addHandler(file_handler)
    return file_handler
def load_example_netlist_by_name(name):
    """Parse and return the bundled example EDIF netlist called `name`."""
    assert name in example_netlist_names, "Example netlist not found"
    netlist_file = os.path.join(
        base_dir, 'support_files', 'EDIF_netlists', name + ".edf.zip")
    return parse(netlist_file)
| 3,344 | 1,125 |
# Compare memory usage of a dense and a sparse adjancency matrix.
#
# Requires numpy. Install it with `pip3 install --user numpy`
# Authors: Gianluca Andreotti, Aurora Lucrezia Castro
import numpy as np
from scipy.sparse import csr_matrix
import sys
def load_matrix(file):
    """Load a dense adjacency matrix from `file` (path or file-like) and
    print a memory comparison between the dense array and its CSR form.

    Fixes: nbytes/1000 is kilobytes, not "mb" as previously labeled, and
    the sparse figure now counts all three CSR buffers (data, indices,
    indptr) instead of just the non-zero values.
    """
    matrix = np.loadtxt(file, dtype=int, ndmin=2)
    print("Nodes: " + str(len(matrix)))
    print(f"Dense matrix: {matrix.nbytes / 1000}kB")
    sparse_csr_mat = csr_matrix(matrix)
    sparse_bytes = (sparse_csr_mat.data.nbytes
                    + sparse_csr_mat.indices.nbytes
                    + sparse_csr_mat.indptr.nbytes)
    print(f"Sparse matrix: {sparse_bytes / 1000}kB")
    print("")
if __name__ == "__main__":
    # Each command-line argument is a path to a whitespace-separated
    # adjacency-matrix file.
    for f in sys.argv[1:]:
        load_matrix(f)
| 620 | 232 |
from pandas import DataFrame
class CaesarCipher:
    """Classic Caesar shift cipher over A-Z.

    Non-alphabetic characters are dropped from the output and the result
    is always upper-case. A single wrap-around correction is applied, so
    keys are expected in the 0..26 range.
    """

    def __init__(self, text, key) -> None:
        self.text = text
        self.key = key

    def decrypt(self):
        """Shift each letter back by the key, wrapping below 'A'."""
        letters = []
        for ch in self.text.upper():
            if not ch.isalpha():
                continue
            code = ord(ch) - self.key
            if code < ord('A'):
                code += 26
            letters.append(chr(code))
        return ''.join(letters).upper()

    def encrypt(self):
        """Shift each letter forward by the key, wrapping past 'Z'."""
        letters = []
        for ch in self.text.upper():
            if not ch.isalpha():
                continue
            code = ord(ch) + self.key
            if code > ord('Z'):
                code -= 26
            letters.append(chr(code))
        return ''.join(letters).upper()
class VigenereCipher:
    """Vigenère cipher over A-Z; spaces are stripped from text and key."""

    # Initialization funtion
    def __init__(self, text, key):
        # Upper-case and remove spaces from both the text and the key.
        self.text = "".join(text.upper().split(' '))
        self.key = "".join(key.upper().split(' '))

    def _combine(self, sign):
        # sign=+1 encrypts, sign=-1 decrypts; the key repeats cyclically.
        out = []
        for idx, ch in enumerate(self.text):
            key_ch = self.key[idx % len(self.key)]
            code = (ord(ch) + sign * ord(key_ch) + 26) % 26 + ord('A')
            out.append(chr(code))
        return ''.join(out)

    def encrypt(self):
        """Return the cipher text of self.text under self.key."""
        return self._combine(+1)

    def decrypt(self):
        """Return the plain text, treating self.text as cipher text."""
        return self._combine(-1)
class MorseCode:
    """Encode/decode International Morse Code.

    encrypt() emits each character's code followed by a space, with words
    separated by '/ '; decrypt() reverses the mapping on a space-separated
    code string.
    """

    # Character -> Morse table, built once at class level.
    # Fix: '2' is five symbols ('..---'); the previous table had '..--'.
    _MORSE = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....',
              'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.',
              'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
              'Y': '-.--', 'Z': '--..',
              '0': '-----', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
              '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.',
              '.': '.-.-.-', ',': '--..--', '?': '..--..', '!': '-.-.--', '/': '-..-.',
              '(': '-.--.', ')': '-.--.-', '&': '.-...', ':': '---...', ';': '-.-.-.',
              '=': '-...-', '+': '.-.-.', '-': '-....-', '_': '..--.-', '$': '...-..-', '@': '.--.-.'}

    def __init__(self, message: str) -> None:
        self.message = message.upper()

    # Function that returns value or key from the Morse table
    def getDictItems(self, val, option):
        """Look up `val`: option 1 maps character->code, 2 maps code->character."""
        if option == 1:
            return self._MORSE[val]
        # Reverse lookup: the character whose code equals `val`.
        codes = list(self._MORSE.values())
        return list(self._MORSE.keys())[codes.index(val)]

    # Function to encrypt given message
    def encrypt(self):
        """Return the Morse encoding; each code is followed by a space."""
        return "".join(['/ ' if character == ' '
                        else f'{self.getDictItems(character, 1)} '
                        for character in self.message])

    # Function to decrypt given cipher text
    def decrypt(self):
        """Decode a space-separated Morse string back to text."""
        return "".join([' ' if character == '/'
                        else f'{self.getDictItems(character, 2)}'
                        for character in self.message.split(' ')])
class RunningKeyCipher:
    """Running-key (tabula recta) cipher backed by a pandas DataFrame.

    The 26x26 tabula recta is indexed by plain-text letter (row) and key
    letter (column). The key must be at least as long as the text.
    """
    def __init__(self, plainText, key):
        # converting the plain text and key to upper case and removing spaces
        self.pt = "".join(plainText.upper().split(' '))
        self.ky = "".join(key.upper().split(' '))
        # creating a DataFrame of size 26x26: row i is the alphabet rotated
        # left by i positions, with letters as both index and column labels.
        tab, tableau = [chr(a) for a in range(65, 91)], []
        for i in range(26):
            row = tab[i:]+tab[:i]
            tableau.append(row)
        self.tabulaRecta = DataFrame(tableau, index=tab, columns=tab)

    def encrypt(self):
        """Cell at (plain-letter row, key-letter column) is the cipher letter."""
        encryptedText = ''
        for i in range(len(self.pt)):
            encryptedText += self.tabulaRecta.values[ord(
                self.pt[i])-65][ord(self.ky[i])-65]
        return encryptedText

    def decrypt(self):
        """Invert encrypt(): find the row whose key-column holds the cipher letter.

        NOTE(review): here self.pt is interpreted as the cipher text.
        """
        decryptedText = ''
        for i in range(len(self.pt)):
            decryptedText += ''.join(
                self.tabulaRecta[self.tabulaRecta[self.ky[i]] == self.pt[i]].index.values)
        return decryptedText
class ROT13:
    """ROT13 over A-Z; spaces are removed and output is upper-case.

    Encryption and decryption are the same self-inverse rotation.
    """

    def __init__(self, text: str):
        # Upper-case and strip spaces.
        self.text = "".join(text.upper().split(' '))

    def _rotate(self):
        rotated = []
        for ch in self.text:
            if ord(ch) > ord('A') + 12:   # N..Z rotate down
                rotated.append(chr(ord(ch) - 13))
            else:                         # A..M (and lower codes) rotate up
                rotated.append(chr(ord(ch) + 13))
        return ''.join(rotated)

    def encrypt(self):
        """Apply ROT13."""
        return self._rotate()

    def decrypt(self):
        """Apply ROT13 (self-inverse)."""
        return self._rotate()
| 5,298 | 1,729 |
#!/usr/bin/env python
# coding=utf-8
#
# A module for create a multi-agent system over Ad-hoc networks
# Copyright (C) 2017-2018
# Juan Sebastian Triana Correa <justrianaco@unal.edu.co>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
__author__ = "Juan Sebastian Triana Correa"
__copyright__ = "Copyright 2015, TLON group"
__license__ = "LGPL"
__version__ = "1.2"
__email__ = "justrianaco@unal.edu.co"
__status__ = "Development"
import sys
import stomp
import dill as pickle
import MultiPManager.managerImp as managerImp
import MultiPManager.multiProc as multiProc
AUTHKEY = ''
tlon_resources = {}
def BrokerConnect(ip, port):
    """Open and return a STOMP connection to the broker at (ip, port).

    Blocks until the connection handshake completes (wait=True).
    """
    conn = stomp.Connection([(ip, port)])
    conn.start()
    conn.connect(wait=True)
    return conn
class __resourceTopicListener__(stomp.ConnectionListener):
    """Listener that registers shared resources announced on the topic.

    SECURITY NOTE(review): messages are deserialized with pickle, which can
    execute arbitrary code from the sender — only safe on a trusted broker.
    """
    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        global tlon_resources
        #print('Received a message {}'.format(message))
        #print('Request for resource {}'.format(message))
        # Python 2 delivers bytes already; Python 3 needs an explicit encode.
        if sys.version_info[0]<3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        # Index the unpickled resource by its __name__ for later lookup.
        tlon_resources[tmp.__name__] = tmp
class __ordersTopicListener__(stomp.ConnectionListener):
    """Listener that runs multiprocessing jobs for known resources.

    SECURITY NOTE(review): messages are deserialized with pickle, which can
    execute arbitrary code from the sender — only safe on a trusted broker.
    """
    def on_error(self, headers, message):
        print('Received an error {}'.format(message))

    def on_message(self, headers, message):
        global tlon_resources
        # Python 2 delivers bytes already; Python 3 needs an explicit encode.
        if sys.version_info[0]<3:
            tmp = pickle.loads(message)
        else:
            tmp = pickle.loads(message.encode())
        print("{},{},{}".format(tmp['ip'], tmp['portnum'], tmp['authkey']))
        print(tmp)
        # Only process orders for resources previously announced on the
        # resource topic; connect to the remote job queue and run them.
        if tmp['resourceName'] in tlon_resources:
            manager = managerImp.make_client_manager(tmp['ip'], tmp['portnum'], tmp['authkey'])
            job_q = manager.get_job_q()
            result_q = manager.get_result_q()
            multiProc.tlon_multiprocessing(job_q, result_q, tlon_resources[tmp['resourceName']])
from app import db
from datetime import datetime as dt
from slugify import slugify
class Post(db.Model):
    """Blog post; the URL slug is derived from the title at creation time."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String, nullable=False)
    description = db.Column(db.String, nullable=False)
    content = db.Column(db.String, nullable=False)
    slug = db.Column(db.String, nullable=False)
    timestamp = db.Column(db.DateTime, nullable=False)
    draft = db.Column(db.Boolean, default=True)
    category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
    category = db.relationship('Category', backref=db.backref('posts', lazy='dynamic'))

    def __init__(self, title, description, content, category=None):
        self.title = title
        self.description = description
        self.content = content
        self.timestamp = dt.utcnow()
        self.slug = slugify(title)
        self.category = category

    def __repr__(self):
        # Fix: the original chained three separate % applications
        # ('...' % a % b % c), which raises TypeError; all values must be
        # supplied in a single tuple.
        return '<Post [%r] %r - %r>' % (self.slug, self.title, self.category)

    def url(self):
        """Canonical relative URL for this post."""
        return '/posts/' + self.slug
class Category(db.Model):
    """Post category; posts back-reference it via Post.category."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<Category %r>' % self.name
'''
Helper functions for span tests
'''
import ptf.testutils as testutils
def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor):
    '''
    Send a broadcast ICMP packet from ``src_port`` and verify that it shows
    up (mirrored) on the ``monitor`` port.
    Args:
        ptfadapter: ptfadapter fixture
        src_port: ptf port index, from which packet will be sent
        monitor: ptf port index, where packet will be verified on
    '''
    source_mac = ptfadapter.dataplane.get_mac(0, src_port)
    mirrored_packet = testutils.simple_icmp_packet(eth_src=source_mac, eth_dst='ff:ff:ff:ff:ff:ff')
    # Drop any stale captured frames before sending, so verification only
    # sees the packet we are about to inject.
    ptfadapter.dataplane.flush()
    testutils.send(ptfadapter, src_port, mirrored_packet)
    testutils.verify_packet(ptfadapter, mirrored_packet, monitor)
| 672 | 231 |
import functools
import html
import itertools
import pprint
from bonobo import settings
from bonobo.config import Configurable, Option, Method, use_raw_input, use_context, use_no_input
from bonobo.config.functools import transformation_factory
from bonobo.config.processors import ContextProcessor, use_context_processor
from bonobo.constants import NOT_MODIFIED
from bonobo.util.objects import ValueHolder
from bonobo.util.term import CLEAR_EOL
from mondrian import term
__all__ = [
'FixedWindow',
'Format',
'Limit',
'OrderFields',
'PrettyPrinter',
'Rename',
'SetFields',
'Tee',
'UnpackItems',
'count',
'identity',
'noop',
]
def identity(x):
    """Identity transformation: forward the input value untouched."""
    return x
class Limit(Configurable):
    """
    Creates a Limit() node, that will only let go through the first n rows (defined by the `limit` option), unmodified.
    .. attribute:: limit
    Number of rows to let go through.
    TODO: simplify into a closure building factory?
    """
    limit = Option(positional=True, default=10)

    @ContextProcessor
    def counter(self, context):
        # Per-execution mutable counter; the bonobo context-processor
        # machinery injects the yielded ValueHolder as the first
        # argument of every __call__ invocation.
        yield ValueHolder(0)

    def __call__(self, counter, *args, **kwargs):
        counter += 1  # ValueHolder supports in-place add; state persists across rows
        if counter <= self.limit:
            # Forward the row unchanged; rows past the limit are simply dropped.
            yield NOT_MODIFIED
def Tee(f):
    """Wrap *f* so it is called only for its side effects.

    The wrapped callable invokes *f* with the incoming arguments, discards
    its return value, and returns NOT_MODIFIED so the input row passes
    through unchanged (like the unix ``tee``).

    Cleanup: uses the module-level NOT_MODIFIED import directly — the
    original re-imported it locally and declared a needless ``nonlocal f``
    (``f`` is never rebound inside ``wrapped``).
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        f(*args, **kwargs)
        return NOT_MODIFIED
    return wrapped
def _shorten(s, w):
if w and len(s) > w:
s = s[0:w - 3] + '...'
return s
class PrettyPrinter(Configurable):
    """Prints incoming rows in the nicest format available.

    Output format is chosen at call time: an HTML table when running inside
    Jupyter, a box-drawing layout on an interactive terminal, and a plain
    prefixed format otherwise. Printing is suppressed when settings.QUIET
    is set; rows always pass through unmodified.
    """
    max_width = Option(
        int,
        default=term.get_size()[0],
        required=False,
        __doc__='''
        If set, truncates the output values longer than this to this width.
    '''
    )
    filter = Method(
        default=
        (lambda self, index, key, value: (value is not None) and (not isinstance(key, str) or not key.startswith('_'))),
        __doc__='''
        A filter that determine what to print.
        Default is to ignore any key starting with an underscore and none values.
    '''
    )

    @ContextProcessor
    def context(self, context):
        # Accumulates HTML table rows while running under Jupyter; the table
        # is rendered once, at node teardown (after the yield).
        context.setdefault('_jupyter_html', None)
        yield context
        if context._jupyter_html is not None:
            from IPython.display import display, HTML
            display(HTML('\n'.join(['<table>'] + context._jupyter_html + ['</table>'])))

    def __call__(self, context, *args, **kwargs):
        if not settings.QUIET:
            if term.isjupyter:
                # NOTE(review): print_jupyter() takes no **kwargs — confirm
                # rows on this path never carry keyword values.
                self.print_jupyter(context, *args, **kwargs)
                return NOT_MODIFIED
            if term.istty:
                self.print_console(context, *args, **kwargs)
                return NOT_MODIFIED
            self.print_quiet(context, *args, **kwargs)
        return NOT_MODIFIED

    def print_quiet(self, context, *args, **kwargs):
        # Plain, one-value-per-line output for non-interactive streams.
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_quiet(index, key, value, fields=context.get_input_fields()))

    def format_quiet(self, index, key, value, *, fields=None):
        # XXX should we implement argnames here ?
        return ' '.join(((' ' if index else '-'), str(key), ':', str(value).strip()))

    def print_console(self, context, *args, **kwargs):
        # Box-drawing output: one frame (┌ ... └) per row.
        print('\u250c')
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_console(index, key, value, fields=context.get_input_fields()))
        print('\u2514')

    def format_console(self, index, key, value, *, fields=None):
        fields = fields or []
        if not isinstance(key, str):
            # Positional value: display the field name, with the numeric index
            # appended in dim text when the name differs from the index.
            if len(fields) > key and str(key) != str(fields[key]):
                key = '{}{}'.format(fields[key], term.lightblack('[{}]'.format(key)))
            else:
                key = str(index)
        prefix = '\u2502 {} = '.format(key)
        prefix_length = len(prefix)
        def indent(text, prefix):
            # Prefix continuation lines so multi-line values stay inside the box.
            for i, line in enumerate(text.splitlines()):
                yield (prefix if i else '') + line + CLEAR_EOL + '\n'
        repr_of_value = ''.join(
            indent(pprint.pformat(value, width=self.max_width - prefix_length), '\u2502' + ' ' * (len(prefix) - 1))
        ).strip()
        return '{}{}{}'.format(prefix, repr_of_value.replace('\n', CLEAR_EOL + '\n'), CLEAR_EOL)

    def print_jupyter(self, context, *args):
        if not context._jupyter_html:
            # First row: emit the header using input field names when known,
            # falling back to positional indices.
            context._jupyter_html = [
                '<thead><tr>',
                *map('<th>{}</th>'.format, map(html.escape, map(str,
                    context.get_input_fields() or range(len(args))))),
                '</tr></thead>',
            ]
        context._jupyter_html += [
            '<tr>',
            *map('<td>{}</td>'.format, map(html.escape, map(repr, args))),
            '</tr>',
        ]
@use_no_input
def noop(*args, **kwargs):
    """Do nothing; let the input pass through downstream unchanged."""
    return NOT_MODIFIED
class FixedWindow(Configurable):
    """
    Transformation factory to create fixed windows of inputs, as lists.
    For example, if the input is successively 1, 2, 3, 4, etc. and you pass it through a ``FixedWindow(2)``, you'll get
    lists of elements 2 by 2: [1, 2], [3, 4], ...
    """
    length = Option(int, positional=True)  # type: int

    @ContextProcessor
    def buffer(self, context):
        # Shared accumulation buffer, injected into each __call__.
        buffer = yield ValueHolder([])
        # Teardown: flush a trailing partial window, padded with None up to
        # `length`, so downstream always receives fixed-size windows.
        if len(buffer):
            last_value = buffer.get()
            last_value += [None] * (self.length - len(last_value))
            context.send(*last_value)

    @use_raw_input
    def __call__(self, buffer, bag):
        buffer.append(bag)
        if len(buffer) >= self.length:
            # Emit a full window and reset the buffer for the next one.
            yield tuple(buffer.get())
            buffer.set([])
@transformation_factory
def OrderFields(fields):
    """
    Transformation factory to reorder fields in a data stream.
    :param fields: field names to move to the front, in the requested order.
    :return: callable
    """
    fields = list(fields)

    @use_context
    @use_raw_input
    def _OrderFields(context, row):
        nonlocal fields
        context.setdefault('remaining', None)
        # First row only: output order = requested fields first, then the
        # remaining input fields sorted alphabetically.
        if not context.output_type:
            context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
            context.set_output_fields(fields + context.remaining)
        yield tuple(row.get(field) for field in context.get_output_fields())
    return _OrderFields
@transformation_factory
def SetFields(fields):
    """
    Transformation factory that sets the field names on first iteration, without touching the values.
    :param fields: field names to declare as the output type.
    :return: callable
    """
    @use_context
    @use_no_input
    def _SetFields(context):
        nonlocal fields
        # Declare the output fields once; every row then passes through as-is.
        if not context.output_type:
            context.set_output_fields(fields)
        return NOT_MODIFIED
    return _SetFields
@transformation_factory
def UnpackItems(*items, fields=None, defaults=None):
    """
    >>> UnpackItems(0)
    :param items: positions of dict-valued fields in the input row to unpack.
    :param fields: explicit output field names; autodetected from the first row when None.
    :param defaults: per-field fallback values for missing dict keys.
    :return: callable
    """
    defaults = defaults or {}

    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        if fields is None:
            # Autodetect output fields from the first row's dict keys,
            # concatenated across all unpacked items.
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        for item in items:
            # NOTE(review): with more than one item, every item is probed for
            # every field, so len(values) = len(items) * len(fields) exceeds
            # the declared output fields — confirm multi-item use is intended.
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values
    return _UnpackItems
@transformation_factory
def Rename(**translations):
    """Transformation factory that renames fields, called as Rename(new_name='old_name'); values pass through untouched."""
    # XXX todo handle duplicated
    fields = None
    # Invert to map old field name -> new field name for the lookup below.
    translations = {v: k for k, v in translations.items()}

    @use_context
    @use_raw_input
    def _Rename(context, bag):
        nonlocal fields, translations
        # First row only: compute and declare the renamed output fields.
        if not fields:
            fields = tuple(translations.get(field, field) for field in context.get_input_fields())
            context.set_output_fields(fields)
        return NOT_MODIFIED
    return _Rename
@transformation_factory
def Format(**formats):
    """Transformation factory that fills fields from str.format templates; each template receives the whole row as keyword arguments."""
    fields, newfields = None, None

    @use_context
    @use_raw_input
    def _Format(context, bag):
        nonlocal fields, newfields, formats
        if not context.output_type:
            # First row: output = input fields plus any formatted fields that
            # do not already exist in the input.
            fields = context.input_type._fields
            newfields = tuple(field for field in formats if not field in fields)
            context.set_output_fields(fields + newfields)
        # Formatted fields are rendered from the row dict; others pass through.
        return tuple(
            formats[field].format(**bag._asdict()) if field in formats else bag.get(field)
            for field in fields + newfields
        )
    return _Format
def _count(self, context):
    # Context processor for count(): provides a shared counter, then emits
    # the final total into the output stream at node teardown.
    counter = yield ValueHolder(0)
    context.send(counter.get())

@use_no_input
@use_context_processor(_count)
def count(counter):
    """Counts rows; the total is sent downstream once the stream ends."""
    counter += 1
| 9,039 | 2,741 |
'''
Simplified kNN usage example (no graphs); see example.py for the graphical
version. Change k, noise, and amount of data to see how accuracy is affected.
'''
import kNN as knn
import modeler

# Build the classifier; k is the main tuning knob.
trainer = knn.knn(k=5)

# Training set: 100 noisy samples from the basic modeler.
parameters, labels = modeler.generateData(100, noiseFactor=.25)

# Evaluation set: noise-free, so expected labels are exact.
testParameters, testLabel = modeler.generateData(25, noiseFactor=0)

# Load the training data into the classifier.
trainer.loadData(parameters, labels)

# Count misclassifications over the evaluation set.
error = 0
for sample, expected in zip(testParameters, testLabel):
    confidence, guess = trainer.predict(sample, negativeValue=0)
    if guess != expected:
        error += 1

# Calculate and print accuracy.
print("Accuracy", 1 - error / len(testParameters))
import unittest
from mathtest import add, minus
class TestMathFunc(unittest.TestCase):
    """Tests for the add() and minus() helpers from mathtest."""
    def test_add(self):
        """add(a, b) returns the sum of its two arguments."""
        self.assertEqual(3, add(1, 2))
        self.assertNotEqual(3, add(2, 2))
    def test_minus(self):
        """minus(a, b) returns a - b."""
        self.assertEqual(1, minus(3, 2))

if __name__ == '__main__':
    # Allow running this module directly to execute the tests above.
    unittest.main()
| 424 | 149 |
import sys
import unittest
sys.path.append('.')
class TestDataSet(unittest.TestCase):
    # Placeholder suite for dataset configuration tests.
    def test_cfg(self):
        # TODO: add real assertions; currently only verifies the suite loads.
        pass

if __name__ == '__main__':
    unittest.main()
| 174 | 61 |
import logging
import os
from Interface.Isosurface import Isosurface
from Interface.JVXL import JVXL
from Interface.NBOinteractions import NBOinteractions
from Top import Top
log = logging.getLogger(__name__)
class ParserFactory(Top):
    """Selects and instantiates a parser class for an input record."""

    @staticmethod
    def interface_by_command_line(frecord):
        """
        Assign a class to a task from the group_names dictionary
        """
        rectype, params = frecord[0], frecord[1:]
        if rectype == 'file':
            # File records are handled by interface_by_file_extension.
            return
        type2class = {
            'inbo': NBOinteractions,
            'iso': Isosurface,
            'jvxl': JVXL,
            'top': Top
        }
        if rectype in type2class:
            parser = type2class[rectype]()
            log.debug('Assigned parser was successfully loaded')
            return parser
        # Bug fix: the original logged the undefined name `parser` here,
        # raising NameError instead of reporting the unknown record type.
        log.error("Parser '%s' cannot be loaded" % rectype)
        return Top()

    @staticmethod
    def interface_by_file_extension(frecord, exts):
        """
        Assign a class to frecord based on its file extension.
        Returns a Top() fallback on any load failure.
        """
        rectype, params = frecord[0], frecord[1:]
        if rectype != 'file':
            return
        top = Top()
        file = params[0]
        base, ext = os.path.splitext(file)
        ext = ext[1:]  # strip the leading dot
        if ext in exts:
            ParsingClass = exts[ext]
            log.info('%s parser is selected for %s' % (ParsingClass, file))
        else:
            log.error("Extension '%s' is not registered" % ext)
            return top
        ModName = 'Interface.' + ParsingClass
        try:
            # __import__('Interface.X') returns the top-level package; the
            # submodule is fetched as an attribute.
            GenericParser = __import__(ModName)
            module = getattr(GenericParser, ParsingClass)
        except (ImportError, AttributeError):
            # Narrowed from a bare `except:` so unrelated errors propagate.
            log.error("Module '%s' cannot be loaded" % ModName)
            return top
        try:
            # getattr instead of eval('module.' + ParsingClass): same lookup,
            # without evaluating a dynamically built string. A missing class
            # raises AttributeError (the original's `except NameError` could
            # never fire for an attribute lookup).
            cl = getattr(module, ParsingClass)()
            log.debug('Assigned parser was successfully loaded')
            return cl
        except AttributeError:
            log.error("Parser '%s' cannot be loaded" % ParsingClass)
            return top

    @staticmethod
    def typeByContent(frecord):
        # TODO To be implemented
        rectype, params = frecord[0], frecord[1:]
        return Top()
| 2,181 | 612 |
#!/usr/bin/env python3
from collections import deque

# Read the tree: N vertices, Q queries, N-1 edges (1-based -> 0-based).
n, q = map(int, input().split())
adj = [[] for _ in range(n)]
for _ in range(n - 1):
    u, v = (int(t) - 1 for t in input().split())
    adj[u].append(v)
    adj[v].append(u)

# 2-color the tree by BFS depth parity from vertex 0.
color = [-1] * n
color[0] = 0
queue = deque([0])
while queue:
    u = queue.popleft()
    for v in adj[u]:
        if color[v] == -1:
            color[v] = 1 - color[u]
            queue.append(v)

# Same parity => even path length => stop in a town; otherwise on a road.
for _ in range(q):
    c, d = (int(t) - 1 for t in input().split())
    print("Town" if color[c] == color[d] else "Road")
# Copyright (c) 2013, veena and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    # Frappe report entry point. Returns the standard 5-tuple:
    # (columns, data, message, chart, report_summary).
    columns = get_columns()
    data = []
    conditions=get_conditions(filters)
    data = get_data(filters,conditions)
    for project in data:
        # Enrich each project row with task counts; frappe.db.count issues
        # one query per status per project.
        project["total_tasks"] = frappe.db.count("Task", filters={"project": project['name']})
        project["completed_tasks"] = frappe.db.count("Task", filters={"project": project['name']})
        project["overdue_tasks"] = frappe.db.count("Task", filters={"project": project['name'], "status": "Overdue"})
    chart = get_chart_data(data)
    report_summary = get_report_summary(data)
    return columns, data, None, chart, report_summary
def get_columns():
    """Return the report column definitions.

    Fix: the "percent" and "currency" fieldtypes were lowercase; Frappe
    fieldtypes are capitalized (consistent with "Data"/"Date"/"Link" used
    elsewhere in this list), so lowercase values are not recognized.
    """
    return [
        {
            "fieldname": "name",
            "label": _("Project"),
            "fieldtype": "Link",
            "options": "Project",
            "width": 200
        },
        {
            "fieldname": "project_type",
            "label": _("Type"),
            "fieldtype": "Link",
            "options": "Project Type",
            "width": 120
        },
        {
            "fieldname": "status",
            "label": _("Status"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "total_tasks",
            "label": _("Total Tasks"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "completed_tasks",
            "label": _("Tasks Completed"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "overdue_tasks",
            "label": _("Tasks Overdue"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "percent_complete",
            "label": _("Completion"),
            "fieldtype": "Data",
            "width": 120
        },
        {
            "fieldname": "expected_start_date",
            "label": _("Start Date"),
            "fieldtype": "Date",
            "width": 120
        },
        {
            "fieldname": "expected_end_date",
            "label": _("End Date"),
            "fieldtype": "Date",
            "width": 120
        },
        {
            "fieldname": "date_percentage",
            "label": _("Duration Left%"),
            "fieldtype": "Percent",
            "width": 200
        },
        {
            "fieldname": "total_sales_amount",
            "label": _("Total Sales Amount"),
            "fieldtype": "Currency",
            "width": 150
        },
        {
            "fieldname": "total_billed_amount",
            "label": _("Total Billed Amount"),
            "fieldtype": "Currency",
            "width": 200
        },
        {
            "fieldname": "sales_percentage",
            "label": _("Sales Remaining%"),
            "fieldtype": "Percent",
            "width": 150
        }
    ]
def get_data(filters,conditions):
    # Single query over Project. date_percentage = share of the project
    # duration still remaining (100 when not yet started); sales_percentage
    # = share of the sales amount not yet billed.
    # NOTE(review): `conditions` is interpolated directly into the SQL — it
    # must only ever come from get_conditions(); never pass raw user input.
    query="""select p.name,p.status,p.percent_complete,p.expected_start_date,p.expected_end_date,p.project_type,(CASE WHEN p.expected_start_date > NOW() THEN 100 ELSE ( (DATEDIFF(p.expected_end_date,Now()) / DATEDIFF(p.expected_end_date, p.expected_start_date))*100)END)as date_percentage,p.total_sales_amount,p.total_billed_amount,(((p.total_sales_amount-p.total_billed_amount)/p.total_sales_amount)*100) as sales_percentage from `tabProject` p WHERE {conditions} ORDER BY p.expected_end_date ASC""".format(conditions=conditions)
    proj=frappe.db.sql(query, as_dict=True)
    return proj
def get_conditions(filters):
    """Build the WHERE clause for the project query.

    Two fixes over the original:
    - Filter values are escaped with frappe.db.escape (which returns a
      quoted SQL literal) instead of being interpolated raw, closing a
      SQL-injection hole.
    - The clause starts with the unconditional date condition; the original
      produced ``WHERE  AND ...`` (a syntax error) whenever no company
      filter was supplied.
    """
    conditions = "p.expected_end_date >= NOW()"
    if filters.get('company'):
        conditions += " AND p.company = {}".format(frappe.db.escape(filters.get('company')))
    if filters.get('is_active'):
        conditions += " AND p.is_active = {}".format(frappe.db.escape(filters.get('is_active')))
    if filters.get('status'):
        conditions += " AND p.status = {}".format(frappe.db.escape(filters.get('status')))
    if filters.get('project_type'):
        conditions += " AND p.project_type = {}".format(frappe.db.escape(filters.get('project_type')))
    if filters.get('priority'):
        conditions += " AND p.priority = {}".format(frappe.db.escape(filters.get('priority')))
    return conditions
def get_chart_data(data):
    """Build the bar-chart configuration from the enriched project rows.

    Fix: the original truncated labels/sales to 50 entries but the other
    series to 30, so labels past index 29 lined up with the wrong (or no)
    bars. All series are now cut to the same length.
    """
    MAX_POINTS = 30  # keep labels and every dataset the same length
    points = data[:MAX_POINTS]
    labels = [project.name for project in points]
    total = [project.total_tasks for project in points]
    completed = [project.completed_tasks for project in points]
    overdue = [project.overdue_tasks for project in points]
    date_per = [project.date_percentage for project in points]
    sales_per = [project.sales_percentage for project in points]
    return {
        "data": {
            'labels': labels,
            'datasets': [
                {
                    "name": "Overdue",
                    "values": overdue
                },
                {
                    "name": "Completed",
                    "values": completed
                },
                {
                    "name": "Total Tasks",
                    "values": total
                },
                {
                    "name": "Duration Left%",
                    "values": date_per
                },
                {
                    "name": "Sales Remaining%",
                    "values": sales_per
                },
            ]
        },
        "type": "bar",
        "colors": ["#fc4f51", "#ffd343","#00FF00", "#7575ff","#78d6ff"],
        "barOptions": {
            "stacked": False
        }
    }
def get_report_summary(data):
    """Return the headline summary cards, or None when there is no data.

    Fix: projects whose percent_complete is NULL (never updated) arrive as
    None and crashed the average with a TypeError; they now count as 0.
    """
    if not data:
        return None
    avg_completion = sum((project.percent_complete or 0) for project in data) / len(data)
    total = sum(project.total_tasks for project in data)
    total_overdue = sum(project.overdue_tasks for project in data)
    completed = sum(project.completed_tasks for project in data)
    return [
        {
            "value": avg_completion,
            "indicator": "Green" if avg_completion > 50 else "Red",
            "label": "Average Completion",
            "datatype": "Percent",
        },
        {
            "value": total,
            "indicator": "Blue",
            "label": "Total Tasks",
            "datatype": "Int",
        },
        {
            "value": completed,
            "indicator": "Green",
            "label": "Completed Tasks",
            "datatype": "Int",
        },
        {
            "value": total_overdue,
            "indicator": "Green" if total_overdue == 0 else "Red",
            "label": "Overdue Tasks",
            "datatype": "Int",
        }
    ]
| 5,384 | 2,291 |
from . import cli
from . import datasets
from . import evaluation
from . import models
from . import plotting
from .model_evaluator import ModelEvaluator
from .plotting.plot import plot
from .version import __version__, VERSION
# Ergonomic aliases for the package's primary public entry points.
Evaluate = ModelEvaluator
Plot = plot
| 267 | 74 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListenerAction',
'ListenerAuthenticateCognitoConfig',
'ListenerAuthenticateOidcConfig',
'ListenerCertificate',
'ListenerCertificateCertificate',
'ListenerFixedResponseConfig',
'ListenerForwardConfig',
'ListenerRedirectConfig',
'ListenerRuleAction',
'ListenerRuleAuthenticateCognitoConfig',
'ListenerRuleAuthenticateOidcConfig',
'ListenerRuleFixedResponseConfig',
'ListenerRuleForwardConfig',
'ListenerRuleHostHeaderConfig',
'ListenerRuleHttpHeaderConfig',
'ListenerRuleHttpRequestMethodConfig',
'ListenerRulePathPatternConfig',
'ListenerRuleQueryStringConfig',
'ListenerRuleQueryStringKeyValue',
'ListenerRuleRedirectConfig',
'ListenerRuleRuleCondition',
'ListenerRuleSourceIpConfig',
'ListenerRuleTargetGroupStickinessConfig',
'ListenerRuleTargetGroupTuple',
'ListenerTargetGroupStickinessConfig',
'ListenerTargetGroupTuple',
'LoadBalancerAttribute',
'LoadBalancerSubnetMapping',
'LoadBalancerTag',
'TargetGroupAttribute',
'TargetGroupMatcher',
'TargetGroupTag',
'TargetGroupTargetDescription',
]
@pulumi.output_type
class ListenerAction(dict):
    """Auto-generated Pulumi output type for an ELBv2 listener action; dict access via legacy camelCase keys triggers a deprecation warning pointing at the snake_case property."""
    @staticmethod
    def __key_warning(key: str):
        # Maps legacy camelCase keys to snake_case property names and warns.
        suggest = None
        if key == "authenticateCognitoConfig":
            suggest = "authenticate_cognito_config"
        elif key == "authenticateOidcConfig":
            suggest = "authenticate_oidc_config"
        elif key == "fixedResponseConfig":
            suggest = "fixed_response_config"
        elif key == "forwardConfig":
            suggest = "forward_config"
        elif key == "redirectConfig":
            suggest = "redirect_config"
        elif key == "targetGroupArn":
            suggest = "target_group_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerAction. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerAction.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerAction.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 authenticate_cognito_config: Optional['outputs.ListenerAuthenticateCognitoConfig'] = None,
                 authenticate_oidc_config: Optional['outputs.ListenerAuthenticateOidcConfig'] = None,
                 fixed_response_config: Optional['outputs.ListenerFixedResponseConfig'] = None,
                 forward_config: Optional['outputs.ListenerForwardConfig'] = None,
                 order: Optional[int] = None,
                 redirect_config: Optional['outputs.ListenerRedirectConfig'] = None,
                 target_group_arn: Optional[str] = None):
        pulumi.set(__self__, "type", type)
        if authenticate_cognito_config is not None:
            pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config)
        if authenticate_oidc_config is not None:
            pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config)
        if fixed_response_config is not None:
            pulumi.set(__self__, "fixed_response_config", fixed_response_config)
        if forward_config is not None:
            pulumi.set(__self__, "forward_config", forward_config)
        if order is not None:
            pulumi.set(__self__, "order", order)
        if redirect_config is not None:
            pulumi.set(__self__, "redirect_config", redirect_config)
        if target_group_arn is not None:
            pulumi.set(__self__, "target_group_arn", target_group_arn)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="authenticateCognitoConfig")
    def authenticate_cognito_config(self) -> Optional['outputs.ListenerAuthenticateCognitoConfig']:
        return pulumi.get(self, "authenticate_cognito_config")
    @property
    @pulumi.getter(name="authenticateOidcConfig")
    def authenticate_oidc_config(self) -> Optional['outputs.ListenerAuthenticateOidcConfig']:
        return pulumi.get(self, "authenticate_oidc_config")
    @property
    @pulumi.getter(name="fixedResponseConfig")
    def fixed_response_config(self) -> Optional['outputs.ListenerFixedResponseConfig']:
        return pulumi.get(self, "fixed_response_config")
    @property
    @pulumi.getter(name="forwardConfig")
    def forward_config(self) -> Optional['outputs.ListenerForwardConfig']:
        return pulumi.get(self, "forward_config")
    @property
    @pulumi.getter
    def order(self) -> Optional[int]:
        return pulumi.get(self, "order")
    @property
    @pulumi.getter(name="redirectConfig")
    def redirect_config(self) -> Optional['outputs.ListenerRedirectConfig']:
        return pulumi.get(self, "redirect_config")
    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class ListenerAuthenticateCognitoConfig(dict):
    """Auto-generated Pulumi output type for the Cognito authentication config of a listener action; camelCase dict access warns and redirects to snake_case properties."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "userPoolArn":
            suggest = "user_pool_arn"
        elif key == "userPoolClientId":
            suggest = "user_pool_client_id"
        elif key == "userPoolDomain":
            suggest = "user_pool_domain"
        elif key == "authenticationRequestExtraParams":
            suggest = "authentication_request_extra_params"
        elif key == "onUnauthenticatedRequest":
            suggest = "on_unauthenticated_request"
        elif key == "sessionCookieName":
            suggest = "session_cookie_name"
        elif key == "sessionTimeout":
            suggest = "session_timeout"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateCognitoConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerAuthenticateCognitoConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerAuthenticateCognitoConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 user_pool_arn: str,
                 user_pool_client_id: str,
                 user_pool_domain: str,
                 authentication_request_extra_params: Optional[Any] = None,
                 on_unauthenticated_request: Optional[str] = None,
                 scope: Optional[str] = None,
                 session_cookie_name: Optional[str] = None,
                 session_timeout: Optional[str] = None):
        pulumi.set(__self__, "user_pool_arn", user_pool_arn)
        pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
        pulumi.set(__self__, "user_pool_domain", user_pool_domain)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
    @property
    @pulumi.getter(name="userPoolArn")
    def user_pool_arn(self) -> str:
        return pulumi.get(self, "user_pool_arn")
    @property
    @pulumi.getter(name="userPoolClientId")
    def user_pool_client_id(self) -> str:
        return pulumi.get(self, "user_pool_client_id")
    @property
    @pulumi.getter(name="userPoolDomain")
    def user_pool_domain(self) -> str:
        return pulumi.get(self, "user_pool_domain")
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[Any]:
        return pulumi.get(self, "authentication_request_extra_params")
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[str]:
        return pulumi.get(self, "on_unauthenticated_request")
    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[str]:
        return pulumi.get(self, "session_cookie_name")
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[str]:
        return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerAuthenticateOidcConfig(dict):
    """Auto-generated Pulumi output type for the OIDC authentication config of a listener action; camelCase dict access warns and redirects to snake_case properties."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "authorizationEndpoint":
            suggest = "authorization_endpoint"
        elif key == "clientId":
            suggest = "client_id"
        elif key == "clientSecret":
            suggest = "client_secret"
        elif key == "tokenEndpoint":
            suggest = "token_endpoint"
        elif key == "userInfoEndpoint":
            suggest = "user_info_endpoint"
        elif key == "authenticationRequestExtraParams":
            suggest = "authentication_request_extra_params"
        elif key == "onUnauthenticatedRequest":
            suggest = "on_unauthenticated_request"
        elif key == "sessionCookieName":
            suggest = "session_cookie_name"
        elif key == "sessionTimeout":
            suggest = "session_timeout"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerAuthenticateOidcConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerAuthenticateOidcConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerAuthenticateOidcConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 authorization_endpoint: str,
                 client_id: str,
                 client_secret: str,
                 issuer: str,
                 token_endpoint: str,
                 user_info_endpoint: str,
                 authentication_request_extra_params: Optional[Any] = None,
                 on_unauthenticated_request: Optional[str] = None,
                 scope: Optional[str] = None,
                 session_cookie_name: Optional[str] = None,
                 session_timeout: Optional[str] = None):
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
        pulumi.set(__self__, "issuer", issuer)
        pulumi.set(__self__, "token_endpoint", token_endpoint)
        pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> str:
        return pulumi.get(self, "authorization_endpoint")
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> str:
        return pulumi.get(self, "client_secret")
    @property
    @pulumi.getter
    def issuer(self) -> str:
        return pulumi.get(self, "issuer")
    @property
    @pulumi.getter(name="tokenEndpoint")
    def token_endpoint(self) -> str:
        return pulumi.get(self, "token_endpoint")
    @property
    @pulumi.getter(name="userInfoEndpoint")
    def user_info_endpoint(self) -> str:
        return pulumi.get(self, "user_info_endpoint")
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[Any]:
        return pulumi.get(self, "authentication_request_extra_params")
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[str]:
        return pulumi.get(self, "on_unauthenticated_request")
    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[str]:
        return pulumi.get(self, "session_cookie_name")
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[str]:
        return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerCertificate(dict):
    """Auto-generated Pulumi output type holding a single listener certificate ARN."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "certificateArn":
            suggest = "certificate_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerCertificate. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerCertificate.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerCertificate.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 certificate_arn: Optional[str] = None):
        if certificate_arn is not None:
            pulumi.set(__self__, "certificate_arn", certificate_arn)
    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> Optional[str]:
        return pulumi.get(self, "certificate_arn")
@pulumi.output_type
class ListenerCertificateCertificate(dict):
    """Auto-generated Pulumi output type for a certificate entry of a ListenerCertificate resource."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "certificateArn":
            suggest = "certificate_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerCertificateCertificate. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerCertificateCertificate.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerCertificateCertificate.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 certificate_arn: Optional[str] = None):
        if certificate_arn is not None:
            pulumi.set(__self__, "certificate_arn", certificate_arn)
    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> Optional[str]:
        return pulumi.get(self, "certificate_arn")
@pulumi.output_type
class ListenerFixedResponseConfig(dict):
    """Auto-generated Pulumi output type for a listener fixed-response action (status code plus optional content type/body)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "statusCode":
            suggest = "status_code"
        elif key == "contentType":
            suggest = "content_type"
        elif key == "messageBody":
            suggest = "message_body"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerFixedResponseConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerFixedResponseConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerFixedResponseConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 status_code: str,
                 content_type: Optional[str] = None,
                 message_body: Optional[str] = None):
        pulumi.set(__self__, "status_code", status_code)
        if content_type is not None:
            pulumi.set(__self__, "content_type", content_type)
        if message_body is not None:
            pulumi.set(__self__, "message_body", message_body)
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> str:
        return pulumi.get(self, "status_code")
    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> Optional[str]:
        return pulumi.get(self, "content_type")
    @property
    @pulumi.getter(name="messageBody")
    def message_body(self) -> Optional[str]:
        return pulumi.get(self, "message_body")
@pulumi.output_type
class ListenerForwardConfig(dict):
    """Auto-generated Pulumi output type for a listener forward action (target groups plus optional stickiness config)."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "targetGroupStickinessConfig":
            suggest = "target_group_stickiness_config"
        elif key == "targetGroups":
            suggest = "target_groups"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerForwardConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerForwardConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerForwardConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 target_group_stickiness_config: Optional['outputs.ListenerTargetGroupStickinessConfig'] = None,
                 target_groups: Optional[Sequence['outputs.ListenerTargetGroupTuple']] = None):
        if target_group_stickiness_config is not None:
            pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config)
        if target_groups is not None:
            pulumi.set(__self__, "target_groups", target_groups)
    @property
    @pulumi.getter(name="targetGroupStickinessConfig")
    def target_group_stickiness_config(self) -> Optional['outputs.ListenerTargetGroupStickinessConfig']:
        return pulumi.get(self, "target_group_stickiness_config")
    @property
    @pulumi.getter(name="targetGroups")
    def target_groups(self) -> Optional[Sequence['outputs.ListenerTargetGroupTuple']]:
        return pulumi.get(self, "target_groups")
@pulumi.output_type
class ListenerRedirectConfig(dict):
    """Redirect configuration of a Listener action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "statusCode":
            suggest = "status_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRedirectConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRedirectConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRedirectConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 status_code: str,
                 host: Optional[str] = None,
                 path: Optional[str] = None,
                 port: Optional[str] = None,
                 protocol: Optional[str] = None,
                 query: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "status_code", status_code)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if query is not None:
            pulumi.set(__self__, "query", query)
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> str:
        return pulumi.get(self, "status_code")
    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        return pulumi.get(self, "host")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def port(self) -> Optional[str]:
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter
    def query(self) -> Optional[str]:
        return pulumi.get(self, "query")
@pulumi.output_type
class ListenerRuleAction(dict):
    """A single action of a ListenerRule (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "authenticateCognitoConfig":
            suggest = "authenticate_cognito_config"
        elif key == "authenticateOidcConfig":
            suggest = "authenticate_oidc_config"
        elif key == "fixedResponseConfig":
            suggest = "fixed_response_config"
        elif key == "forwardConfig":
            suggest = "forward_config"
        elif key == "redirectConfig":
            suggest = "redirect_config"
        elif key == "targetGroupArn":
            suggest = "target_group_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAction. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleAction.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleAction.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 type: str,
                 authenticate_cognito_config: Optional['outputs.ListenerRuleAuthenticateCognitoConfig'] = None,
                 authenticate_oidc_config: Optional['outputs.ListenerRuleAuthenticateOidcConfig'] = None,
                 fixed_response_config: Optional['outputs.ListenerRuleFixedResponseConfig'] = None,
                 forward_config: Optional['outputs.ListenerRuleForwardConfig'] = None,
                 order: Optional[int] = None,
                 redirect_config: Optional['outputs.ListenerRuleRedirectConfig'] = None,
                 target_group_arn: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "type", type)
        if authenticate_cognito_config is not None:
            pulumi.set(__self__, "authenticate_cognito_config", authenticate_cognito_config)
        if authenticate_oidc_config is not None:
            pulumi.set(__self__, "authenticate_oidc_config", authenticate_oidc_config)
        if fixed_response_config is not None:
            pulumi.set(__self__, "fixed_response_config", fixed_response_config)
        if forward_config is not None:
            pulumi.set(__self__, "forward_config", forward_config)
        if order is not None:
            pulumi.set(__self__, "order", order)
        if redirect_config is not None:
            pulumi.set(__self__, "redirect_config", redirect_config)
        if target_group_arn is not None:
            pulumi.set(__self__, "target_group_arn", target_group_arn)
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="authenticateCognitoConfig")
    def authenticate_cognito_config(self) -> Optional['outputs.ListenerRuleAuthenticateCognitoConfig']:
        return pulumi.get(self, "authenticate_cognito_config")
    @property
    @pulumi.getter(name="authenticateOidcConfig")
    def authenticate_oidc_config(self) -> Optional['outputs.ListenerRuleAuthenticateOidcConfig']:
        return pulumi.get(self, "authenticate_oidc_config")
    @property
    @pulumi.getter(name="fixedResponseConfig")
    def fixed_response_config(self) -> Optional['outputs.ListenerRuleFixedResponseConfig']:
        return pulumi.get(self, "fixed_response_config")
    @property
    @pulumi.getter(name="forwardConfig")
    def forward_config(self) -> Optional['outputs.ListenerRuleForwardConfig']:
        return pulumi.get(self, "forward_config")
    @property
    @pulumi.getter
    def order(self) -> Optional[int]:
        return pulumi.get(self, "order")
    @property
    @pulumi.getter(name="redirectConfig")
    def redirect_config(self) -> Optional['outputs.ListenerRuleRedirectConfig']:
        return pulumi.get(self, "redirect_config")
    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class ListenerRuleAuthenticateCognitoConfig(dict):
    """Cognito authentication configuration of a ListenerRule action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "userPoolArn":
            suggest = "user_pool_arn"
        elif key == "userPoolClientId":
            suggest = "user_pool_client_id"
        elif key == "userPoolDomain":
            suggest = "user_pool_domain"
        elif key == "authenticationRequestExtraParams":
            suggest = "authentication_request_extra_params"
        elif key == "onUnauthenticatedRequest":
            suggest = "on_unauthenticated_request"
        elif key == "sessionCookieName":
            suggest = "session_cookie_name"
        elif key == "sessionTimeout":
            suggest = "session_timeout"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateCognitoConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleAuthenticateCognitoConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleAuthenticateCognitoConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 user_pool_arn: str,
                 user_pool_client_id: str,
                 user_pool_domain: str,
                 authentication_request_extra_params: Optional[Any] = None,
                 on_unauthenticated_request: Optional[str] = None,
                 scope: Optional[str] = None,
                 session_cookie_name: Optional[str] = None,
                 session_timeout: Optional[int] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "user_pool_arn", user_pool_arn)
        pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
        pulumi.set(__self__, "user_pool_domain", user_pool_domain)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
    @property
    @pulumi.getter(name="userPoolArn")
    def user_pool_arn(self) -> str:
        return pulumi.get(self, "user_pool_arn")
    @property
    @pulumi.getter(name="userPoolClientId")
    def user_pool_client_id(self) -> str:
        return pulumi.get(self, "user_pool_client_id")
    @property
    @pulumi.getter(name="userPoolDomain")
    def user_pool_domain(self) -> str:
        return pulumi.get(self, "user_pool_domain")
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[Any]:
        return pulumi.get(self, "authentication_request_extra_params")
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[str]:
        return pulumi.get(self, "on_unauthenticated_request")
    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[str]:
        return pulumi.get(self, "session_cookie_name")
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[int]:
        return pulumi.get(self, "session_timeout")
@pulumi.output_type
class ListenerRuleAuthenticateOidcConfig(dict):
    """OIDC authentication configuration of a ListenerRule action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "authorizationEndpoint":
            suggest = "authorization_endpoint"
        elif key == "clientId":
            suggest = "client_id"
        elif key == "clientSecret":
            suggest = "client_secret"
        elif key == "tokenEndpoint":
            suggest = "token_endpoint"
        elif key == "userInfoEndpoint":
            suggest = "user_info_endpoint"
        elif key == "authenticationRequestExtraParams":
            suggest = "authentication_request_extra_params"
        elif key == "onUnauthenticatedRequest":
            suggest = "on_unauthenticated_request"
        elif key == "sessionCookieName":
            suggest = "session_cookie_name"
        elif key == "sessionTimeout":
            suggest = "session_timeout"
        elif key == "useExistingClientSecret":
            suggest = "use_existing_client_secret"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleAuthenticateOidcConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleAuthenticateOidcConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleAuthenticateOidcConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 authorization_endpoint: str,
                 client_id: str,
                 client_secret: str,
                 issuer: str,
                 token_endpoint: str,
                 user_info_endpoint: str,
                 authentication_request_extra_params: Optional[Any] = None,
                 on_unauthenticated_request: Optional[str] = None,
                 scope: Optional[str] = None,
                 session_cookie_name: Optional[str] = None,
                 session_timeout: Optional[int] = None,
                 use_existing_client_secret: Optional[bool] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
        pulumi.set(__self__, "issuer", issuer)
        pulumi.set(__self__, "token_endpoint", token_endpoint)
        pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
        if use_existing_client_secret is not None:
            pulumi.set(__self__, "use_existing_client_secret", use_existing_client_secret)
    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> str:
        return pulumi.get(self, "authorization_endpoint")
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> str:
        return pulumi.get(self, "client_secret")
    @property
    @pulumi.getter
    def issuer(self) -> str:
        return pulumi.get(self, "issuer")
    @property
    @pulumi.getter(name="tokenEndpoint")
    def token_endpoint(self) -> str:
        return pulumi.get(self, "token_endpoint")
    @property
    @pulumi.getter(name="userInfoEndpoint")
    def user_info_endpoint(self) -> str:
        return pulumi.get(self, "user_info_endpoint")
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[Any]:
        return pulumi.get(self, "authentication_request_extra_params")
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[str]:
        return pulumi.get(self, "on_unauthenticated_request")
    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[str]:
        return pulumi.get(self, "session_cookie_name")
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[int]:
        return pulumi.get(self, "session_timeout")
    @property
    @pulumi.getter(name="useExistingClientSecret")
    def use_existing_client_secret(self) -> Optional[bool]:
        return pulumi.get(self, "use_existing_client_secret")
@pulumi.output_type
class ListenerRuleFixedResponseConfig(dict):
    """Fixed-response configuration of a ListenerRule action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "statusCode":
            suggest = "status_code"
        elif key == "contentType":
            suggest = "content_type"
        elif key == "messageBody":
            suggest = "message_body"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleFixedResponseConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleFixedResponseConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleFixedResponseConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 status_code: str,
                 content_type: Optional[str] = None,
                 message_body: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "status_code", status_code)
        if content_type is not None:
            pulumi.set(__self__, "content_type", content_type)
        if message_body is not None:
            pulumi.set(__self__, "message_body", message_body)
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> str:
        return pulumi.get(self, "status_code")
    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> Optional[str]:
        return pulumi.get(self, "content_type")
    @property
    @pulumi.getter(name="messageBody")
    def message_body(self) -> Optional[str]:
        return pulumi.get(self, "message_body")
@pulumi.output_type
class ListenerRuleForwardConfig(dict):
    """Forward-action configuration of a ListenerRule (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "targetGroupStickinessConfig":
            suggest = "target_group_stickiness_config"
        elif key == "targetGroups":
            suggest = "target_groups"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleForwardConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleForwardConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleForwardConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 target_group_stickiness_config: Optional['outputs.ListenerRuleTargetGroupStickinessConfig'] = None,
                 target_groups: Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if target_group_stickiness_config is not None:
            pulumi.set(__self__, "target_group_stickiness_config", target_group_stickiness_config)
        if target_groups is not None:
            pulumi.set(__self__, "target_groups", target_groups)
    @property
    @pulumi.getter(name="targetGroupStickinessConfig")
    def target_group_stickiness_config(self) -> Optional['outputs.ListenerRuleTargetGroupStickinessConfig']:
        return pulumi.get(self, "target_group_stickiness_config")
    @property
    @pulumi.getter(name="targetGroups")
    def target_groups(self) -> Optional[Sequence['outputs.ListenerRuleTargetGroupTuple']]:
        return pulumi.get(self, "target_groups")
@pulumi.output_type
class ListenerRuleHostHeaderConfig(dict):
    """Host-header match values of a ListenerRule condition (Pulumi output type)."""
    def __init__(__self__, *,
                 values: Optional[Sequence[str]] = None):
        # The field is stored only when provided, so an absent key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleHttpHeaderConfig(dict):
    """HTTP-header match configuration of a ListenerRule condition (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "httpHeaderName":
            suggest = "http_header_name"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleHttpHeaderConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleHttpHeaderConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleHttpHeaderConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 http_header_name: Optional[str] = None,
                 values: Optional[Sequence[str]] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if http_header_name is not None:
            pulumi.set(__self__, "http_header_name", http_header_name)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter(name="httpHeaderName")
    def http_header_name(self) -> Optional[str]:
        return pulumi.get(self, "http_header_name")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleHttpRequestMethodConfig(dict):
    """HTTP request-method match values of a ListenerRule condition (Pulumi output type)."""
    def __init__(__self__, *,
                 values: Optional[Sequence[str]] = None):
        # The field is stored only when provided, so an absent key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRulePathPatternConfig(dict):
    """Path-pattern match values of a ListenerRule condition (Pulumi output type)."""
    def __init__(__self__, *,
                 values: Optional[Sequence[str]] = None):
        # The field is stored only when provided, so an absent key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleQueryStringConfig(dict):
    """Query-string match values of a ListenerRule condition (Pulumi output type)."""
    def __init__(__self__, *,
                 values: Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']] = None):
        # The field is stored only when provided, so an absent key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence['outputs.ListenerRuleQueryStringKeyValue']]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleQueryStringKeyValue(dict):
    """One key/value pair of a ListenerRule query-string condition (Pulumi output type)."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class ListenerRuleRedirectConfig(dict):
    """Redirect configuration of a ListenerRule action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "statusCode":
            suggest = "status_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRedirectConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleRedirectConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleRedirectConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 status_code: str,
                 host: Optional[str] = None,
                 path: Optional[str] = None,
                 port: Optional[str] = None,
                 protocol: Optional[str] = None,
                 query: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "status_code", status_code)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if query is not None:
            pulumi.set(__self__, "query", query)
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> str:
        return pulumi.get(self, "status_code")
    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        return pulumi.get(self, "host")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def port(self) -> Optional[str]:
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter
    def query(self) -> Optional[str]:
        return pulumi.get(self, "query")
@pulumi.output_type
class ListenerRuleRuleCondition(dict):
    """A single match condition of a ListenerRule (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "hostHeaderConfig":
            suggest = "host_header_config"
        elif key == "httpHeaderConfig":
            suggest = "http_header_config"
        elif key == "httpRequestMethodConfig":
            suggest = "http_request_method_config"
        elif key == "pathPatternConfig":
            suggest = "path_pattern_config"
        elif key == "queryStringConfig":
            suggest = "query_string_config"
        elif key == "sourceIpConfig":
            suggest = "source_ip_config"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleRuleCondition. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleRuleCondition.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleRuleCondition.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 field: Optional[str] = None,
                 host_header_config: Optional['outputs.ListenerRuleHostHeaderConfig'] = None,
                 http_header_config: Optional['outputs.ListenerRuleHttpHeaderConfig'] = None,
                 http_request_method_config: Optional['outputs.ListenerRuleHttpRequestMethodConfig'] = None,
                 path_pattern_config: Optional['outputs.ListenerRulePathPatternConfig'] = None,
                 query_string_config: Optional['outputs.ListenerRuleQueryStringConfig'] = None,
                 source_ip_config: Optional['outputs.ListenerRuleSourceIpConfig'] = None,
                 values: Optional[Sequence[str]] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if field is not None:
            pulumi.set(__self__, "field", field)
        if host_header_config is not None:
            pulumi.set(__self__, "host_header_config", host_header_config)
        if http_header_config is not None:
            pulumi.set(__self__, "http_header_config", http_header_config)
        if http_request_method_config is not None:
            pulumi.set(__self__, "http_request_method_config", http_request_method_config)
        if path_pattern_config is not None:
            pulumi.set(__self__, "path_pattern_config", path_pattern_config)
        if query_string_config is not None:
            pulumi.set(__self__, "query_string_config", query_string_config)
        if source_ip_config is not None:
            pulumi.set(__self__, "source_ip_config", source_ip_config)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def field(self) -> Optional[str]:
        return pulumi.get(self, "field")
    @property
    @pulumi.getter(name="hostHeaderConfig")
    def host_header_config(self) -> Optional['outputs.ListenerRuleHostHeaderConfig']:
        return pulumi.get(self, "host_header_config")
    @property
    @pulumi.getter(name="httpHeaderConfig")
    def http_header_config(self) -> Optional['outputs.ListenerRuleHttpHeaderConfig']:
        return pulumi.get(self, "http_header_config")
    @property
    @pulumi.getter(name="httpRequestMethodConfig")
    def http_request_method_config(self) -> Optional['outputs.ListenerRuleHttpRequestMethodConfig']:
        return pulumi.get(self, "http_request_method_config")
    @property
    @pulumi.getter(name="pathPatternConfig")
    def path_pattern_config(self) -> Optional['outputs.ListenerRulePathPatternConfig']:
        return pulumi.get(self, "path_pattern_config")
    @property
    @pulumi.getter(name="queryStringConfig")
    def query_string_config(self) -> Optional['outputs.ListenerRuleQueryStringConfig']:
        return pulumi.get(self, "query_string_config")
    @property
    @pulumi.getter(name="sourceIpConfig")
    def source_ip_config(self) -> Optional['outputs.ListenerRuleSourceIpConfig']:
        return pulumi.get(self, "source_ip_config")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleSourceIpConfig(dict):
    """Source-IP match values of a ListenerRule condition (Pulumi output type)."""
    def __init__(__self__, *,
                 values: Optional[Sequence[str]] = None):
        # The field is stored only when provided, so an absent key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "values")
@pulumi.output_type
class ListenerRuleTargetGroupStickinessConfig(dict):
    """Target-group stickiness settings of a ListenerRule forward action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "durationSeconds":
            suggest = "duration_seconds"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupStickinessConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleTargetGroupStickinessConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleTargetGroupStickinessConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 duration_seconds: Optional[int] = None,
                 enabled: Optional[bool] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if duration_seconds is not None:
            pulumi.set(__self__, "duration_seconds", duration_seconds)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter(name="durationSeconds")
    def duration_seconds(self) -> Optional[int]:
        return pulumi.get(self, "duration_seconds")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        return pulumi.get(self, "enabled")
@pulumi.output_type
class ListenerRuleTargetGroupTuple(dict):
    """A weighted target-group entry of a ListenerRule forward action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "targetGroupArn":
            suggest = "target_group_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerRuleTargetGroupTuple. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerRuleTargetGroupTuple.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerRuleTargetGroupTuple.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 target_group_arn: Optional[str] = None,
                 weight: Optional[int] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if target_group_arn is not None:
            pulumi.set(__self__, "target_group_arn", target_group_arn)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        return pulumi.get(self, "target_group_arn")
    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        return pulumi.get(self, "weight")
@pulumi.output_type
class ListenerTargetGroupStickinessConfig(dict):
    """Target-group stickiness settings of a Listener forward action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "durationSeconds":
            suggest = "duration_seconds"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupStickinessConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerTargetGroupStickinessConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerTargetGroupStickinessConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 duration_seconds: Optional[int] = None,
                 enabled: Optional[bool] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if duration_seconds is not None:
            pulumi.set(__self__, "duration_seconds", duration_seconds)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter(name="durationSeconds")
    def duration_seconds(self) -> Optional[int]:
        return pulumi.get(self, "duration_seconds")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        return pulumi.get(self, "enabled")
@pulumi.output_type
class ListenerTargetGroupTuple(dict):
    """A weighted target-group entry of a Listener forward action (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "targetGroupArn":
            suggest = "target_group_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ListenerTargetGroupTuple. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ListenerTargetGroupTuple.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ListenerTargetGroupTuple.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 target_group_arn: Optional[str] = None,
                 weight: Optional[int] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if target_group_arn is not None:
            pulumi.set(__self__, "target_group_arn", target_group_arn)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        return pulumi.get(self, "target_group_arn")
    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        return pulumi.get(self, "weight")
@pulumi.output_type
class LoadBalancerAttribute(dict):
    """A key/value attribute of a LoadBalancer (Pulumi output type)."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class LoadBalancerSubnetMapping(dict):
    """A subnet mapping of a LoadBalancer (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "subnetId":
            suggest = "subnet_id"
        elif key == "allocationId":
            suggest = "allocation_id"
        elif key == "iPv6Address":
            suggest = "i_pv6_address"
        elif key == "privateIPv4Address":
            suggest = "private_i_pv4_address"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LoadBalancerSubnetMapping. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        LoadBalancerSubnetMapping.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        LoadBalancerSubnetMapping.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 subnet_id: str,
                 allocation_id: Optional[str] = None,
                 i_pv6_address: Optional[str] = None,
                 private_i_pv4_address: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        pulumi.set(__self__, "subnet_id", subnet_id)
        if allocation_id is not None:
            pulumi.set(__self__, "allocation_id", allocation_id)
        if i_pv6_address is not None:
            pulumi.set(__self__, "i_pv6_address", i_pv6_address)
        if private_i_pv4_address is not None:
            pulumi.set(__self__, "private_i_pv4_address", private_i_pv4_address)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> str:
        return pulumi.get(self, "subnet_id")
    @property
    @pulumi.getter(name="allocationId")
    def allocation_id(self) -> Optional[str]:
        return pulumi.get(self, "allocation_id")
    @property
    @pulumi.getter(name="iPv6Address")
    def i_pv6_address(self) -> Optional[str]:
        return pulumi.get(self, "i_pv6_address")
    @property
    @pulumi.getter(name="privateIPv4Address")
    def private_i_pv4_address(self) -> Optional[str]:
        return pulumi.get(self, "private_i_pv4_address")
@pulumi.output_type
class LoadBalancerTag(dict):
    """A required key/value tag of a LoadBalancer (Pulumi output type)."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupAttribute(dict):
    """A key/value attribute of a TargetGroup (Pulumi output type)."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupMatcher(dict):
    """Health-check success-code matcher of a TargetGroup (Pulumi output type)."""
    @staticmethod
    def __key_warning(key: str):
        # Suggest the snake_case property when a camelCase wire key is accessed.
        suggest = None
        if key == "grpcCode":
            suggest = "grpc_code"
        elif key == "httpCode":
            suggest = "http_code"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TargetGroupMatcher. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TargetGroupMatcher.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        TargetGroupMatcher.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 grpc_code: Optional[str] = None,
                 http_code: Optional[str] = None):
        # Optional fields are stored only when provided, so absent keys stay absent.
        if grpc_code is not None:
            pulumi.set(__self__, "grpc_code", grpc_code)
        if http_code is not None:
            pulumi.set(__self__, "http_code", http_code)
    @property
    @pulumi.getter(name="grpcCode")
    def grpc_code(self) -> Optional[str]:
        return pulumi.get(self, "grpc_code")
    @property
    @pulumi.getter(name="httpCode")
    def http_code(self) -> Optional[str]:
        return pulumi.get(self, "http_code")
@pulumi.output_type
class TargetGroupTag(dict):
    """Output type for a key/value tag attached to a target group.

    Same shape as ``LoadBalancerTag``: both fields are mandatory.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        # Both arguments are required; stored unconditionally.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """The tag key."""
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """The tag value."""
        return pulumi.get(self, "value")
@pulumi.output_type
class TargetGroupTargetDescription(dict):
    """Output type describing one registered target of a target group."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers index with the camelCase wire name instead of the
        # snake_case property getter.
        suggest = "availability_zone" if key == "availabilityZone" else None
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TargetGroupTargetDescription. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TargetGroupTargetDescription.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default=None) -> Any:
        TargetGroupTargetDescription.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 id: str,
                 availability_zone: Optional[str] = None,
                 port: Optional[int] = None):
        # "id" is required; the remaining fields are stored only when given.
        pulumi.set(__self__, "id", id)
        for attr, given in (("availability_zone", availability_zone), ("port", port)):
            if given is not None:
                pulumi.set(__self__, attr, given)
    @property
    @pulumi.getter
    def id(self) -> str:
        """The target identifier (required)."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> Optional[str]:
        """The target's availability zone, if specified (wire key "availabilityZone")."""
        return pulumi.get(self, "availability_zone")
    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """The target's port, if specified."""
        return pulumi.get(self, "port")
| 59,592 | 17,818 |
#===============================================================================
# Copyright (c) 2018, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of animate_gp nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
import GPy
from matplotlib import pyplot as plt
from matplotlib import animation
def exp_map_sphere(mu, E):
    """Exponential map on the unit sphere from base point mu along tangents E.

    Parameters:
        mu: (N, 1) unit vector, the base point.
        E:  (N, n) matrix whose columns are tangent vectors at mu.
    Returns:
        (N, n) matrix whose columns lie on the unit sphere.

    Bug fix: the original computed mu*sin(theta) + E*cos(theta)/theta, whose
    theta->0 limit is the tangent direction, contradicting the explicit
    small-theta patch below (which returns mu). The standard sphere
    exponential map is cos(theta)*mu + sin(theta)*E/theta, continuous at 0.
    """
    theta = np.sqrt((E ** 2).sum(0))[None, :]
    # Guard the 0/0 division; the masked assignment below fixes those columns.
    safe_theta = np.where(theta == 0.0, 1.0, theta)
    M = mu * np.cos(theta) + E * np.sin(theta) / safe_theta
    M[:, np.abs(theta[0]) <= 1e-7] = mu  # exact base point for tiny steps
    return M
def exp_map(mu, E):
    """Exponential map on the unit sphere (duplicate of exp_map_sphere).

    Parameters:
        mu: (N, 1) unit vector, the base point.
        E:  (N, n) tangent-vector columns at mu.
    Returns:
        (N, n) matrix whose columns lie on the unit sphere.

    Bug fix: sin/cos were swapped relative to the standard exponential map
    cos(theta)*mu + sin(theta)*E/theta; the original formula's theta->0 limit
    was the tangent, contradicting the small-theta patch that returns mu.
    """
    theta = np.sqrt((E ** 2).sum(0))[None]
    # Guard the 0/0 division; the masked assignment below fixes those columns.
    safe_theta = np.where(theta == 0.0, 1.0, theta)
    M = mu * np.cos(theta) + E * np.sin(theta) / safe_theta
    M[:, np.abs(theta[0]) <= 1e-7] = mu  # exact base point for tiny steps
    return M
def animation_matrix(N, n):
    """Return an (N, n) matrix tracing a closed random great-circle path.

    Draws a random base direction of radius r and a random unit tangent
    orthogonal to it, then walks the sphere exponential map over a full turn.
    """
    direction = np.random.normal(0, 1, size=(N, 1))
    radius = np.sqrt(np.sum(direction ** 2))
    direction = direction / radius
    tangent = np.random.normal(0, 1, size=(N, 1))
    # Project out the component along 'direction', then normalize.
    tangent = tangent - direction.dot(direction.T.dot(tangent))
    tangent = tangent / np.sqrt(np.sum(tangent ** 2))
    angles = np.linspace(0.001, 2 * np.pi, n)[None]
    return radius * exp_map_sphere(direction, angles * tangent)
def get_percs(mu, K):
    """Return 75 per-coordinate percentile curves of samples from N(mu, K).

    Draws 50000 samples and evaluates percentiles from 0.01 to 99.99,
    giving shape (75, len(mu)).
    """
    draws = np.random.multivariate_normal(mu.squeeze(), K, size=(50000)).T
    levels = np.linspace(0.01, 99.99, 75)
    return np.percentile(draws, levels, overwrite_input=True, axis=1)
def create_empty_ax():
    """Return a (fig, ax) pair with a frameless, tick-free 16:9 axes."""
    fig, ax = plt.subplots(figsize=(4.2 * (16 / 9), 4.20))
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_visible(False)
    ax.set_frame_on(False)
    return fig, ax
def plot_data(ax, X, Y):
    """Scatter the observations as black crosses; return the artist."""
    return ax.scatter(X, Y, color='k', marker='x')
def fill_grad(ax, X, mu, K):
    """Shade the GP marginal density on ax using GPy's gradient-fill helper."""
    from GPy.plotting.matplot_dep.plot_definitions import MatplotlibPlots
    percentiles = get_percs(mu, K)
    MatplotlibPlots().fill_gradient(ax, X[:, 0], percentiles, color='#687C8E', linewidth=0, alpha=1.)
def animate_kernel(fig, ax, X, mu, K, out, frames=200):
    """Render an animation of GP sample paths drawn from N(mu, K) to a file.

    One line per palette color; each line follows its own random great-circle
    path in sample space, correlated through the Cholesky-style factor of K.
    Saves an x264 movie to 'out' and returns the FuncAnimation object.
    """
    palette = ('#f7fbff',
               '#deebf7',
               '#c6dbef',
               '#9ecae1',
               '#6baed6',
               '#4292c6',
               '#2171b5',
               '#08519c',
               '#08306b')
    # Jitter the diagonal for numerical stability before factorizing.
    chol = GPy.util.linalg.pdinv(K + np.eye(K.shape[0]) * 1e-8)[1]
    sample_lines = [ax.plot([], [], lw=.8, color=c)[0] for c in palette]
    paths = [animation_matrix(X.shape[0], frames) for _ in sample_lines]

    def _clear():
        for line in sample_lines:
            line.set_data([], [])
        return sample_lines

    def _draw(frame_idx):
        for path, line in zip(paths, sample_lines):
            line.set_data(X[:, 0], mu[:, [0]] + chol.dot(path[:, [frame_idx]]))
        return sample_lines

    anim = animation.FuncAnimation(fig, _draw, init_func=_clear,
                                   frames=frames, interval=20, blit=False,
                                   repeat=True, save_count=frames,
                                   )
    movie_writer = animation.FFMpegFileWriter(
        fps=30,
        codec='libx264',
        extra_args=[
            '-pix_fmt', 'yuva420p',
        ],
    )
    anim.save(
        out,
        writer=movie_writer,
        dpi=150,
        savefig_kwargs={'transparent': False, 'facecolor': 'white'},
    )
    return anim
| 4,652 | 1,741 |
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
class AutomaticRedirectsTestIndexPage(Page):
    """Wagtail page model with an optional subtitle and rich-text body.

    NOTE(review): purpose (exercising automatic redirects) is inferred from
    the class name — confirm against the tests that use it.
    """
    # Both fields may be left blank and default to the empty string.
    subtitle = models.CharField(max_length=255, blank=True, default='')
    body = RichTextField(blank=True, default='')
    content_panels = Page.content_panels + [
        FieldPanel('subtitle'),
        FieldPanel('body'),
    ]
class AutomaticRedirectsTestPage(Page):
    """Wagtail page model with a single optional rich-text body field.

    NOTE(review): purpose (exercising automatic redirects) is inferred from
    the class name — confirm against the tests that use it.
    """
    # May be left blank; defaults to the empty string.
    body = RichTextField(blank=True, default='')
    content_panels = Page.content_panels + [
        FieldPanel('body'),
    ]
| 615 | 186 |
import socket, sys, time, colorama, os
from threading import *
colorama.init(autoreset=True)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "localhost"
port = int(sys.argv[1])  # the port is the single required CLI argument
try:
    serversocket.bind((host, port))
# bind() failures surface as OSError (address in use, permission, etc.).
except OSError as e:
    import traceback
    # Bug fix: the original message had two {} placeholders but three format
    # arguments, so the exception text was silently dropped, and host/port
    # were joined with '.' instead of ':'.
    print("An error occurred while trying to open socket {}:{}:\n{}".format(host, port, e))
    traceback.print_exc()
    input("Hit any key to exit")
    sys.exit(-1)
class client(Thread):
    """Thread that relays everything received on one client socket to stdout.

    Starts itself on construction; exits the whole process when the
    connection breaks or the peer disconnects.
    """
    def __init__(self, socket, address):
        # Parameter name 'socket' shadows the module inside this method;
        # kept unchanged for backward compatibility with callers.
        Thread.__init__(self)
        self.sock = socket
        self.addr = address
        self.start()  # begin servicing the connection immediately

    def run(self):
        while True:
            try:
                data = self.sock.recv(1024).decode()
            # Bug fix: the bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; catch only what recv()/decode() raise.
            except (OSError, UnicodeDecodeError):
                input("The socket was broken! Hit any key to exit")
                os._exit(0)
            if not data:
                # recv() returning b'' means the peer closed the connection.
                input("The client disconnected! Hit any key to exit")
                os._exit(0)
            print(data)
serversocket.listen(5)
print("Console monitor started, listening on {}:{}".format(host, port))
# Hand each incoming connection to its own reader thread.
while True:
    conn, remote = serversocket.accept()
    client(conn, remote)
| 1,190 | 355 |
#!/usr/bin/env python3
from aoc2021.util import print_solutions, import_matrix
from math import inf
def part_1(dumbos):
    """Count total flashes over 100 steps of the octopus-energy simulation."""
    total = 0
    for _ in range(100):
        # Every octopus gains one energy, then any above 9 flashes (cascading).
        for position in dumbos:
            dumbos[position] += 1
        flashed = set()
        for position, energy in dumbos.items():
            if position not in flashed and energy > 9:
                flash(dumbos, position, flashed)
        total += len(flashed)
    return total
def part_2(dumbos):
    """Return the first step on which every octopus flashes simultaneously.

    Fix: removed the unused local 'flashes = 0' (dead assignment — it was
    never read or updated in this function).
    """
    step = 0
    while True:
        step += 1
        # Every octopus gains one energy, then any above 9 flashes (cascading).
        for coord in dumbos:
            dumbos[coord] += 1
        flashed = set()
        for coord, dumbo in dumbos.items():
            if coord not in flashed and dumbo > 9:
                flash(dumbos, coord, flashed)
        if len(flashed) == len(dumbos):
            return step
def flash(dumbos, coord, flashed):
    """Give the octopus at coord one energy; if it exceeds 9, flash and cascade.

    Off-grid coordinates and already-flashed octopuses are ignored. A flashing
    octopus resets to 0, is recorded in 'flashed', and energizes all eight
    neighbours recursively.
    """
    if coord not in dumbos or coord in flashed:
        return
    dumbos[coord] += 1
    if dumbos[coord] <= 9:
        return
    dumbos[coord] = 0
    flashed.add(coord)
    for d_row, d_col in ((0, 1), (0, -1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, 1), (1, -1)):
        flash(dumbos, (coord[0] + d_row, coord[1] + d_col), flashed)
def main():
    """Run both puzzle parts against the sample and real inputs."""
    for solver in (part_1, part_2):
        print_solutions(
            ["resources/day11-test.txt", "resources/day11.txt"],
            import_matrix,
            solver,
        )


if __name__ == "__main__":
    main()
| 1,517 | 566 |