text string | size int64 | token_count int64 |
|---|---|---|
"""
Pytest common fixtures
"""
from __future__ import absolute_import, print_function
import os
import pytest
from pywbemtools._utils import CONNECTIONS_FILENAME, \
DEFAULT_CONNECTIONS_DIR, DEFAULT_CONNECTIONS_FILE
SCRIPT_DIR = os.path.dirname(__file__)
# Backup file of the default connections file
BAK_SUFFIX = '.bak'
CONNECTIONS_BAK_FILENAME = CONNECTIONS_FILENAME + BAK_SUFFIX
CONNECTIONS_BAK_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR,
CONNECTIONS_BAK_FILENAME)
# Save files for the default connections file and its backup file
SAVE_SUFFIX = '.testsavepywbemclitests'
CONNECTIONS_SAVE_FILENAME = CONNECTIONS_FILENAME + SAVE_SUFFIX
CONNECTIONS_SAVE_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR,
CONNECTIONS_SAVE_FILENAME)
CONNECTIONS_BAK_SAVE_FILENAME = CONNECTIONS_BAK_FILENAME + SAVE_SUFFIX
CONNECTIONS_BAK_SAVE_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR,
CONNECTIONS_BAK_SAVE_FILENAME)
@pytest.fixture
def default_connections_file_path():
    """Fixture providing the path name of the default connections file."""
    path = DEFAULT_CONNECTIONS_FILE
    return path
@pytest.fixture(scope='session', autouse=True)
def save_default_connections_file(request):
    """
    Fixture that saves away an existing default connections file and its backup
    file at the begin of a test session and restores them at the end of the
    test session.
    This function is called once per test session (i.e. execution of the pytest
    command) before the first test is executed.
    """
    # Save the default connections file and its backup file by renaming them
    # to the *.testsavepywbemclitests names, so tests can freely create /
    # delete the default files without clobbering the user's real config.
    if os.path.isfile(DEFAULT_CONNECTIONS_FILE):
        os.rename(DEFAULT_CONNECTIONS_FILE, CONNECTIONS_SAVE_FILE)
    if os.path.isfile(CONNECTIONS_BAK_FILE):
        os.rename(CONNECTIONS_BAK_FILE, CONNECTIONS_BAK_SAVE_FILE)

    def teardown():
        """
        Restore the saved default connections file and its saved backup
        file.
        This function is called once per test session (i.e. execution of the
        pytest command) after the last test has been executed.
        """
        # Restore the saved default connections file.
        # Any file left behind by the tests is removed first so the rename
        # cannot fail on an existing target.
        if os.path.isfile(DEFAULT_CONNECTIONS_FILE):
            os.remove(DEFAULT_CONNECTIONS_FILE)
        if os.path.isfile(CONNECTIONS_SAVE_FILE):
            os.rename(CONNECTIONS_SAVE_FILE, DEFAULT_CONNECTIONS_FILE)
        # Restore the saved backup file of the default connections file
        if os.path.isfile(CONNECTIONS_BAK_FILE):
            os.remove(CONNECTIONS_BAK_FILE)
        if os.path.isfile(CONNECTIONS_BAK_SAVE_FILE):
            os.rename(CONNECTIONS_BAK_SAVE_FILE, CONNECTIONS_BAK_FILE)

    request.addfinalizer(teardown)
| 2,769 | 917 |
import tensorflow as tf
from menpofit.visualize import plot_cumulative_error_distribution
from menpofit.error import compute_cumulative_error
from scipy.integrate import simps
from menpo_functions import load_menpo_image_list, load_bb_dictionary
from logging_functions import *
from data_loading_functions import *
from time import time
import sys
from PyQt5 import QtWidgets
qapp=QtWidgets.QApplication([''])
def load_menpo_test_list(img_dir, test_data='full', image_size=256, margin=0.25, bb_type='gt'):
    """Load the menpo image list for a test split, with all augmentation disabled.

    Reads the bounding-box dictionary from ``<img_dir>/Bounding_Boxes`` and
    forwards it to ``load_menpo_image_list`` in TEST mode.
    """
    mode = 'TEST'
    bb_dictionary = load_bb_dictionary(
        os.path.join(img_dir, 'Bounding_Boxes'), mode, test_data=test_data)
    return load_menpo_image_list(
        img_dir=img_dir, train_crop_dir=None, img_dir_ns=None, mode=mode,
        bb_dictionary=bb_dictionary, image_size=image_size, margin=margin,
        bb_type=bb_type, test_data=test_data, augment_basic=False,
        augment_texture=False, p_texture=0, augment_geom=False, p_geom=0)
def evaluate_heatmap_fusion_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                                    bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                                    debug_data_size=20):
    """Compute per-image normalized mean error (NME) of the fusion heatmap network.

    Restores the trained model from *model_path*, runs the *test_data* split
    found under *img_path* through it in mini-batches, converts the predicted
    heatmaps to landmark coordinates, and evaluates the model's
    ``nme_per_image`` op against the ground-truth landmarks.

    :param model_path: checkpoint path of the trained fusion network.
    :param img_path: root directory of the test images (expects a
        'Bounding_Boxes' subdirectory).
    :param test_data: name of the test split (e.g. 'full').
    :param batch_size: mini-batch size; shrunk automatically when the data
        set contains fewer images than one batch.
    :param debug: when True only the first *debug_data_size* images are used.
    :return: 1-D numpy array with one NME value per test image.
    """
    t = time()
    # Imported lazily so the heavy model module is only loaded when needed.
    from deep_heatmaps_model_fusion_net import DeepHeatmapsModel
    import logging
    logging.getLogger('tensorflow').disabled = True

    # load test image menpo list
    test_menpo_img_list = load_menpo_test_list(
        img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type)

    if debug:
        test_menpo_img_list = test_menpo_img_list[:debug_data_size]
        print ('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data +
               ' set (%d images - debug mode) ***' % debug_data_size)
    else:
        print ('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images) ***' %
               (len(test_menpo_img_list)))

    # create heatmap model
    tf.reset_default_graph()

    model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim,
                              num_landmarks=num_landmarks, img_path=img_path, test_model_path=model_path,
                              test_data=test_data, menpo_verbose=False)

    # add placeholders
    model.add_placeholders()
    # build model
    model.build_model()
    # create loss ops
    model.create_loss_ops()

    num_batches = int(1. * len(test_menpo_img_list) / batch_size)
    if num_batches == 0:
        # Data set is smaller than one batch: use a single full-size batch.
        batch_size = len(test_menpo_img_list)
        num_batches = 1

    # "reminder" == number of left-over images (i.e. the remainder) that do
    # not fill a whole batch and are processed in one extra pass below.
    reminder = len(test_menpo_img_list) - num_batches * batch_size
    num_batches_reminder = num_batches + 1 * (reminder > 0)
    img_inds = np.arange(len(test_menpo_img_list))

    with tf.Session() as session:

        # load trained parameters
        saver = tf.train.Saver()
        saver.restore(session, model_path)

        print ('\nnum batches: ' + str(num_batches_reminder))

        err = []
        for j in range(num_batches):
            print ('batch %d / %d ...' % (j + 1, num_batches_reminder))
            batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=batch_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            # Predict full-resolution fusion heatmaps, then decode landmarks.
            batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images})

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_pred, batch_size=batch_size, image_size=image_size, num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        if reminder > 0:
            # Process the left-over images that did not fill a whole batch.
            print ('batch %d / %d ...' % (j + 2, num_batches_reminder))
            reminder_inds = img_inds[-reminder:]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images})

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_pred, batch_size=reminder, image_size=image_size, num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        print ('\ndone!')
        print ('run time: ' + str(time() - t))

    return err
def evaluate_heatmap_primary_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                                     bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                                     debug_data_size=20):
    """Compute per-image normalized mean error (NME) of the primary heatmap network.

    Same evaluation loop as ``evaluate_heatmap_fusion_network``, except that the
    primary network predicts low-resolution heatmaps (``pred_hm_p``) which are
    upsampled by a factor of 4 before landmark decoding, and the error op uses
    the model's small-landmark placeholders.

    :param model_path: checkpoint path of the trained primary network.
    :param img_path: root directory of the test images.
    :param test_data: name of the test split (e.g. 'full').
    :param debug: when True only the first *debug_data_size* images are used.
    :return: 1-D numpy array with one NME value per test image.
    """
    t = time()
    # Imported lazily so the heavy model module is only loaded when needed.
    from deep_heatmaps_model_primary_net import DeepHeatmapsModel
    import logging
    logging.getLogger('tensorflow').disabled = True

    # load test image menpo list
    test_menpo_img_list = load_menpo_test_list(
        img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type)

    if debug:
        test_menpo_img_list = test_menpo_img_list[:debug_data_size]
        print ('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data +
               ' set (%d images - debug mode) ***' % debug_data_size)
    else:
        print ('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data +
               ' set (%d images) ***' % (len(test_menpo_img_list)))

    # create heatmap model
    tf.reset_default_graph()

    model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim,
                              num_landmarks=num_landmarks, img_path=img_path, test_model_path=model_path,
                              test_data=test_data, menpo_verbose=False)

    # add placeholders
    model.add_placeholders()
    # build model
    model.build_model()
    # create loss ops
    model.create_loss_ops()

    num_batches = int(1. * len(test_menpo_img_list) / batch_size)
    if num_batches == 0:
        # Data set is smaller than one batch: use a single full-size batch.
        batch_size = len(test_menpo_img_list)
        num_batches = 1

    # "reminder" == number of left-over images (i.e. the remainder) that do
    # not fill a whole batch and are processed in one extra pass below.
    reminder = len(test_menpo_img_list) - num_batches * batch_size
    num_batches_reminder = num_batches + 1 * (reminder > 0)
    img_inds = np.arange(len(test_menpo_img_list))

    with tf.Session() as session:

        # load trained parameters
        saver = tf.train.Saver()
        saver.restore(session, model_path)

        print ('\nnum batches: ' + str(num_batches_reminder))

        err = []
        for j in range(num_batches):
            print ('batch %d / %d ...' % (j + 1, num_batches_reminder))
            batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=batch_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            # Predict low-res primary heatmaps and upsample x4 to image size.
            batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images})
            batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1)  # NN interpolation

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_small_pred, batch_size=batch_size, image_size=image_size,
                num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        if reminder > 0:
            # Process the left-over images that did not fill a whole batch.
            print ('batch %d / %d ...' % (j + 2, num_batches_reminder))
            reminder_inds = img_inds[-reminder:]

            batch_images, _, batch_landmarks_gt = load_images_landmarks(
                test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size,
                c_dim=c_dim, num_landmarks=num_landmarks, scale=scale)

            batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images})
            batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1)  # NN interpolation

            batch_pred_landmarks = batch_heat_maps_to_landmarks(
                batch_maps_small_pred, batch_size=reminder, image_size=image_size,
                num_landmarks=num_landmarks)

            batch_err = session.run(
                model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks})
            err = np.hstack((err, batch_err))

        print ('\ndone!')
        print ('run time: ' + str(time() - t))

    return err
def evaluate_heatmap_network(model_path, network_type, img_path, test_data, batch_size=10, image_size=256, margin=0.25,
                             bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False,
                             debug_data_size=20):
    """Dispatch NME evaluation to the fusion or primary heatmap network.

    *network_type* is matched case-insensitively against 'fusion' / 'primary';
    any other value terminates the program with an error message.
    """
    kind = network_type.lower()
    shared_kwargs = dict(
        model_path=model_path, img_path=img_path, test_data=test_data,
        batch_size=batch_size, image_size=image_size, margin=margin,
        bb_type=bb_type, c_dim=c_dim, scale=scale, num_landmarks=num_landmarks,
        debug=debug, debug_data_size=debug_data_size)
    if kind == 'fusion':
        return evaluate_heatmap_fusion_network(**shared_kwargs)
    if kind == 'primary':
        return evaluate_heatmap_primary_network(**shared_kwargs)
    sys.exit('\n*** Error: please choose a valid network type: Fusion/Primary ***')
def AUC(errors, max_error, step_error=0.0001):
    """Return (area-under-CED-curve / max_error, failure rate) for *errors*.

    The cumulative error distribution is sampled on a regular grid from 0 to
    *max_error* in steps of *step_error* and integrated with Simpson's rule.
    The failure rate is the fraction of images whose error exceeds *max_error*.
    """
    bins = list(np.arange(0., max_error + step_error, step_error))
    ced_curve = np.array(compute_cumulative_error(errors, bins))
    area = simps(ced_curve, x=bins) / max_error
    failure_rate = 1. - ced_curve[-1]
    return area, failure_rate
def print_nme_statistics(
        errors, model_path, network_type, test_data, max_error=0.08, log_path='', save_log=True, plot_ced=True,
        norm='interocular distance'):
    """Print NME statistics for *errors*, and optionally plot/log them.

    :param errors: iterable of per-image normalized mean errors.
    :param model_path: checkpoint path of the evaluated model (for reporting).
    :param network_type: network name used in headings and file names.
    :param test_data: name of the evaluated test split.
    :param max_error: upper bound for the AUC / failure-rate computation.
    :param log_path: directory that receives the statistics file and CED plot.
    :param save_log: when True write the statistics (and plot) to *log_path*.
    :param plot_ced: when True draw the cumulative error distribution curve.
    :param norm: human-readable name of the error normalization.
    """
    auc, failures = AUC(errors, max_error=max_error)

    print ("\n****** NME statistics for " + network_type + " Network ******\n")
    print ("* model path: " + model_path)
    print ("* dataset: " + test_data + ' set')
    print ("\n* Normalized mean error (percentage of "+norm+"): %.2f" % (100 * np.mean(errors)))
    print ("\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc))
    print ("\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%')

    if plot_ced:
        plt.figure()
        plt.yticks(np.linspace(0, 1, 11))
        plot_cumulative_error_distribution(
            list(errors),
            legend_entries=[network_type],
            marker_style=['s'],
            marker_size=7,
            x_label='Normalised Point-to-Point Error\n('+norm+')\n*' + test_data + ' set*',
        )

    if save_log:
        with open(os.path.join(log_path, network_type.lower() + "_nme_statistics_on_" + test_data + "_set.txt"),
                  "wb") as f:
            f.write(b"************************************************")
            f.write(("\n****** NME statistics for " + str(network_type) + " Network ******\n").encode())
            f.write(b"************************************************")
            f.write(("\n\n* model path: " + str(model_path)).encode())
            f.write(("\n\n* dataset: " + str(test_data) + ' set').encode())
            # BUGFIX: this line previously concatenated a bytes literal with
            # the str *norm* (b"..." + norm + ...), which raises TypeError in
            # Python 3. Build the full message as str, then encode once.
            f.write(("\n\n* Normalized mean error (percentage of " + norm + "): %.2f"
                     % (100 * np.mean(errors))).encode())
            f.write(("\n\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc)).encode())
            f.write(("\n\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%').encode())
        if plot_ced:
            plt.savefig(os.path.join(log_path, network_type.lower() + '_nme_ced_on_' + test_data + '_set.png'),
                        bbox_inches='tight')
            plt.close()
    print ('\nlog path: ' + log_path)
def print_ced_compare_methods(
        method_errors,method_names,test_data,log_path='', save_log=True, norm='interocular distance'):
    """Plot the cumulative error distributions of several methods in one figure.

    *method_errors* and *method_names* are parallel sequences; the resulting
    CED plot is optionally saved under *log_path*.
    """
    plt.yticks(np.linspace(0, 1, 11))
    error_lists = [list(err) for err in list(method_errors)]
    plot_cumulative_error_distribution(
        error_lists,
        legend_entries=list(method_names),
        marker_style=['s'],
        marker_size=7,
        x_label='Normalised Point-to-Point Error\n('+norm+')\n*'+test_data+' set*'
    )
    if save_log:
        out_png = os.path.join(log_path,'nme_ced_on_'+test_data+'_set.png')
        plt.savefig(out_png, bbox_inches='tight')
        print ('ced plot path: ' + out_png)
        plt.close()
#!/usr/bin/env python3
"""Gather experiment results into a CSV file.

Usage: <script> <experiment_path> <csv_path>
"""
from DataBase_Manager.beta_manager import gather_data
import sys

# ROBUSTNESS: fail with a usage message instead of an IndexError when the
# two required command-line arguments are missing.
if len(sys.argv) < 3:
    sys.exit('usage: %s <experiment_path> <csv_path>' % sys.argv[0])

experiment_path = sys.argv[1]
csv_path = sys.argv[2]

gather_data(experiment_path, save_file=csv_path)
import requests
def info_spacex():
    """Fetch SpaceX company information from the public v3 API and print it.

    Raises ``requests.HTTPError`` on a non-2xx response and
    ``requests.Timeout`` if the API does not answer within 10 seconds.
    """
    url = "https://api.spacexdata.com/v3/info"
    # ROBUSTNESS: bound the request and fail loudly on HTTP errors instead of
    # raising an obscure KeyError below when an error payload comes back.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    data = response.json()
    print(
        "SpaceX Info ->\n"
        f"• Founder : {data['founder']}\n"
        f"• Founded : {data['founded']}\n"
        f"• Employees : {data['employees']}\n"
        f"• Vehicles : {data['vehicles']}\n"
        f"• Launch Sites : {data['launch_sites']}\n"
        f"• Ceo : {data['ceo']}\n"
        f"• Cto : {data['cto']}\n"
        f"• Coo : {data['coo']}\n"
        f"• Cto Propulsion : {data['cto_propulsion']}\n"
        f"• Address : {data['headquarters']['address']}\n"
        f"• City : {data['headquarters']['city']}\n"
        f"• State : {data['headquarters']['state']}\n\n"
        f"• Summary : {data['summary']}\n"
    )
| 792 | 306 |
import unittest
import numpy as np
from cctpy.baseutils import Vectors, Equal, Stream
from cctpy.constant import M, MM, YI, XI, Protons, ZI, MRAD
from cctpy.particle import RunningParticle, ParticleFactory, PhaseSpaceParticle, ParticleRunner
from cctpy.qs_hard_edge_magnet import QsHardEdgeMagnet
from cctpy.abstract_classes import LocalCoordinateSystem
from cctpy.plotuils import Plot2
class QsTest(unittest.TestCase):
    """Unit tests for ``QsHardEdgeMagnet``.

    Checks the quadrupole and sextupole field components of a hard-edge QS
    magnet against precomputed reference values, and compares particle
    tracking results in the x / y phase-space planes against reference
    trajectories.
    """

    def test_quad_0(self):
        """
        Test the QS quadrupole field (positive gradient, sextupole off).
        """
        length = 0.2 * M
        aper = 30 * MM
        g = 10.  # quadrupole gradient
        L = 0    # sextupole gradient disabled
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        qs = QsHardEdgeMagnet(length, g, L, aper, lc)
        m = qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, -0.1)))
        m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 0))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, -0.15)))
        m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 5 * MM))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(-0.05, -3.061616997868383E-18, -0.15)))

    def test_quad_1(self):
        """
        Test the QS quadrupole field (negative gradient, sextupole off).
        """
        length = 0.2 * M
        aper = 30 * MM
        g = -45.7
        L = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        qs = QsHardEdgeMagnet(length, g, L, aper, lc)
        m = qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, 0.457)))
        m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 0))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, 0.6855)))
        m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 5 * MM))
        self.assertTrue(Equal.equal_vector(m, Vectors.create(0.2285, 1.399158968025851E-17, 0.6855)))

    def test_second_0(self):
        # Sweep the sextupole gradient only (quadrupole off) and check the
        # field components at a point on the mid-plane (z = 0).
        length = 0.2 * M
        aper = 30 * MM
        g = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array([-0.0, -0.0, -0.0, -0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0])))
        self.assertTrue(Equal.equal_vector(my, np.array([-0.0, -0.0, -0.0, -0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0])))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.005, -0.0038888888888888888, -0.002777777777777778, -0.0016666666666666672, -5.555555555555558E-4,
             5.555555555555558E-4, 0.001666666666666666, 0.0027777777777777775, 0.0038888888888888888, 0.005])))

    def test_second_1(self):
        # Same sweep as test_second_0, but slightly above the mid-plane
        # (z = +1 mm), so the transverse components become non-zero.
        length = 0.2 * M
        aper = 30 * MM
        g = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array(
            [-0.001, -7.777777777777777E-4, -5.555555555555557E-4, -3.3333333333333343E-4, -1.1111111111111116E-4,
             1.1111111111111116E-4, 3.3333333333333316E-4, 5.555555555555554E-4, 7.777777777777777E-4, 0.001]
        )))
        self.assertTrue(Equal.equal_vector(my, np.array(
            [-6.123233995736766E-20, -4.762515330017485E-20, -3.4017966642982043E-20, -2.0410779985789227E-20,
             -6.80359332859641E-21, 6.80359332859641E-21, 2.041077998578921E-20, 3.4017966642982025E-20,
             4.762515330017485E-20, 6.123233995736766E-20]
        )))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.00495, -0.00385, -0.0027500000000000003, -0.0016500000000000006, -5.500000000000002E-4,
             5.500000000000002E-4, 0.0016499999999999991, 0.0027499999999999994, 0.00385, 0.00495])))

    def test_second_2(self):
        # Mirror of test_second_1 below the mid-plane (z = -1 mm): transverse
        # components flip sign while the longitudinal component is unchanged.
        length = 0.2 * M
        aper = 30 * MM
        g = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array(
            [0.001, 7.777777777777777E-4, 5.555555555555557E-4, 3.3333333333333343E-4, 1.1111111111111116E-4,
             -1.1111111111111116E-4, -3.3333333333333316E-4, -5.555555555555554E-4, -7.777777777777777E-4, -0.001]
        )))
        self.assertTrue(Equal.equal_vector(my, np.array(
            [6.123233995736766E-20, 4.762515330017485E-20, 3.4017966642982043E-20, 2.0410779985789227E-20,
             6.80359332859641E-21, -6.80359332859641E-21, -2.041077998578921E-20, -3.4017966642982025E-20,
             -4.762515330017485E-20, -6.123233995736766E-20]
        )))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.00495, -0.00385, -0.0027500000000000003, -0.0016500000000000006, -5.500000000000002E-4,
             5.500000000000002E-4, 0.0016499999999999991, 0.0027499999999999994, 0.00385, 0.00495]
        )))

    def test_second_3(self):
        # Sextupole sweep at a point on the other side of the axis
        # (x = -5 mm, z = -1 mm).
        length = 0.2 * M
        aper = 30 * MM
        g = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array(
            [-5.0E-4, -3.8888888888888887E-4, -2.7777777777777783E-4, -1.6666666666666672E-4, -5.555555555555558E-5,
             5.555555555555558E-5, 1.6666666666666658E-4, 2.777777777777777E-4, 3.8888888888888887E-4, 5.0E-4]
        )))
        self.assertTrue(Equal.equal_vector(my, np.array(
            [-3.061616997868383E-20, -2.3812576650087424E-20, -1.7008983321491022E-20, -1.0205389992894614E-20,
             -3.401796664298205E-21, 3.401796664298205E-21, 1.0205389992894605E-20, 1.7008983321491013E-20,
             2.3812576650087424E-20, 3.061616997868383E-20]
        )))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.0012000000000000001, -9.333333333333333E-4, -6.666666666666668E-4, -4.0000000000000013E-4,
             -1.3333333333333337E-4, 1.3333333333333337E-4, 3.999999999999998E-4, 6.666666666666665E-4,
             9.333333333333333E-4, 0.0012000000000000001]
        )))

    def test_second_4(self):
        # Sextupole sweep at (x = -8 mm, z = +1 mm).
        length = 0.2 * M
        aper = 30 * MM
        g = 0
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * MM))).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * MM))).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * MM))).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array(
            [7.999999999999999E-4, 6.222222222222221E-4, 4.444444444444444E-4, 2.6666666666666673E-4,
             8.88888888888889E-5, -8.88888888888889E-5, -2.666666666666665E-4, -4.444444444444443E-4,
             -6.222222222222221E-4, -7.999999999999999E-4]
        )))
        self.assertTrue(Equal.equal_vector(my, np.array(
            [4.8985871965894125E-20, 3.8100122640139875E-20, 2.7214373314385626E-20, 1.632862398863138E-20,
             5.442874662877126E-21, -5.442874662877126E-21, -1.6328623988631368E-20, -2.721437331438562E-20,
             -3.8100122640139875E-20, -4.8985871965894125E-20]
        )))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.00315, -0.00245, -0.00175, -0.0010500000000000004, -3.500000000000001E-4, 3.500000000000001E-4,
             0.0010499999999999995, 0.0017499999999999996, 0.00245, 0.00315]
        )))

    def test_quad_and_second_0(self):
        # Combined sweep: both the quadrupole and the sextupole gradient vary
        # with the sweep parameter k; field checked at a fixed off-axis point.
        length = 0.2 * M
        aper = 30 * MM
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        p = Vectors.create(-8 * MM, 0.1, 1 * MM)
        mx = Stream.linspace(-100, 100, 10).map(
            lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[0]).to_vector()
        my = Stream.linspace(-100, 100, 10).map(
            lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[1]).to_vector()
        mz = Stream.linspace(-100, 100, 10).map(
            lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map(
            lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[2]).to_vector()
        self.assertTrue(Equal.equal_vector(mx, np.array(
            [0.01054817141861684, 0.008375158765307863, 0.006074168017454833, 0.0036793034142077055,
             0.0012243616386317005, -0.0012609533747681091, -0.003760913727964488, -0.006301201552951284,
             -0.009026933421046367, -0.012426561361512444]
        )))
        self.assertTrue(Equal.equal_vector(my, np.array(
            [6.458892182333354E-19, 5.128305687142586E-19, 3.7193552100296426E-19, 2.2529235746506973E-19,
             7.497052808745602E-20, -7.721112571419089E-20, -2.302895479410525E-19, -3.8583731563020608E-19,
             -5.52740256010035E-19, -7.609074297892195E-19]
        )))
        self.assertTrue(Equal.equal_vector(mz, np.array(
            [-0.08438592505476789, -0.06700286672870447, -0.04859794794067867, -0.029447702336309195,
             -0.009833171528290597, 0.009977251489327706, 0.029769042946726516, 0.04949189248669594,
             0.06956922943567483, 0.09178208545491932]
        )))

    def test_track_y(self):
        """
        Sextupole QS tracking comparison in the y direction.

        Tracks an ellipse of phase-space particles through a sextupole-only
        QS magnet and compares the projected (y, y') coordinates against
        reference values, within 0.05 (mm / mrad).
        """
        plane = PhaseSpaceParticle.YYP_PLANE
        delta = 0.
        number = 6
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        qs = QsHardEdgeMagnet(0.2, 0, 10000 * 2, 300000 * MM, lc)
        rp = ParticleFactory.create_proton(
            Vectors.create(0, -0.5, 0), YI
        )
        # print(f"rp={rp}")

        pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
            plane, 3.5 * MM, 7.2 * MM, delta, number
        )
        # print(*pps, sep='\n', end='\n\n')

        pp = ParticleFactory.create_from_phase_space_particles(
            rp, rp.get_natural_coordinate_system(y_direction=ZI), pps
        )
        # print(*pp, sep='\n\n')

        ParticleRunner.run_ps_only_cpu0(pp, qs, 1.2)
        ParticleRunner.run_only(rp, qs, 1.2)
        # print(f"rp={rp}")
        # print(*pp, sep='\n\n')

        pps_end = PhaseSpaceParticle.create_from_running_particles(rp, rp.get_natural_coordinate_system(), pp)
        li = PhaseSpaceParticle.phase_space_particles_project_to_plane(pps_end, plane)
        # Convert to (mm, mrad) pairs for comparison with the references.
        li = np.array(
            [[x / MM, xp / MRAD] for x, xp in li]
        )
        x = li[:, 0]
        y = li[:, 1]
        x0 = np.array(
            [4.571009592873671, 13.005311328487931, 4.473631539146663, -4.5763158484424205, -13.005311328486815,
             -4.473631539149022]
        )
        y0 = np.array(
            [1.9535672206449075, 13.092945863265955, 5.607514554681223, -1.9596240807758292, -13.092945863264303,
             -5.6075145546827025]
        )
        self.assertTrue(
            (np.abs(x.flatten() - x0.flatten()) < 0.05).all()
        )
        self.assertTrue(
            (np.abs(y.flatten() - y0.flatten()) < 0.05).all()
        )
        # Plot2.plot2d([(li, 'r.')])
        #
        # Plot2.plot2d([(np.column_stack((x0, y0)), 'k.')])
        #
        # Plot2.show()

    def test_track_x(self):
        """
        Sextupole QS tracking comparison in the x direction.

        Same setup as test_track_y, projected onto the (x, x') plane.
        """
        plane = PhaseSpaceParticle.XXP_PLANE
        delta = 0.
        number = 6
        lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI)
        qs = QsHardEdgeMagnet(0.2, 0, 10000 * 2, 300000 * MM, lc)
        rp = ParticleFactory.create_proton(
            Vectors.create(0, -0.5, 0), YI
        )
        # print(f"rp={rp}")

        pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
            plane, 3.5 * MM, 7.2 * MM, delta, number
        )
        # print(*pps, sep='\n', end='\n\n')

        pp = ParticleFactory.create_from_phase_space_particles(
            rp, rp.get_natural_coordinate_system(y_direction=ZI), pps
        )
        # print(*pp, sep='\n\n')

        ParticleRunner.run_ps_only_cpu0(pp, qs, 1.2)
        ParticleRunner.run_only(rp, qs, 1.2)
        # print(f"rp={rp}")
        # print(*pp, sep='\n\n')

        pps_end = PhaseSpaceParticle.create_from_running_particles(rp, rp.get_natural_coordinate_system(), pp)
        li = PhaseSpaceParticle.phase_space_particles_project_to_plane(pps_end, plane)
        # Convert to (mm, mrad) pairs for comparison with the references.
        li = np.array(
            [[x / MM, xp / MRAD] for x, xp in li]
        )
        x = li[:, 0]
        y = li[:, 1]
        x0 = np.array(
            [-1.6363082716640025, -2.964662344582841, 3.848704911140664, -10.799631136000919, -29.05411958099093,
             -5.103782688758285]
        )
        y0 = np.array(
            [-8.439377477318738, -14.025193841237206, 4.508357473356099, -12.381715740031598, -40.36236303026269,
             -6.717141236931342]
        )
        self.assertTrue(
            (np.abs(x.flatten() - x0.flatten()) < 0.05).all()
        )
        self.assertTrue(
            (np.abs(y.flatten() - y0.flatten()) < 0.05).all()
        )
        # Plot2.plot2d([(li, 'r.')])
        #
        # Plot2.plot2d([(np.column_stack((x0, y0)), 'k.')])
        #
        # Plot2.show()
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 16,430 | 8,833 |
from __future__ import annotations
import os
import sys
if __name__ == '__main__':
    # Ensure that the parent directory is not in sys.path.
    # When run as a script, Python prepends this file's directory to
    # sys.path; remove it so local files cannot shadow installed modules.
    norm = lambda x: os.path.normpath(os.path.abspath(x))
    dirname = os.path.dirname(norm(__file__))
    sys.path[:] = [x for x in sys.path if norm(x) != dirname]
    del norm, dirname
import ctypes
import io
import json
import re
import shlex
import shutil
import subprocess
import tempfile
import traceback
import typing as t
if os.name == 'nt':
    import ctypes.wintypes as wintypes
    windll = ctypes.windll  # type: ignore
    WinError = ctypes.WinError  # type: ignore
    get_last_error = ctypes.get_last_error  # type: ignore

    class winapi:
        """Thin static wrappers around the Win32 API calls used by this script."""

        _WaitForSingleObject = windll.kernel32.WaitForSingleObject
        _WaitForSingleObject.restype = wintypes.DWORD
        _WaitForSingleObject.argtypes = [wintypes.HANDLE, wintypes.DWORD]

        @staticmethod
        def WaitForSingleObject(handle, ms=0):
            """Wait up to *ms* milliseconds; returns the raw WAIT_* code
            (0x102 == WAIT_TIMEOUT)."""
            return winapi._WaitForSingleObject(handle, ms)

        _GetExitCodeProcess = windll.kernel32.GetExitCodeProcess
        _GetExitCodeProcess.restype = wintypes.BOOL
        _GetExitCodeProcess.argtypes = [wintypes.HANDLE, ctypes.POINTER(wintypes.DWORD)]

        @staticmethod
        def GetExitCodeProcess(handle):
            """Return the exit code of the process *handle*; raises OSError
            on API failure."""
            result = wintypes.DWORD()
            success = winapi._GetExitCodeProcess(handle, ctypes.byref(result))
            if not success:
                raise WinError(get_last_error())
            return result.value

        _MessageBox = windll.user32.MessageBoxW
        _MessageBox.restype = ctypes.c_int
        _MessageBox.argtypes = [wintypes.HWND, wintypes.LPWSTR, wintypes.LPWSTR, wintypes.UINT]

        @staticmethod
        def MessageBox(hwnd, text, caption, type):
            """Show a (blocking) Windows message box; returns the button id."""
            return winapi._MessageBox(hwnd, text, caption, type)

        class _SHELLEXECUTEINFO(ctypes.Structure):
            # Layout of the SHELLEXECUTEINFOA structure (ANSI variant, since
            # the string members are passed as encoded bytes below).
            _fields_ = [
                ('cbSize', wintypes.DWORD),
                ('fMask', wintypes.ULONG),
                ('hwnd', wintypes.HWND),
                ('lpVerb', wintypes.LPCSTR),
                ('lpFile', wintypes.LPCSTR),
                ('lpParameters', wintypes.LPCSTR),
                ('lpDirectory', wintypes.LPCSTR),
                ('nShow', ctypes.c_int),
                ('hInstApp', wintypes.HINSTANCE),
                ('lpIDList', wintypes.LPVOID),
                ('lpClass', wintypes.LPCSTR),
                ('hkeyClass', wintypes.HKEY),
                ('dwHotKey', wintypes.DWORD),
                ('DUMMYUNIONNAME', wintypes.HANDLE),
                ('hProcess', wintypes.HANDLE),
            ]

        _ShellExecuteEx = windll.shell32.ShellExecuteEx
        _ShellExecuteEx.restype = wintypes.BOOL
        _ShellExecuteEx.argtypes = [ctypes.POINTER(_SHELLEXECUTEINFO)]

        # ShowWindow (nShow) constants.
        SW_HIDE = 0
        SW_MAXIMIMIZE = 3
        SW_MINIMIZE = 6
        SW_RESTORE = 9
        SW_SHOW = 5
        SW_SHOWDEFAULT = 10
        SW_SHOWMAXIMIZED = 3
        SW_SHOWMINIMIZED = 2
        SW_SHOWMINNOACTIVE = 7
        SW_SHOWNA = 8
        SW_SHOWNOACTIVE = 4
        SW_SHOWNORMAL = 1

        @staticmethod
        def ShellExecuteEx(hwnd=None, verb='', file='', parameters=None,
                           directory=None, show=SW_SHOW, mask=0):  # TODO: More parameters
            """Invoke ShellExecuteEx; returns a dict with 'hInstApp' and
            'hProcess'. Raises OSError on failure."""
            data = winapi._SHELLEXECUTEINFO()
            data.cbSize = ctypes.sizeof(data)
            data.fMask = mask
            data.hwnd = hwnd
            data.lpVerb = verb.encode()
            data.lpFile = file.encode()
            # BUGFIX: lpParameters/lpDirectory previously called .encode()
            # unconditionally, raising AttributeError when these optional
            # arguments were left at their default of None.
            data.lpParameters = parameters.encode() if parameters is not None else None
            data.lpDirectory = directory.encode() if directory is not None else None
            data.nShow = show
            data.hInstApp = None
            data.lpIDList = None
            data.lpClass = None
            data.hkeyClass = None
            data.dwHotKey = 0
            data.DUMMYUNIONNAME = None
            data.hProcess = None
            result = winapi._ShellExecuteEx(ctypes.byref(data))
            if not result:
                raise WinError(get_last_error())
            return {'hInstApp': data.hInstApp, 'hProcess': data.hProcess}
def alert(*msg: str) -> None:
    """Print *msg* to stderr; on Windows additionally show a message box."""
    # TODO (@NiklasRosenstein): Support GUI alerts for other systems.
    text = ' '.join(map(str, msg))
    sys.stderr.write(text + '\n')
    sys.stderr.flush()
    if os.name == 'nt':
        winapi.MessageBox(None, text, "Python", 0)
def quote(s: str) -> str:
    """Quote *s* for safe use on a command line.

    On Windows (backslash path separator) double quotes are escaped and the
    string is wrapped in quotes when it contains whitespace or redirection
    characters; elsewhere ``shlex.quote`` is used.
    """
    if os.name == 'nt' and os.sep == '\\':
        result = s.replace('"', '\\"')
        needs_wrapping = bool(re.search(r'\s', result)) or any(ch in result for ch in '<>')
        return '"' + result + '"' if needs_wrapping else result
    return shlex.quote(s)
def is_root() -> bool:
    """Return True when the current process runs with elevated privileges.

    Uses ``IsUserAnAdmin`` on Windows and the effective UID on POSIX;
    raises RuntimeError for any other platform.
    """
    if os.name == 'posix':
        return os.getuid() == 0
    if os.name == 'nt':
        try:
            return bool(windll.shell32.IsUserAnAdmin())
        except:
            # Best effort: an API failure is reported but treated as non-admin.
            traceback.print_exc()
            print("ctypes.windll.shell32.IsUserAnAdmin() failed -- "
                  "assuming not an admin.", file=sys.stderr)
            sys.stderr.flush()
            return False
    raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def elevate(command: str | list[str], cwd: str | None = None, environ: t.Mapping[str, str] | None = None) -> None:
    """
    Execute *command* with elevated privileges in *cwd*/*environ*.

    On POSIX this exits the current process with the status of
    ``sudo -E <command>``; on Windows it delegates to _elevate_windows(),
    which hands the launch data to the elevated child via a temp directory.
    """
    argv = shlex.split(command) if isinstance(command, str) else command
    if os.name == 'nt':
        return _elevate_windows(argv, cwd, environ)
    if os.name == 'posix':
        sys.exit(subprocess.call(['sudo', '-E'] + list(argv)))
    raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def _elevate_windows(command, cwd, environ):
    """
    Run *command* elevated via ShellExecuteEx(verb='runas') and stream its
    output into our stdout; return the child's exit code.

    The launch data (command/cwd/environ/outfile) is written to
    processdata.json inside a temp directory, because no environment can be
    handed to the elevated process through ShellExecuteEx() itself.
    """
    assert os.name == 'nt'
    datadir = tempfile.mkdtemp()
    try:
        # TODO: Maybe we could also use named pipes and transfer them
        # via the processdata.json to the elevated process.
        # This file will receive all the process information.
        datafile = os.path.join(datadir, 'processdata.json')
        data = {
            'command': command,
            'cwd': cwd or os.getcwd(),
            'environ': environ or os.environ.copy(),
            'outfile': os.path.join(datadir, 'out.bin')
        }
        with open(datafile, 'w') as fp:
            json.dump(data, fp)
        # Ensure the output file exists.
        open(data['outfile'], 'w').close()
        # Create the windows elevated process that calls this file. This
        # file will then know what to do with the information from the
        # process data directory.
        hProc = winapi.ShellExecuteEx(
            file=sys.executable,
            verb='runas',
            parameters=' '.join(map(quote, [os.path.abspath(__file__), '--windows-process-data', datadir])),
            directory=datadir,
            mask=64,  # 64 == SEE_MASK_NOCLOSEPROCESS: keep hProcess for waiting
            show=winapi.SW_HIDE
        )['hProcess']
        # Read the output from the process and write it to our stdout.
        with open(data['outfile'], 'rb+', 0) as outfile:
            while True:
                # 0x102 == WAIT_TIMEOUT: child still running, keep polling.
                hr = winapi.WaitForSingleObject(hProc, 40)
                while True:
                    line = outfile.readline()
                    if not line: break
                    sys.stdout.buffer.write(line)
                if hr != 0x102: break
        return winapi.GetExitCodeProcess(hProc)
    finally:
        # Best-effort cleanup of the handshake directory.
        try:
            shutil.rmtree(datadir)
        except:
            print("ERROR: Unable to remove data directory of elevated process.")
            print("ERROR: Directory at \"{}\"".format(datadir))
            traceback.print_exc()
def _elevate_windows_elevated(datadir):
    """
    Entry point inside the already-elevated process: load processdata.json
    from *datadir*, mirror all output into the shared out-file the parent is
    tailing, and run the requested command; return its exit status.
    """
    assert os.name == 'nt'
    datafile = os.path.join(datadir, 'processdata.json')
    with open(datafile, 'r') as pdata_fp:
        data = json.load(pdata_fp)
    try:
        with open(data['outfile'], 'wb', 0) as fp:
            # Redirect our own prints into the out-file watched by the parent
            # (this elevated process has no visible console).
            sys.stderr = sys.stdout = io.TextIOWrapper(fp)
            os.environ.update(data['environ'])
            return subprocess.call(data['command'], cwd=data['cwd'], stdout=fp, stderr=fp)
    except:
        # Surface the failure via alert() since stderr is not visible.
        alert(traceback.format_exc())
        sys.exit(1)
def main(argv=None, prog=None):
    """
    Command-line entry point.

    With --windows-process-data DIR (used internally by the elevated
    re-invocation on Windows) it runs the stored command; any other
    arguments are executed elevated via elevate(); with no arguments it
    prints the usage line.
    """
    import argparse
    parser = argparse.ArgumentParser(prog=prog)
    parser.add_argument('--windows-process-data',
        help='The path to a Windows process data directory. This is used to '
             'provide data for the elevated process since no environment variables '
             # Typo fix: "can be via" -> "can be passed via".
             'can be passed via ShellExecuteEx().')
    args, unknown = parser.parse_known_args(argv)
    if args.windows_process_data:
        if not is_root():
            alert("--windows-process-data can only be used in an elevated process.")
            sys.exit(1)
        sys.exit(_elevate_windows_elevated(args.windows_process_data))
    elif unknown:
        elevate(unknown)
        sys.exit()
    else:
        parser.print_usage()
_entry_point = lambda: sys.exit(main())
if __name__ == '__main__':
_entry_point()
| 8,238 | 2,872 |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
from sdh.metrics.org import app, st as store
from sdh.metrics.server import ORG, SCM, CI, APIError
import calendar
from datetime import datetime
__author__ = 'Alejandro F. Carrera'
def get_average_list(l):
    """
    Return the arithmetic mean of the numeric sequence *l*.

    Replaces the Python-2-only builtin ``reduce`` with ``sum()``, which is
    equivalent for numeric addition and also works on Python 3.
    Raises ZeroDivisionError for an empty sequence.
    """
    return sum(l) / len(l)
def get_correct_kwargs(kwargs):
    """
    Normalize request kwargs into {'begin', 'end', 'max', 'step'}.

    Missing begin -> 0; missing end -> current UTC epoch timestamp;
    missing max -> 0. 'step' is one day (86400 s) when max is 0, otherwise
    the requested frame divided by max.
    """
    begin = kwargs.get('begin')
    end = kwargs.get('end')
    maximum = kwargs.get('max')
    if begin is None:
        begin = 0
    if end is None:
        end = calendar.timegm(datetime.now().timetuple())
    if maximum is None:
        maximum = 0
    args = {'begin': begin, 'end': end, 'max': maximum}
    if maximum == 0:
        args['step'] = 86400
    else:
        args['step'] = (end - begin) / maximum
    return args
def detect_overlap_date(a_begin, a_end, b_begin, b_end):
    """
    Return True when the (closed) integer intervals [a_begin, a_end] and
    [b_begin, b_end] overlap. Inputs may be ints or int-like strings.
    """
    ab, ae = int(a_begin), int(a_end)
    bb, be = int(b_begin), int(b_end)
    return (
        (ab <= bb and ae >= be)   # a contains b
        or (ab >= bb and ae <= be)  # b contains a
        or (ab <= bb <= ae)         # b starts inside a (shift right)
        or (ab <= be <= ae)         # b ends inside a (shift left)
    )
def detect_project_repositories_overlap(uri, args):
    """Return True when the project's commit frame overlaps args['begin'..'end']."""
    frame = store.get_project_temporal_frame(uri)
    begin = args.get('begin')
    end = args.get('end')
    return detect_overlap_date(begin, end,
                               frame.get('first_commit'),
                               frame.get('last_commit'))
def get_external_position_metric(uid, endpoint, position, aggregate, args, flag):
    """
    Query the external per-product metric *endpoint* for every product that
    member *uid* reaches through *position*, and merge the series.

    :param aggregate: 'sum' or 'avg' -- how the per-product series combine.
    :param flag: True when no explicit begin/end was requested ("total").
    :return: (context, merged value list).

    NOTE(review): Python 2 code -- map()/zip() results are indexed as lists
    and ``e.message`` is used; this will not run unchanged on Python 3.
    """
    try:
        pr = get_position_products(uid, args, position, flag)
        pr_res = []
        if args['begin'] == 0:
            args['begin'] = None
        # NOTE(review): tmp_arg aliases args (no copy), so the frame updates
        # below are also visible in the caller's args dict -- confirm intended.
        tmp_arg = args
        if flag:
            if aggregate == 'sum':
                # One common temporal frame across all products.
                tmp_frame = store.get_specific_products_temporal_frame(pr)
                tmp_arg['begin'] = tmp_frame.get('first_commit')
                tmp_arg['end'] = tmp_frame.get('last_commit')
                pr_res = map(
                    lambda x: app.request_metric(endpoint, prid=x.get('id'), **tmp_arg), pr
                )
            else:
                # Each product is averaged over its own temporal frame.
                for k in pr:
                    pr_temp_frame = store.get_product_temporal_frame(k.get('uri'))
                    tmp_arg['begin'] = pr_temp_frame.get('first_commit')
                    tmp_arg['end'] = pr_temp_frame.get('last_commit')
                    pr_res.append(app.request_metric(endpoint, prid=k.get('id'), **tmp_arg))
        else:
            pr_res = map(lambda k: app.request_metric(endpoint, prid=k.get('id'), **tmp_arg), pr)
        if len(pr_res):
            # Reuse the context returned by the first metric response.
            context = pr_res[0][0]
        else:
            context = args
        # Transpose the per-product series so each row holds one time step.
        v = zip(*map(lambda x: x[1], pr_res))
        if aggregate == 'avg':
            res = [get_average_list(x) for x in v]
        else:
            res = [sum(x) for x in v]
        return context, res
    except (EnvironmentError, AttributeError) as e:
        raise APIError(e.message)
    return args, []  # unreachable: kept from the original control flow
def get_position_repositories(uid, args, position, flag_total, only_uris):
    """
    Return the repositories reachable by member *uid* through *position*.

    :param flag_total: True -> consider every repository of the member's
        projects; False -> keep only repositories whose commit frame
        overlaps args['begin'..'end'].
    :param only_uris: True -> return the set of repository URIs; otherwise
        a list of {'id', 'uri'} dicts.
    """
    positions_id = store.get_all_members_id(position)
    if uid not in positions_id:
        return []
    res_prj = set()
    for project in store.get_all_member_projects(positions_id[uid]):
        repos = store.get_all_project_repositories(project)
        if flag_total:
            # BUG FIX: previously nothing was collected when flag_total was
            # set, so "total" repository queries always came back empty.
            res_prj.update(repos)
        else:
            for repo in repos:
                rep_info = store.db.hgetall(repo)
                if detect_overlap_date(
                        args.get('begin'), args.get('end'),
                        rep_info.get('first_commit'),
                        rep_info.get('last_commit')):
                    res_prj.add(repo)
    if only_uris:
        return res_prj
    return [{'id': store.db.hgetall(x).get('id'), 'uri': x} for x in res_prj]
def get_position_projects(uid, args, position, flag_total, only_uris):
    """
    Return the projects of member *uid* within *position*.

    :param flag_total: False -> keep only projects whose repositories
        overlap args['begin'..'end'] (deduplicated).
    :param only_uris: True -> list of URIs; otherwise {'id', 'uri'} dicts.
    """
    positions_id = store.get_all_members_id(position)
    if uid not in positions_id:
        return []
    projects = store.get_all_member_projects(positions_id[uid])
    if not flag_total:
        # Deduplicate while filtering to the requested time frame.
        projects = list({x for x in projects
                         if detect_project_repositories_overlap(x, args)})
    if only_uris:
        return projects
    # Idiom fix: build the result directly instead of a side-effect
    # list comprehension around res.append().
    return [{'id': store.db.get(x), 'uri': x} for x in projects]
def get_position_products(uid, args, position, flag_total):
    """
    Return the products ({'id', 'uri'} dicts) of every project reachable by
    member *uid* through *position*, deduplicated by product URI.
    """
    products = set()
    for project in get_position_projects(uid, args, position, flag_total, False):
        products = products.union(set(store.get_all_project_products(project.get('uri'))))
    # Idiom fix: direct list comprehension instead of a side-effect
    # comprehension around res.append().
    return [{'id': store.db.get(x), 'uri': x} for x in products]
def get_position_position(uid, args, fil, position, flag_total):
    """
    Return members of *position* who share at least one project with member
    *uid* of group *fil* (e.g. a director's product managers).
    """
    pr = set(get_position_projects(uid, args, fil, flag_total, True))
    members_dir = set()
    for member in store.get_all_members(position):
        # Truthiness instead of len(...) > 0.
        if pr.intersection(set(store.get_all_member_projects(member))):
            members_dir.add(member)
    # Idiom fix: direct list comprehension instead of a side-effect
    # comprehension around res.append().
    return [{'id': store.db.hgetall(x).get("id"), 'uri': x} for x in members_dir]
def get_director_position(uid, args, position, flag_total):
    """Members of *position* sharing a project with director *uid*."""
    return get_position_position(uid, args, 'directors', position, flag_total)


def get_pmanager_position(uid, args, position, flag_total):
    """Members of *position* sharing a project with product manager *uid*."""
    return get_position_position(uid, args, 'productmanagers', position, flag_total)
def get_project_roles(pjid, args, role, flag_total):
    """
    Return the members holding *role* in project *pjid*.

    Software developers are resolved through the 'project-developers' view
    (commit-based); every other role is read from the membership store.
    Returns [] when the project is unknown or (with flag_total False) has
    no commits inside the requested time frame.
    """
    projects_id = store.get_all_projects_id()
    if pjid not in projects_id:
        return []
    else:
        if not flag_total and not detect_project_repositories_overlap(projects_id[pjid], args):
            # No repository activity inside the requested window.
            return []
        if role == "softwaredeveloper":
            # NOTE(review): tmp_arg aliases args -- the frame updates below
            # also mutate the caller's dict; confirm intended.
            tmp_arg = args
            if not flag_total:
                pr_temp_frame = store.get_project_temporal_frame(projects_id[pjid])
                tmp_arg['begin'] = pr_temp_frame.get('first_commit')
                tmp_arg['end'] = pr_temp_frame.get('last_commit')
            co, res = app.request_view('project-developers', pjid=pjid, **tmp_arg)
            return res
        else:
            res = set()
            users_id = store.get_all_members(role)
            for x in users_id:
                pr_res = store.get_all_member_projects(x)
                if projects_id[pjid] in pr_res:
                    res.add(x)
            res_set = []
            # Side-effect comprehension (original style) building the
            # {'id', 'uri'} result entries.
            [res_set.append({
                'id': store.db.hgetall(x).get("id"),
                'uri': x
            }) for x in res]
            return res_set
def get_director_roles(uid, args, role, flag_total):
    """Members holding *role* in any of director *uid*'s projects."""
    return get_position_position(uid, args, 'directors', role, flag_total)


def get_pmanager_roles(uid, args, role, flag_total):
    """Members holding *role* in any of product manager *uid*'s projects."""
    return get_position_position(uid, args, 'productmanagers', role, flag_total)
def helper_get_director_pmanagers(uid, **kwargs):
    """Return (args, product managers reachable from director *uid*)."""
    # No explicit time window means "over the whole history".
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_position(uid, args, 'productmanagers', flag_total)


def helper_get_director_architects(uid, **kwargs):
    """Return (args, architects reachable from director *uid*)."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_position(uid, args, 'architects', flag_total)


def helper_get_pmanager_architects(uid, **kwargs):
    """Return (args, architects reachable from product manager *uid*)."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_pmanager_position(uid, args, 'architects', flag_total)
def helper_get_position_developers(uid, position, **kwargs):
    """
    Return (args, developers) of every product reachable by *uid* through
    *position*, deduplicated by member URI.

    NOTE(review): Python 2 code (list-returning map, ``e.message``).
    """
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    try:
        res = set()
        pr = get_position_products(uid, args, position, flag_total)
        # Query the product-developers view once per product.
        devs = map(lambda k: app.request_view('product-developers', prid=k.get('id'), **kwargs), pr)
        # Collect every developer URI across all products (dedupe via set).
        [[res.add(j.get('uri')) for j in x] for x in map(lambda x: x[1], devs)]
        res_devs = []
        [res_devs.append({
            "id": store.db.hgetall(x).get("id"),
            "uri": x
        }) for x in res]
        return args, res_devs
    except (EnvironmentError, AttributeError) as e:
        raise APIError(e.message)
    return args, []  # unreachable: kept from the original control flow
@app.view('/product-projects', target=ORG.Project, parameters=[ORG.Product],
          id='product-projects', title='Projects of Product')
def get_product_projects(prid, **kwargs):
    """View: projects of product *prid*, optionally filtered to the
    requested time frame; returns (args, [{'id', 'uri'}, ...])."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    products_id = store.get_all_products_id()
    if prid not in products_id:
        return args, []
    projects = store.get_all_product_projects(products_id[prid])
    if not flag_total:
        # Deduplicate while filtering to the requested time frame.
        projects = list({x for x in projects
                         if detect_project_repositories_overlap(x, args)})
    # Idiom fix: direct list comprehension instead of a side-effect
    # comprehension around res.append().
    return args, [{'id': store.db.get(x), 'uri': x} for x in projects]
@app.view('/project-repositories', target=SCM.Repository, parameters=[ORG.Project],
          id='project-repositories', title='Repositories of Project')
def get_project_repositories(pjid, **kwargs):
    """View: repositories of project *pjid*, optionally filtered to the
    requested time frame; returns (args, [{'id', 'uri'}, ...])."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    projects_id = store.get_all_projects_id()
    if pjid not in projects_id:
        return args, []
    repos = store.get_all_project_repositories(projects_id[pjid])
    if not flag_total:
        # Keep only repositories whose commit frame overlaps the window.
        res_prj = set()
        for repo in repos:
            rep_info = store.db.hgetall(repo)
            if detect_overlap_date(
                    args.get('begin'), args.get('end'),
                    rep_info.get('first_commit'),
                    rep_info.get('last_commit')):
                res_prj.add(repo)
        repos = res_prj
    # Idiom fix: direct list comprehension instead of a side-effect
    # comprehension around res.append().
    return args, [{'id': store.db.hgetall(x).get('id'), 'uri': x} for x in repos]
@app.metric('/total-project-stakeholders', parameters=[ORG.Project],
            id='project-stakeholders', title='Stakeholders of Project')
def get_total_project_stakeholders(pjid, **kwargs):
    """Metric: number of stakeholders of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_project_roles(pjid, args, 'stakeholder', flag_total))]


@app.view('/project-stakeholders', target=ORG.Person, parameters=[ORG.Project],
          id='project-stakeholders', title='Stakeholders of Project')
def get_project_stakeholders(pjid, **kwargs):
    """View: stakeholders of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_project_roles(pjid, args, 'stakeholder', flag_total)


@app.metric('/total-project-swarchitects', parameters=[ORG.Project],
            id='project-swarchitects', title='Software Architects of Project')
def get_total_project_swarchitects(pjid, **kwargs):
    """Metric: number of software architects of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_project_roles(pjid, args, 'softwarearchitect', flag_total))]


@app.view('/project-swarchitects', target=ORG.Person, parameters=[ORG.Project],
          id='project-swarchitects', title='Software Architects of Project')
def get_project_swarchitects(pjid, **kwargs):
    """View: software architects of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_project_roles(pjid, args, 'softwarearchitect', flag_total)


@app.metric('/total-project-pjmanagers', parameters=[ORG.Project],
            id='project-pjmanagers', title='Project Managers of Project')
def get_total_project_pjmanagers(pjid, **kwargs):
    """Metric: number of project managers of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_project_roles(pjid, args, 'projectmanager', flag_total))]


@app.view('/project-pjmanagers', target=ORG.Person, parameters=[ORG.Project],
          id='project-pjmanagers', title='Project Managers of Project')
def get_project_pjmanagers(pjid, **kwargs):
    """View: project managers of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_project_roles(pjid, args, 'projectmanager', flag_total)


@app.metric('/total-project-swdevelopers', parameters=[ORG.Project],
            id='project-swdevelopers', title='Software Developers of Project')
def get_total_project_swdevelopers(pjid, **kwargs):
    """Metric: number of software developers of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_project_roles(pjid, args, 'softwaredeveloper', flag_total))]


@app.view('/project-swdevelopers', target=ORG.Person, parameters=[ORG.Project],
          id='project-swdevelopers', title='Software Developers of Project')
def get_project_swdevelopers(pjid, **kwargs):
    """View: software developers of project *pjid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_project_roles(pjid, args, 'softwaredeveloper', flag_total)
@app.metric('/total-director-repositories', parameters=[ORG.Person],
            id='director-repositories', title='Repositories of Director')
def get_total_director_repositories(uid, **kwargs):
    """Metric: number of repositories reachable from director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_repositories(uid, args, 'directors', flag_total, False))]


@app.view('/director-repositories', target=SCM.Repository, parameters=[ORG.Person],
          id='director-repositories', title='Repositories of Director')
def get_director_repositories(uid, **kwargs):
    """View: repositories reachable from director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_repositories(uid, args, 'directors', flag_total, False)


@app.metric('/total-director-projects', parameters=[ORG.Person],
            id='director-projects', title='Projects of Director')
def get_total_director_projects(uid, **kwargs):
    """Metric: number of projects of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_projects(uid, args, 'directors', flag_total, False))]


@app.view('/director-projects', target=ORG.Project, parameters=[ORG.Person],
          id='director-projects', title='Projects of Director')
def get_director_projects(uid, **kwargs):
    """View: projects of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_projects(uid, args, 'directors', flag_total, False)


@app.metric('/total-architect-projects', parameters=[ORG.Person],
            id='architect-projects', title='Projects of Architect')
def get_total_architects_projects(uid, **kwargs):
    """Metric: number of projects of architect *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_projects(uid, args, 'architects', flag_total, False))]


@app.view('/architect-projects', target=ORG.Project, parameters=[ORG.Person],
          id='architect-projects', title='Projects of Architect')
def get_architect_projects(uid, **kwargs):
    """View: projects of architect *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_projects(uid, args, 'architects', flag_total, False)


@app.metric('/total-pmanager-projects', parameters=[ORG.Person],
            id='pmanager-projects', title='Projects of Product Manager')
def get_total_manager_projects(uid, **kwargs):
    """Metric: number of projects of product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_projects(uid, args, 'productmanagers', flag_total, False))]


@app.view('/pmanager-projects', target=ORG.Project, parameters=[ORG.Person],
          id='pmanager-projects', title='Projects of Product Manager')
def get_manager_projects(uid, **kwargs):
    """View: projects of product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_projects(uid, args, 'productmanagers', flag_total, False)
@app.metric('/total-director-products', parameters=[ORG.Person],
            id='director-products', title='Products of Director')
def get_total_director_products(uid, **kwargs):
    """Metric: number of products of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_products(uid, args, 'directors', flag_total))]


@app.view('/director-products', target=ORG.Product, parameters=[ORG.Person],
          id='director-products', title='Products of Director')
def get_director_products(uid, **kwargs):
    """View: products of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_products(uid, args, 'directors', flag_total)


@app.metric('/total-architect-products', parameters=[ORG.Person],
            id='architects-products', title='Products of Architect')
def get_total_architect_products(uid, **kwargs):
    """Metric: number of products of architect *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_products(uid, args, 'architects', flag_total))]


@app.view('/architect-products', target=ORG.Product, parameters=[ORG.Person],
          id='architects-products', title='Products of Architect')
def get_architect_products(uid, **kwargs):
    """View: products of architect *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_products(uid, args, 'architects', flag_total)


@app.metric('/total-pmanager-repositories', parameters=[ORG.Person],
            id='pmanager-repositories', title='Repositories of Product Manager')
def get_total_pmanager_repositories(uid, **kwargs):
    """Metric: number of repositories reachable from product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_repositories(uid, args, 'productmanagers', flag_total, False))]


@app.view('/pmanager-repositories', target=SCM.Repository, parameters=[ORG.Person],
          id='pmanager-repositories', title='Repositories of Product Manager')
def get_pmanager_repositories(uid, **kwargs):
    """View: repositories reachable from product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_repositories(uid, args, 'productmanagers', flag_total, False)


@app.metric('/total-pmanager-products', parameters=[ORG.Person],
            id='pmanager-products', title='Products of Product Manager')
def get_total_manager_products(uid, **kwargs):
    """Metric: number of products of product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_position_products(uid, args, 'productmanagers', flag_total))]


@app.view('/pmanager-products', target=ORG.Product, parameters=[ORG.Person],
          id='pmanager-products', title='Products of Product Manager')
def get_manager_products(uid, **kwargs):
    """View: products of product manager *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_position_products(uid, args, 'productmanagers', flag_total)
@app.metric('/total-director-productmanagers', parameters=[ORG.Person],
            id='director-productmanagers', title='Product Managers of Director')
def get_total_director_pmanagers(uid, **kwargs):
    """Metric: number of product managers working with director *uid*."""
    co, res = helper_get_director_pmanagers(uid, **kwargs)
    return co, [len(res)]


@app.view('/director-productmanagers', target=ORG.Person, parameters=[ORG.Person],
          id='director-productmanagers', title='Product Managers of Director')
def get_director_pmanagers(uid, **kwargs):
    """View: product managers working with director *uid*."""
    return helper_get_director_pmanagers(uid, **kwargs)


@app.metric('/total-director-architects', parameters=[ORG.Person],
            id='director-architects', title='Architects of Director')
def get_total_director_architects(uid, **kwargs):
    """Metric: number of architects working with director *uid*."""
    co, res = helper_get_director_architects(uid, **kwargs)
    return co, [len(res)]


@app.view('/director-architects', target=ORG.Person, parameters=[ORG.Person],
          id='director-architects', title='Architects of Director')
def get_director_architects(uid, **kwargs):
    """View: architects working with director *uid*."""
    return helper_get_director_architects(uid, **kwargs)


@app.metric('/total-director-developers', parameters=[ORG.Person],
            id='director-developers', title='Developers of Director')
def get_total_director_developers(uid, **kwargs):
    """Metric: number of developers on the products of director *uid*."""
    co, res = helper_get_position_developers(uid, 'directors', **kwargs)
    return co, [len(res)]


@app.view('/director-developers', target=ORG.Person, parameters=[ORG.Person],
          id='director-developers', title='Developers of Director')
def get_director_developers(uid, **kwargs):
    """View: developers on the products of director *uid*."""
    return helper_get_position_developers(uid, 'directors', **kwargs)
@app.metric('/total-director-stakeholders', parameters=[ORG.Person],
            id='director-stakeholders', title='Stakeholders of Director')
def get_total_director_stakeholders(uid, **kwargs):
    """Metric: number of stakeholders in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_director_roles(uid, args, 'stakeholder', flag_total))]


@app.view('/director-stakeholders', target=ORG.Person, parameters=[ORG.Person],
          id='director-stakeholders', title='Stakeholders of Director')
def get_director_stakeholders(uid, **kwargs):
    """View: stakeholders in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_roles(uid, args, 'stakeholder', flag_total)


@app.metric('/total-director-swarchitects', parameters=[ORG.Person],
            id='director-swarchitects', title='Software Architects of Director')
def get_total_director_swarchitects(uid, **kwargs):
    """Metric: number of software architects in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_director_roles(uid, args, 'softwarearchitect', flag_total))]


@app.view('/director-swarchitects', target=ORG.Person, parameters=[ORG.Person],
          id='director-swarchitects', title='Software Architects of Director')
def get_director_swarchitects(uid, **kwargs):
    """View: software architects in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_roles(uid, args, 'softwarearchitect', flag_total)


@app.metric('/total-director-swdevelopers', parameters=[ORG.Person],
            id='director-swdevelopers', title='Software Developers of Director')
def get_total_director_swdevelopers(uid, **kwargs):
    """Metric: number of software developers in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_director_roles(uid, args, 'softwaredeveloper', flag_total))]


@app.view('/director-swdevelopers', target=ORG.Person, parameters=[ORG.Person],
          id='director-swdevelopers', title='Software Developers of Director')
def get_director_swdevelopers(uid, **kwargs):
    """View: software developers in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_roles(uid, args, 'softwaredeveloper', flag_total)


@app.metric('/total-director-pjmanagers', parameters=[ORG.Person],
            id='director-pjmanagers', title='Project Managers of Director')
def get_total_director_pjmanagers(uid, **kwargs):
    """Metric: number of project managers in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, [len(get_director_roles(uid, args, 'projectmanager', flag_total))]


@app.view('/director-pjmanagers', target=ORG.Person, parameters=[ORG.Person],
          id='director-pjmanagers', title='Project Managers of Director')
def get_director_pjmanagers(uid, **kwargs):
    """View: project managers in director *uid*'s projects."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return args, get_director_roles(uid, args, 'projectmanager', flag_total)
@app.metric('/total-director-members', parameters=[ORG.Person],
            id='director-members', title='Members below Director')
def get_total_director_members(uid, **kwargs):
    """Metric: number of distinct members (PMs + architects + developers)
    working under director *uid*, deduplicated by member id."""
    res = {}
    co, pm = helper_get_director_pmanagers(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in pm]
    co, ar = helper_get_director_architects(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in ar]
    co, dev = helper_get_position_developers(uid, 'directors', **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in dev]
    # co keeps the context of the last helper call (developers).
    return co, [len(res.keys())]


@app.view('/director-members', target=ORG.Person, parameters=[ORG.Person],
          id='director-members', title='Members below Director')
def get_director_members(uid, **kwargs):
    """View: distinct members (PMs + architects + developers) working under
    director *uid*, deduplicated by member id."""
    res = {}
    co, pm = helper_get_director_pmanagers(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in pm]
    co, ar = helper_get_director_architects(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in ar]
    co, dev = helper_get_position_developers(uid, 'directors', **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in dev]
    res_mem = []
    [res_mem.append({
        "id": x,
        "uri": res[x]
    }) for x in res.keys()]
    return co, res_mem
@app.metric('/director-productmembers', aggr='avg', parameters=[ORG.Person],
            id='director-productmembers', title='Product Members AVG of Director')
def get_avg_director_productmembers(uid, **kwargs):
    """Metric: average number of members per product of director *uid*."""
    res = {}
    co, pm = helper_get_director_pmanagers(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in pm]
    co, ar = helper_get_director_architects(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in ar]
    co, dev = helper_get_position_developers(uid, 'directors', **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in dev]
    res_mem = len(res.keys())
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_pr = len(get_position_products(uid, args, 'directors', flag_total))
    # Guard against dividing by zero products.
    if res_pr == 0:
        return co, [0]
    return co, [float(res_mem) / float(res_pr)]


@app.metric('/director-productrepositories', aggr='avg', parameters=[ORG.Person],
            id='director-productrepositories', title='Product Repositories AVG of Director')
def get_avg_director_productrepositories(uid, **kwargs):
    """Metric: average number of repositories per product of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_rep = len(get_position_repositories(uid, args, 'directors', flag_total, True))
    res_pr = len(get_position_products(uid, args, 'directors', flag_total))
    if res_pr == 0:
        return args, [0]
    return args, [float(res_rep) / float(res_pr)]


@app.metric('/director-projectmembers', aggr='avg', parameters=[ORG.Person],
            id='director-projectmembers', title='Project Members AVG of Director')
def get_avg_director_projectmembers(uid, **kwargs):
    """Metric: average number of members per project of director *uid*."""
    res = {}
    co, pm = helper_get_director_pmanagers(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in pm]
    co, ar = helper_get_director_architects(uid, **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in ar]
    co, dev = helper_get_position_developers(uid, 'directors', **kwargs)
    [res.update({x.get('id'): x.get('uri')}) for x in dev]
    res_mem = len(res.keys())
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_pr = len(get_position_projects(uid, args, 'directors', flag_total, True))
    if res_pr == 0:
        return co, [0]
    return co, [float(res_mem) / float(res_pr)]


@app.metric('/director-projectrepositories', aggr='avg', parameters=[ORG.Person],
            id='director-projectrepositories', title='Project Repositories AVG of Director')
def get_avg_director_projectrepositories(uid, **kwargs):
    """Metric: average number of repositories per project of director *uid*."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_rep = len(get_position_repositories(uid, args, 'directors', flag_total, True))
    res_pr = len(get_position_projects(uid, args, 'directors', flag_total, True))
    if res_pr == 0:
        return args, [0]
    return args, [float(res_rep) / float(res_pr)]
@app.metric('/director-activity', parameters=[ORG.Person],
            id='director-activity', title='Activity of Director')
def get_director_activity(uid, **kwargs):
    """Metric: summed product activity of director *uid*, normalized so the
    peak value of the series is 1.0."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    co, res = get_external_position_metric(uid, 'sum-product-activity', 'directors', 'sum', args, flag_total)
    res_makeup = []
    if len(res):
        # Normalize the series against its maximum value.
        res_max = max(res)
        [res_makeup.append(float(x)/res_max) for x in res]
    return co, res_makeup


@app.metric('/director-quality', aggr='avg', parameters=[ORG.Person],
            id='director-quality', title='Quality of Director')
def get_director_quality(uid, **kwargs):
    """Metric: averaged product quality across director *uid*'s products."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(uid, 'sum-product-quality', 'directors', 'avg', args, flag_total)


@app.metric('/director-health', aggr='avg', parameters=[ORG.Person],
            id='director-health', title='Health of Director')
def get_director_health(uid, **kwargs):
    """Metric: averaged product health across director *uid*'s products."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(uid, 'sum-product-health', 'directors', 'avg', args, flag_total)


@app.metric('/director-costs', parameters=[ORG.Person],
            id='director-costs', title='Costs of Director')
def get_director_costs(uid, **kwargs):
    """Metric: summed product costs across director *uid*'s products."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(uid, 'sum-product-cost', 'directors', 'sum', args, flag_total)


@app.metric('/director-externals', parameters=[ORG.Person],
            id='director-externals', title='External Committers from Products of Director')
def get_director_externals(uid, **kwargs):
    """Metric: summed external committers across director *uid*'s products."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(uid, 'sum-product-externals', 'directors', 'sum', args, flag_total)


@app.metric('/director-timetomarket', aggr='avg', parameters=[ORG.Person],
            id='director-timetomarket', title='Time To Market from Products of Director')
def get_director_timetomarket(uid, **kwargs):
    """Metric: averaged time-to-market across director *uid*'s products."""
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(uid, 'sum-product-timetomarket', 'directors', 'avg', args, flag_total)
@app.metric('/total-pmanager-architects', parameters=[ORG.Person],
            id='pmanager-architects', title='Architects of Product Manager')
def get_total_pmanager_architects(uid, **kwargs):
    """Number of architects working under the product manager."""
    context, architects = helper_get_pmanager_architects(uid, **kwargs)
    return context, [len(architects)]


@app.view('/pmanager-architects', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-architects', title='Architects of Product Manager')
def get_pmanager_architects(uid, **kwargs):
    """Architects working under the product manager."""
    return helper_get_pmanager_architects(uid, **kwargs)


@app.metric('/total-pmanager-developers', parameters=[ORG.Person],
            id='pmanager-developers', title='Developers of Product Manager')
def get_total_pmanager_developers(uid, **kwargs):
    """Number of developers working under the product manager."""
    context, developers = helper_get_position_developers(
        uid, 'productmanagers', **kwargs)
    return context, [len(developers)]


@app.view('/pmanager-developers', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-developers', title='Developers of Product Manager')
def get_pmanager_developers(uid, **kwargs):
    """Developers working under the product manager."""
    return helper_get_position_developers(uid, 'productmanagers', **kwargs)
@app.metric('/total-pmanager-stakeholders', parameters=[ORG.Person],
            id='pmanager-stakeholders', title='Stakeholders of Product Manager')
def get_total_pmanager_stakeholders(uid, **kwargs):
    """Number of stakeholders on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    roles = get_pmanager_roles(uid, args, 'stakeholder', flag_total)
    return args, [len(roles)]


@app.view('/pmanager-stakeholders', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-stakeholders', title='Stakeholders of Product Manager')
def get_pmanager_stakeholders(uid, **kwargs):
    """Stakeholders on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return args, get_pmanager_roles(uid, args, 'stakeholder', flag_total)


@app.metric('/total-pmanager-swarchitects', parameters=[ORG.Person],
            id='pmanager-swarchitects', title='Software Architects of Product Manager')
def get_total_pmanager_swarchitects(uid, **kwargs):
    """Number of software architects on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    roles = get_pmanager_roles(uid, args, 'softwarearchitect', flag_total)
    return args, [len(roles)]


@app.view('/pmanager-swarchitects', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-swarchitects', title='Software Architects of Product Manager')
def get_pmanager_swarchitects(uid, **kwargs):
    """Software architects on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return args, get_pmanager_roles(uid, args, 'softwarearchitect', flag_total)


@app.metric('/total-pmanager-swdevelopers', parameters=[ORG.Person],
            id='pmanager-swdevelopers', title='Software Developers of Product Manager')
def get_total_pmanager_swdevelopers(uid, **kwargs):
    """Number of software developers on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    roles = get_pmanager_roles(uid, args, 'softwaredeveloper', flag_total)
    return args, [len(roles)]


@app.view('/pmanager-swdevelopers', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-swdevelopers', title='Software Developers of Product Manager')
def get_pmanager_swdevelopers(uid, **kwargs):
    """Software developers on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return args, get_pmanager_roles(uid, args, 'softwaredeveloper', flag_total)


@app.metric('/total-pmanager-pjmanagers', parameters=[ORG.Person],
            id='pmanager-pjmanagers', title='Project Managers of Product Manager')
def get_total_pmanager_pjmanagers(uid, **kwargs):
    """Number of project managers on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    roles = get_pmanager_roles(uid, args, 'projectmanager', flag_total)
    return args, [len(roles)]


@app.view('/pmanager-pjmanagers', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-pjmanagers', title='Project Managers of Product Manager')
def get_pmanager_pjmanagers(uid, **kwargs):
    """Project managers on the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return args, get_pmanager_roles(uid, args, 'projectmanager', flag_total)
@app.metric('/total-pmanager-members', parameters=[ORG.Person],
            id='pmanager-members', title='Members below Product Manager')
def get_total_pmanager_members(uid, **kwargs):
    """Count of distinct members (architects + developers) below the PM.

    Members are deduplicated by their 'id' key.
    """
    res = {}
    co, ar = helper_get_pmanager_architects(uid, **kwargs)
    # dict.update with a comprehension instead of a side-effect list comp.
    res.update({x.get('id'): x.get('uri') for x in ar})
    co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs)
    res.update({x.get('id'): x.get('uri') for x in dev})
    return co, [len(res)]
@app.metric('/pmanager-productrepositories', aggr='avg', parameters=[ORG.Person],
            id='pmanager-productrepositories', title='Product Repositories AVG of Product Manager')
def get_avg_pmanager_productrepositories(uid, **kwargs):
    """Average number of repositories per product of the product manager."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    repositories = get_position_repositories(
        uid, args, 'productmanagers', flag_total, True)
    products = get_position_products(uid, args, 'productmanagers', flag_total)
    if not products:
        return args, [0]
    return args, [float(len(repositories)) / float(len(products))]
@app.metric('/pmanager-productmembers', aggr='avg', parameters=[ORG.Person],
            id='pmanager-productmembers', title='Product Members AVG of Product Manager')
def get_avg_pmanager_productmembers(uid, **kwargs):
    """Average number of distinct members per product of the PM.

    Members (architects + developers) are deduplicated by 'id' before
    dividing by the number of products; reports 0 when there are none.
    """
    members = {}
    co, ar = helper_get_pmanager_architects(uid, **kwargs)
    # dict.update with a comprehension instead of a side-effect list comp.
    members.update({x.get('id'): x.get('uri') for x in ar})
    co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs)
    members.update({x.get('id'): x.get('uri') for x in dev})
    res_mem = len(members)
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_pr = len(get_position_products(uid, args, 'productmanagers', flag_total))
    if res_pr == 0:
        # No products: avoid division by zero.
        return co, [0]
    return co, [float(res_mem) / float(res_pr)]
@app.metric('/pmanager-projectrepositories', aggr='avg', parameters=[ORG.Person],
            id='pmanager-projectrepositories', title='Project Repositories AVG of Product Manager')
def get_avg_pmanager_projectrepositories(uid, **kwargs):
    """Average number of repositories per project of the product manager."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    repositories = get_position_repositories(
        uid, args, 'productmanagers', flag_total, True)
    projects = get_position_projects(
        uid, args, 'productmanagers', flag_total, True)
    if not projects:
        return args, [0]
    return args, [float(len(repositories)) / float(len(projects))]
@app.metric('/pmanager-projectmembers', aggr='avg', parameters=[ORG.Person],
            id='pmanager-projectmembers', title='Project Members AVG of Product Manager')
def get_avg_pmanager_projectmembers(uid, **kwargs):
    """Average number of distinct members per project of the PM.

    Members (architects + developers) are deduplicated by 'id' before
    dividing by the number of projects; reports 0 when there are none.
    """
    members = {}
    co, ar = helper_get_pmanager_architects(uid, **kwargs)
    # dict.update with a comprehension instead of a side-effect list comp.
    members.update({x.get('id'): x.get('uri') for x in ar})
    co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs)
    members.update({x.get('id'): x.get('uri') for x in dev})
    res_mem = len(members)
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    res_pr = len(get_position_projects(uid, args, 'productmanagers', flag_total, True))
    if res_pr == 0:
        # No projects: avoid division by zero.
        return co, [0]
    return co, [float(res_mem) / float(res_pr)]
@app.view('/pmanager-members', target=ORG.Person, parameters=[ORG.Person],
          id='pmanager-members', title='Members below Product Manager')
def get_pmanager_members(uid, **kwargs):
    """Distinct members (architects + developers) below the PM.

    Returns ``(context, [{'id': ..., 'uri': ...}, ...])`` with members
    deduplicated by their 'id' key.
    """
    members = {}
    co, ar = helper_get_pmanager_architects(uid, **kwargs)
    # dict.update with a comprehension instead of a side-effect list comp.
    members.update({x.get('id'): x.get('uri') for x in ar})
    co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs)
    members.update({x.get('id'): x.get('uri') for x in dev})
    res_mem = [{"id": member_id, "uri": uri} for member_id, uri in members.items()]
    return co, res_mem
@app.metric('/pmanager-activity', parameters=[ORG.Person],
            id='pmanager-activity', title='Activity of Product Manager')
def get_pmanager_activity(uid, **kwargs):
    """Activity of the PM's products, normalised to [0, 1].

    Each raw activity value is divided by the maximum value found, so the
    most active product maps to 1.0.  Returns ``(context, normalised)``.
    """
    flag_total = kwargs.get('begin') is None and kwargs.get('end') is None
    args = get_correct_kwargs(kwargs)
    co, res = get_external_position_metric(
        uid, 'sum-product-activity', 'productmanagers', 'sum', args, flag_total)
    res_makeup = []
    if res:
        res_max = max(res)
        if res_max:
            res_makeup = [float(x) / res_max for x in res]
        else:
            # All values are zero: avoid ZeroDivisionError, report zeros.
            res_makeup = [0.0] * len(res)
    return co, res_makeup
@app.metric('/pmanager-quality', aggr='avg', parameters=[ORG.Person],
            id='pmanager-quality', title='Quality of Product Manager')
def get_pmanager_quality(uid, **kwargs):
    """Average product quality over the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(
        uid, 'sum-product-quality', 'productmanagers', 'avg', args, flag_total)


@app.metric('/pmanager-health', aggr='avg', parameters=[ORG.Person],
            id='pmanager-health', title='Health of Product Manager')
def get_pmanager_health(uid, **kwargs):
    """Average product health over the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(
        uid, 'sum-product-health', 'productmanagers', 'avg', args, flag_total)


@app.metric('/pmanager-costs', parameters=[ORG.Person],
            id='pmanager-costs', title='Costs of Product Manager')
def get_pmanager_costs(uid, **kwargs):
    """Summed product costs over the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(
        uid, 'sum-product-cost', 'productmanagers', 'sum', args, flag_total)


@app.metric('/pmanager-timetomarket', aggr='avg', parameters=[ORG.Person],
            id='pmanager-timetomarket', title='Time To Market from Products of Product Manager')
def get_pmanager_timetomarket(uid, **kwargs):
    """Average time-to-market over the product manager's products."""
    flag_total = all(kwargs.get(key) is None for key in ('begin', 'end'))
    args = get_correct_kwargs(kwargs)
    return get_external_position_metric(
        uid, 'sum-product-timetomarket', 'productmanagers', 'avg', args, flag_total)
| 43,200 | 14,148 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-31 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters the choices on Job.job_type and
    Job.phase.  Do not edit by hand; changes belong in the model."""

    dependencies = [
        ('daiquiri_jobs', '0003_owner_fk'),
    ]

    operations = [
        # Restrict job_type to the single supported QUERY type.
        migrations.AlterField(
            model_name='job',
            name='job_type',
            field=models.CharField(choices=[('QUERY', 'Query')], max_length=10),
        ),
        # Allowed job phases, from PENDING through ARCHIVED.
        migrations.AlterField(
            model_name='job',
            name='phase',
            field=models.CharField(choices=[('PENDING', 'Pending'), ('QUEUED', 'Queued'), ('EXECUTING', 'Executing'), ('COMPLETED', 'Completed'), ('ERROR', 'Error'), ('ABORTED', 'Aborted'), ('UNKNOWN', 'Unknown'), ('HELD', 'Held'), ('SUSPENDED', 'Suspended'), ('ARCHIVED', 'Archived')], max_length=10),
        ),
    ]
| 874 | 309 |
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Constants for the CH (Federation Registry service)
# AKA Service Registry (SR) AKA Clearinghouse (CH)
# List of services provided by the CH server
# List of services provided by the CH server
services = ["SERVICE"]

# Numeric codes for the kinds of services advertised in the CH.
SERVICE_AGGREGATE_MANAGER = 0
SERVICE_SLICE_AUTHORITY = 1
SERVICE_PROJECT_AUTHORITY = 2
SERVICE_MEMBER_AUTHORITY = 3
SERVICE_AUTHORIZATION_SERVICE = 4
SERVICE_LOGGING_SERVICE = 5
SERVICE_CREDENTIAL_STORE = 6
SERVICE_CERTIFICATE_AUTHORITY = 7
SERVICE_KEY_MANAGER = 8
SERVICE_WIMAX_SITE = 10
SERVICE_IRODS = 11

# Service type name -> numeric code, for the types advertised in the CH.
service_types = {
    "AGGREGATE_MANAGER": SERVICE_AGGREGATE_MANAGER,
    "SLICE_AUTHORITY": SERVICE_SLICE_AUTHORITY,
    "PROJECT_AUTHORITY": SERVICE_PROJECT_AUTHORITY,
    "MEMBER_AUTHORITY": SERVICE_MEMBER_AUTHORITY,
    "AUTHORIZATION_SERVICE": SERVICE_AUTHORIZATION_SERVICE,
    "LOGGING_SERVICE": SERVICE_LOGGING_SERVICE,
    "CREDENTIAL_STORE": SERVICE_CREDENTIAL_STORE,
    "CERTIFICATE_AUTHORITY": SERVICE_CERTIFICATE_AUTHORITY,
    "KEY_MANAGER": SERVICE_KEY_MANAGER,
    "WIMAX_SITE": SERVICE_WIMAX_SITE,
    "IRODS": SERVICE_IRODS,
}

# Mapping from external to internal data schema
field_mapping = {
    "_GENI_SERVICE_ID": "id",
    "SERVICE_URN": "service_urn",
    "SERVICE_URL": "service_url",
    "_GENI_SERVICE_CERT_FILENAME": "service_cert",
    "SERVICE_CERT": "service_cert",
    "SERVICE_NAME": "service_name",
    "SERVICE_DESCRIPTION": "service_description",
    "SERVICE_TYPE": "service_type",
    "_GENI_SERVICE_SHORT_NAME": "short_name",
}

# The externally visible data schema for services
mandatory_fields = {
    "SERVICE_URN": {"TYPE": "URN"},
    "SERVICE_URL": {"TYPE": "URL"},
    "SERVICE_CERT": {"TYPE": "CERTIFICATE"},
    "SERVICE_NAME": {"TYPE": "STRING"},
    "SERVICE_DESCRIPTION": {"TYPE": "STRING"},
}

supplemental_fields = {
    "_GENI_SERVICE_CERT_FILENAME": {"TYPE": "STRING", "OBJECT": "SERVICE"},
    "_GENI_SERVICE_ID": {"TYPE": "INTEGER", "OBJECT": "SERVICE"},
    "_GENI_SERVICE_ATTRIBUTES": {"TYPE": "DICTIONARY", "OBJECT": "SERVICE"},
    "_GENI_SERVICE_SHORT_NAME": {"TYPE": "STRING", "OBJECT": "SERVICE"},
}

# Defined attributes on services.
# Each attribute name maps to a dictionary with 'description' and,
# optionally, 'service_types' (the service types the attribute applies to)
# and 'acceptable_values' (the permitted values).  An absent optional tag
# means the attribute is unrestricted in that respect.
defined_attributes = {
    "SPEAKS_FOR": {
        "description": "Does this aggregate accept speaks-for credentials and options?",
        "service_types": [SERVICE_AGGREGATE_MANAGER],
        "acceptable_values": ['t', 'f'],
    },
    "AM_API_VERSION": {
        "description": "The version of the AM API supported by this aggregate",
        "service_types": [SERVICE_AGGREGATE_MANAGER],
        "acceptable_values": ['1', '2', '3'],
    },
}
| 4,339 | 1,528 |
import pygame
class Logger():
    """Three-line scrolling text log rendered onto a pygame surface."""

    def __init__(self):
        self.mem_text = ""  # last message seen, used to suppress duplicates
        # Line buffers: text1 is the newest line, text3 the oldest.
        self.text1, self.text2, self.text3 = "", "", ""
        self.text_color1, self.text_color2, self.text_color3 = \
            (0, 0, 0), (0, 0, 0), (0, 0, 0)
        self.max_width = 100  # maximum characters per displayed line

    def gui_logger(self, surface, font):
        """Draw the three buffered lines at fixed positions on the surface."""
        rows = (
            (self.text1, self.text_color1, (363, 669)),
            (self.text2, self.text_color2, (363, 684)),
            (self.text3, self.text_color3, (363, 699)),
        )
        for text, color, position in rows:
            surface.blit(font.render(str(text), True, color), position)

    def log_add(self, text, text_color=(0, 0, 0)):
        """Push a new message onto the log; over-long text spans two lines."""
        if text == self.mem_text:
            return  # duplicate of the previous message: ignore
        if len(text) > self.max_width:
            # Split across two lines: head on line 1, tail on line 2,
            # and the previous newest line scrolls to line 3.
            head = text[:self.max_width]
            tail = text[self.max_width:]
            self.text3, self.text2, self.text1 = self.text1, tail, head
            self.text_color3, self.text_color2, self.text_color1 = \
                self.text_color1, text_color, text_color
        else:
            self.text3, self.text2, self.text1 = self.text2, self.text1, text
            self.text_color3, self.text_color2, self.text_color1 = \
                self.text_color2, self.text_color1, text_color
        self.mem_text = text

    def gui_max_width(self, max_width):
        """Set the maximum line width in characters."""
        self.max_width = max_width
| 1,657 | 574 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2012 Peter Kuma
import os
from datetime import date, datetime
import urllib.request, urllib.error, urllib.parse
import json
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from markdown import markdown
from textile import textile
from django.utils.safestring import mark_safe
from django.contrib.contenttypes.fields import GenericRelation
from django_attach.models import Attachment
from linguo.models import MultilingualModel
from linguo.managers import MultilingualManager
from django.urls import reverse
from django.utils.translation import get_language
from django.core.validators import MaxValueValidator
from main.models import Category
# Markup languages available for rendering the head/body fields
# (see Event.head_html / Event.body_html).
MARKUP_CHOICES = (
    ('markdown', 'Markdown'),
    ('textile', 'Textile'),
    ('html', 'HTML'),
)
MARKERS = (
'amenity/arts_centre',
'amenity/atm',
'amenity/bank',
'amenity/bar',
'amenity/bbq',
'amenity/bench',
'amenity/bicycle_parking',
'amenity/bicycle_repair_station',
'amenity/biergarten',
'amenity/boat_rental',
'amenity/bureau_de_change',
'amenity/bus_station',
'amenity/cafe',
'amenity/car_wash',
'amenity/casino',
'amenity/charging_station',
'amenity/cinema',
'amenity/community_centre',
'amenity/courthouse',
'amenity/dentist',
'amenity/doctors',
'amenity/drinking_water',
'amenity/emergency_phone',
'amenity/excrement_bags',
'amenity/fast_food',
'amenity/ferry',
'amenity/firestation',
'amenity/fountain',
'amenity/fuel',
'amenity/hospital',
'amenity/hunting_stand',
'amenity/ice_cream',
'amenity/internet_cafe',
'amenity/library',
'amenity/motorcycle_parking',
'amenity/nightclub',
'amenity/parking',
'amenity/parking_entrance_multistorey',
'amenity/parking_entrance_underground',
'amenity/parking_subtle',
'amenity/parking_tickets',
'amenity/pharmacy',
'amenity/place_of_worship',
'amenity/police',
'amenity/post_box',
'amenity/post_office',
'amenity/prison',
'amenity/pub',
'amenity/public_bath',
'amenity/public_bookcase',
'amenity/public_transport_tickets',
'amenity/recycling',
'amenity/rental_bicycle',
'amenity/rental_car',
'amenity/restaurant',
'amenity/shelter',
'amenity/shower',
'amenity/social_facility',
'amenity/taxi',
'amenity/telephone',
'amenity/theatre',
'amenity/toilets',
'amenity/town_hall',
'amenity/vehicle_inspection',
'amenity/veterinary',
'amenity/waste_basket',
'amenity/waste_disposal',
'barrier/cattle_grid',
'barrier/cycle_barrier',
'barrier/full-height_turnstile',
'barrier/gate',
'barrier/kissing_gate',
'barrier/lift_gate',
'barrier/motorcycle_barrier',
'barrier/stile',
'barrier/toll_booth',
'highway/bus_stop',
'highway/elevator',
'highway/ford',
'highway/traffic_light',
'historic/archaeological_site',
'historic/bust',
'historic/castle',
'historic/city_gate',
'historic/fort',
'historic/fortress',
'historic/manor',
'historic/memorial',
'historic/monument',
'historic/obelisk',
'historic/palace',
'historic/plaque',
'historic/shrine',
'historic/statue',
'historic/stone',
'leisure/amusement_arcade',
'leisure/beach_resort',
'leisure/bird_hide',
'leisure/bowling_alley',
'leisure/firepit',
'leisure/fishing',
'leisure/fitness',
'leisure/golf',
'leisure/miniature_golf',
'leisure/outdoor_seating',
'leisure/playground',
'leisure/sauna',
'leisure/slipway',
'leisure/water_park',
'man_made/bell_tower',
'man_made/chimney',
'man_made/communications_tower',
'man_made/crane',
'man_made/cross',
'man_made/lighthouse',
'man_made/mast',
'man_made/mast_communications',
'man_made/mast_lighting',
'man_made/power_tower',
'man_made/power_tower_small',
'man_made/storage_tank',
'man_made/telescope_dish',
'man_made/telescope_dome',
'man_made/tower_cantilever_communication',
'man_made/tower_cooling',
'man_made/tower_defensive',
'man_made/tower_dish',
'man_made/tower_dome',
'man_made/tower_generic',
'man_made/tower_lattice',
'man_made/tower_lattice_communication',
'man_made/tower_lattice_lighting',
'man_made/tower_lighting',
'man_made/tower_observation',
'man_made/water_tower',
'man_made/windmill',
'natural/cave',
'natural/peak',
'natural/saddle',
'office/consulate',
'office/embassy',
'religion/buddhist',
'religion/christian',
'religion/hinduist',
'religion/jewish',
'religion/muslim',
'religion/shintoist',
'religion/sikhist',
'religion/taoist',
'shop/alcohol',
'shop/art',
'shop/bag',
'shop/bakery',
'shop/beauty',
'shop/bed',
'shop/beverages',
'shop/bicycle',
'shop/bookmaker',
'shop/butcher',
'shop/car',
'shop/car_parts',
'shop/carpet',
'shop/car_repair',
'shop/charity',
'shop/chemist',
'shop/clothes',
'shop/coffee',
'shop/computer',
'shop/confectionery',
'shop/convenience',
'shop/copyshop',
'shop/dairy',
'shop/deli',
'shop/department_store',
'shop/diy',
'shop/electronics',
'shop/fabric',
'shop/florist',
'shop/furniture',
'shop/garden_centre',
'shop/gift',
'shop/greengrocer',
'shop/hairdresser',
'shop/hifi',
'shop/houseware',
'shop/interior_decoration',
'shop/jewelry',
'shop/laundry',
'shop/marketplace',
'shop/massage',
'shop/medical_supply',
'shop/mobile_phone',
'shop/music',
'shop/musical_instrument',
'shop/newsagent',
'shop/optician',
'shop/outdoor',
'shop/paint',
'shop/perfumery',
'shop/pet',
'shop/photo',
'shop/seafood',
'shop/second_hand',
'shop/shoes',
'shop/sports',
'shop/stationery',
'shop/supermarket',
'shop/tea',
'shop/ticket',
'shop/tobacco',
'shop/toys',
'shop/trade',
'shop/travel_agency',
'shop/tyres',
'shop/variety_store',
'shop/video',
'shop/video_games',
'tourism/alpinehut',
'tourism/apartment',
'tourism/artwork',
'tourism/audioguide',
'tourism/board',
'tourism/camping',
'tourism/caravan_park',
'tourism/chalet',
'tourism/guest_house',
'tourism/guidepost',
'tourism/hostel',
'tourism/hotel',
'tourism/information',
'tourism/map',
'tourism/motel',
'tourism/museum',
'tourism/office',
'tourism/picnic',
'tourism/terminal',
'tourism/viewpoint',
'tourism/wilderness_hut',
)
# Turn the marker names into (value, label) pairs usable as field choices.
MARKERS = [(marker_name, marker_name) for marker_name in MARKERS]
class Member(models.Model):
    """A member with a name, category and (optionally obfuscated) e-mail."""

    first_name = models.CharField(_('first name'), max_length=50)
    surname = models.CharField(_('surname'), max_length=50)
    category = models.CharField(_('category'), max_length=5)
    email = models.EmailField(_('e-mail'), blank=True)

    def __str__(self):
        """Full name; shown wherever the object is rendered as text."""
        return '%s %s' % (self.first_name, self.surname)

    # The module imports Python-3-only modules (urllib.request, django.urls),
    # so the former __unicode__ was never invoked; keep it as an alias for
    # any legacy caller.
    __unicode__ = __str__

    def email_special(self):
        """Return the e-mail with '@' replaced, to hinder address scrapers."""
        return self.email.replace('@', '[zavinac]')

    class Meta:
        ordering = ('category', 'surname')
        verbose_name = _('member')
        verbose_name_plural = _('members')
class Event(MultilingualModel):
    """An event with schedule, map location and markup-rendered content.

    Translatable fields are listed in Meta.translate (linguo).
    """

    title = models.CharField(_('title'), max_length=100)
    name = models.SlugField(
        _('name'),
        unique=True,
        help_text=_('Short name that will appear in the URL')
    )
    start_date = models.DateField(_('start date'))
    # end_date is optional: one-day events leave it empty.
    end_date = models.DateField(_('end date'), null=True, blank=True)
    location = models.CharField(_('location'), max_length=100)
    latitude = models.FloatField(_('latitude'), null=True, blank=True)
    longitude = models.FloatField(_('longitude'), null=True, blank=True)
    map_zoom = models.PositiveIntegerField(_('map zoom'),
        default=15,
        validators=[MaxValueValidator(19),]
    )
    category = models.ForeignKey(Category,
        verbose_name=_('category'),
        on_delete=models.CASCADE,
    )
    # Which markup language head/body are written in; see MARKUP_CHOICES.
    markup = models.CharField(
        _('markup'),
        max_length=50,
        choices=MARKUP_CHOICES,
        default='markdown',
        help_text=_('Documentation: <a href="https://en.wikipedia.org/wiki/Markdown">Markdown</a>, <a href="http://en.wikipedia.org/wiki/Textile_(markup_language)">Textile</a>')
    )
    head = models.TextField(
        _('head'),
        blank=True,
        help_text=_('Add files and images below')
    )
    body = models.TextField(
        _('body'),
        blank=True,
        help_text=_('Add files and images below')
    )
    attachments = GenericRelation(Attachment)
    created = models.DateTimeField(_('created'), auto_now_add=True)
    modified = models.DateTimeField(_('modified'), auto_now=True)

    def get_absolute_url(self):
        """URL of this event's page in the current language."""
        # Imported locally to avoid a circular import with the views module.
        import ob.views
        return reverse(ob.views.event, kwargs={
            'lang': get_language(),
            'category_name': Category.objects.get(name_en='orienteering').name,
            'name': self.name,
        })

    def head_html(self):
        """Render the head field to HTML using the selected markup."""
        if self.markup == 'markdown': return mark_safe(markdown(self.head))
        elif self.markup == 'textile': return mark_safe(textile(self.head))
        else: return mark_safe(self.head)

    def body_html(self):
        """Render the body field to HTML using the selected markup."""
        if self.markup == 'markdown': return mark_safe(markdown(self.body))
        elif self.markup == 'textile': return mark_safe(textile(self.body))
        else: return mark_safe(self.body)

    def is_upcoming(self):
        """True if the event has not finished yet (today still counts).

        'and' binds tighter than 'or': either there is no end date and the
        start is today or later, or the end date is today or later.
        """
        return self.end_date is None and self.start_date >= date.today() or \
            self.end_date is not None and self.end_date >= date.today()

    def larger_map_link(self):
        """URL of the full-size map page, or None if no map points exist."""
        # Imported locally to avoid a circular import with the views module.
        import ob.views
        return None if self.mappoint_set.count() == 0 else \
            reverse(ob.views.event_map, kwargs={
                'lang': get_language(),
                'category_name': self.category.name,
                'name': self.name,
            })

    objects = MultilingualManager()

    class Meta:
        ordering = ('-start_date',)
        verbose_name = _('event')
        verbose_name_plural = _('events')
        translate = ('title', 'name', 'location', 'head', 'body')
class MapPoint(MultilingualModel):
    """A point of interest shown on an event's map.

    The title is translatable (see Meta.translate, linguo).
    """

    title = models.CharField(_('title'), max_length=100)
    latitude = models.FloatField(_('latitude'))
    longitude = models.FloatField(_('longitude'))
    # Optional icon name from the MARKERS choice list (e.g. 'amenity/cafe').
    marker = models.CharField(_('marker'),
        null=True,
        blank=True,
        max_length=100,
        choices=MARKERS,
    )
    event = models.ForeignKey(Event,
        verbose_name=_('event'),
        on_delete=models.CASCADE,
    )

    objects = MultilingualManager()

    class Meta:
        verbose_name = _('map point')
        verbose_name_plural = _('map points')
        translate = ('title',)
| 9,765 | 4,088 |
'''Refactored tests from test_hal_nav.py'''
import json
import httpretty
import pytest
import conftest
import uritemplate
import restnavigator as RN
from restnavigator import exc
import restnavigator.halnav as HN
def uri_of(doc):
    """Extract the self-link URI from a HAL document."""
    self_link = doc['_links']['self']
    return self_link['href']
def link_to(doc):
    """Return the whole self-link object of a HAL document."""
    links = doc['_links']
    return links['self']
def register_hal_page(doc, **kwargs):
    """Register *doc* with httpretty at its own self-link URI.

    Recognised kwargs: status (default 200), method (default 'GET'),
    content_type (default 'application/hal+json'); anything left over is
    forwarded to register_uri.
    """
    status = kwargs.pop('status', 200)
    method = kwargs.pop('method', 'GET')
    content_type = kwargs.pop('content_type', 'application/hal+json')
    def body_callback(request, url, headers):
        '''We do a callback so the response body can be updated'''
        # NOTE(review): this pops 'headers' out of the enclosing kwargs, so
        # only the first invocation sees a caller-supplied value; a 'headers'
        # kwarg is also still forwarded to register_uri below because the pop
        # happens after that call. Confirm this is intended.
        headers2 = kwargs.pop('headers', headers)
        return (
            status,
            headers2,
            json.dumps(doc),
        )
    httpretty.HTTPretty.register_uri(
        method,
        body=body_callback,
        content_type=content_type,
        uri=uri_of(doc),
        **kwargs
    )
@pytest.fixture
def page(index_page, curie_links, index_uri):
    '''Returns a function that creates pages'''
    def _page(name, number):
        # Self link plus a 'next' link pointing at the following number.
        selflink = {
            'href': index_uri + name + '/' + str(number),
            'name': name + str(number),
        }
        nextlink = {
            'href': index_uri + name + '/' + str(number + 1),
            'name': name + str(number + 1),
        }
        doc = {
            '_links': {
                'self': selflink,
                'curies': curie_links,
                'next': nextlink
            },
            'name': name,
            'number': number,
            'data': conftest.random_sentence(),
        }
        register_hal_page(doc)
        # Record every page created, grouped by name, for later lookup.
        _page.registry.setdefault(name, []).append(doc)
        return doc
    # Function attribute used as a per-test registry of created pages.
    _page.registry = {}
    return _page
@pytest.yield_fixture
def http(request):
    '''Enables httpretty and disables it after the test'''
    # NOTE: pytest.yield_fixture is deprecated; plain @pytest.fixture
    # supports yield in current pytest versions.
    httpretty.HTTPretty.enable()
    yield httpretty.HTTPretty
    # Teardown: stop intercepting and clear all registered URIs.
    httpretty.HTTPretty.disable()
    httpretty.HTTPretty.reset()
@pytest.fixture
def index_uri():
    '''Root URI that all fake resources live under.'''
    return 'http://fakeuri.example/api/'


@pytest.fixture
def curie():
    '''Random lowercase curie prefix for this test.'''
    return conftest.random_word(2).lower()


@pytest.fixture
def curify(curie):
    '''Function that prefixes a rel with the current curie.'''
    def add_prefix(rel):
        return '{0}:{1}'.format(curie, rel)
    return add_prefix


@pytest.fixture
def curie_links(curie, index_uri):
    '''Templated curie link list for the '_links' section.'''
    link = {
        'name': curie,
        'href': index_uri + 'rels/{rel}',
        'templated': True,
    }
    return [link]
@pytest.fixture
def index_page(curie_links, index_uri, http):
    '''Registers a basic index page that can be extended.'''
    document = {
        '_links': {
            'curies': curie_links,
            'self': {'href': index_uri},
        },
        'data': conftest.random_paragraphs(),
    }
    register_hal_page(document)
    return document
@pytest.fixture
def N(index_uri, index_page):
    '''A basic HALNavigator rooted at index_uri.'''
    navigator = RN.Navigator.hal(index_uri)
    return navigator
class TestNavigator:
    '''tests for halnav.Navigator'''

    @pytest.fixture
    def fake_session(self):
        '''Creates a non functional fake session object'''
        class FakeNonFuncSession:
            headers = {'X-Custom': 'foo'}
        return FakeNonFuncSession()

    def test_custom_session(self, index_uri, fake_session):
        """A caller-supplied session (and its headers) is adopted as-is."""
        # Removed an unused second Navigator (N2) that was never asserted on.
        N = RN.Navigator.hal(index_uri, session=fake_session)
        assert N._core.session is fake_session
        assert N.headers is fake_session.headers
class TestPartialNavigator:
    '''tests for halnav.PartialNavigator'''

    @pytest.fixture
    def rel(self, curify, name):
        '''The link relation for the templated link'''
        return curify(name)

    @pytest.fixture(params=[set(['x']), set(['x', 'y']), set(['x', 'y', 'z'])])
    def vars(self, request):
        '''A set of random variables'''
        return request.param

    @pytest.fixture(params=[(0,0,0), (1,2,3)])
    def values(self, request):
        # Maps x, y, z onto the parametrized coordinate triple.
        return dict(zip('xyz', request.param))

    @pytest.fixture
    def name(self):
        '''The name of the templated resource'''
        return conftest.random_word(5).lower() + 's'

    @pytest.fixture
    def post_template(self, name, index_uri, index_page, rel, vars):
        '''Creates and registers a post templated link'''
        # Produces e.g. http://.../names/{x}/{y}: one path segment per
        # variable, in sorted order.
        href = "{index_uri}{name}/{{{varpath}}}".format(
            index_uri=index_uri,
            name=name,
            varpath='}/{'.join(v for v in sorted(vars))
        )
        link = {
            'href': href,
            'title': 'Templated link for ' + name,
            'templated': True,
        }
        index_page['_links'][rel] = link
        return href

    @pytest.fixture
    def tpl_rel(self, name, curify):
        # Curied rel for the templated variant of the resource.
        return curify(name + '_tpl')

    @pytest.fixture
    def posts(self, rel, name, index_uri, index_page, page, tpl_rel):
        '''Creates and registers some posts'''
        resource0 = page(name, 0)
        index_page['_links'][rel] = link_to(resource0)
        index_page['_links'][tpl_rel] = {
            'href': index_uri + name + '/{id}',
            'title': 'Template for ' + name,
            'templated': True,
        }
        # NOTE(review): page() already calls register_hal_page, so these
        # explicit register_hal_page calls register each resource twice —
        # presumably harmless with httpretty, but confirm.
        register_hal_page(resource0)
        # Chain pages 1..4 together via 'next' links.
        last = resource0
        for i in range(1, 5):
            resource = page(name, i)
            last['_links']['next'] = link_to(resource)
            last = resource
            register_hal_page(resource)
        return page.registry[name][:]

    @pytest.fixture
    def template_partial(self, rel, index_page, N, post_template):
        # PartialNavigator obtained by indexing with a templated rel.
        return N[rel]

    def test_template_uri(self, template_partial, post_template):
        assert template_partial.template_uri == post_template

    def test_expand_uri(
            self, vars, post_template, template_partial, values):
        # expand_uri must agree with the reference uritemplate expansion.
        uri = template_partial.expand_uri(**values)
        assert uri == uritemplate.expand(post_template, values)

    def test_expand_link(
            self, vars, post_template, template_partial, values):
        # Expanding yields a concrete (non-templated) link.
        link = template_partial.expand_link(**values)
        assert not link.props.get('templated', False)
        assert link.uri == uritemplate.expand(post_template, values)

    def test_expand(self, vars, post_template, template_partial, values):
        # Calling the partial yields a lazy navigator (not yet fetched).
        post1 = template_partial(**values)
        assert not post1.fetched
        assert post1.uri == uritemplate.expand(post_template, values)

    def test_variables(self, template_partial, vars):
        assert template_partial.variables == vars

    @pytest.mark.parametrize('i', range(0, 5))
    def test_valid_expansion(self, posts, name, N, tpl_rel, i):
        # Expanding the id template must reach each registered post.
        partial = N[tpl_rel]
        nav = partial(id=i)
        nav.fetch()
        assert nav.status == (200, 'OK')
        assert nav.uri == uri_of(posts[i])
class TestHALNavGetItem:
    '''Tests the __getitem__ method of HALNavigator '''

    @pytest.fixture
    def names(self):
        """Indexable factory over three random lowercase rel names."""
        namelist = [conftest.random_word().lower() for _ in range(3)]

        def _names(i):
            return namelist[i]
        return _names

    @pytest.fixture
    def rels(self, names, curify):
        """Curie-qualified counterpart of `names`."""
        def _rels(i):
            return curify(names(i))
        return _rels

    @pytest.fixture
    def resources(self, names, rels, index_page, index_uri, page):
        """Chain three pages: index -> page0 -> page1 -> page2, each linked
        from its predecessor under rels(i).

        NOTE(review): unlike link_resources below, these pages are never
        passed to register_hal_page -- confirm tests using this fixture rely
        only on the link structure, not on fetching the linked pages.
        """
        last = index_page
        for i in range(3):
            new = page(names(i), i)
            last['_links'][rels(i)] = {
                'href': uri_of(new),
                'title': "Page for " + names(i)
            }
            last = new

    def test_fetch_behavior(self, N, resources, rels):
        # Single-rel getitem fetches eagerly; tuple getitem leaves the
        # final navigator unfetched.
        Na = N[rels(0)]
        Nb = N[rels(0), rels(1)]
        assert Na.fetched
        assert not Nb.fetched

    def test_sequence_equivalence(self, N, resources, rels):
        # Tuple syntax and chained getitem must yield the identical object.
        Na = N[rels(0), rels(1), rels(2)]
        Nb = N[rels(0)][rels(1)][rels(2)]
        assert Na is Nb

    @pytest.fixture
    def link_resources(self, rels, names, index_page, page):
        """Registered page graph: index -> first -(list of two links named
        name_x/name_y)-> second1/second2 -> third_1/third_2."""
        first = page(names(0), 1)
        index_page['_links'][rels(0)] = link_to(first)
        register_hal_page(first)
        second1 = page(names(1), 1)
        second2 = page(names(1), 2)
        first['_links'][rels(1)] = [
            {
                'href': uri_of(second1),
                'name': 'name_x',
            },{
                'href': uri_of(second2),
                'name': 'name_y',
            }
        ]
        register_hal_page(second1)
        register_hal_page(second2)
        third_1 = page(names(2), 1)
        third_2 = page(names(2), 2)
        second1['_links'][rels(2)] = link_to(third_1)
        second2['_links'][rels(2)] = link_to(third_2)
        register_hal_page(third_1)
        register_hal_page(third_2)

    def test_linklist_in_sequence(self, N, link_resources, rels):
        # A 'name':'value' slice inside the tuple selects one link from a
        # link list by its 'name' property.
        Nchained = N[rels(0), rels(1), 'name':'name_x', rels(2)]
        Nfirst = N[rels(0)]
        Nsecondlist = Nfirst[rels(1)]
        Nsecond = Nsecondlist.get_by('name', 'name_x')
        Nthird = Nsecond[rels(2)]
        assert Nchained is Nthird

    def test_linklist_index(self, N, link_resources, rels):
        # A bare integer inside the tuple indexes into a link list.
        Nchained = N[rels(0), rels(1), 1, rels(2)]
        Nfirst = N[rels(0)]
        Nsecondlist = Nfirst[rels(1)]
        Nsecond = Nsecondlist[1]
        Nthird = Nsecond[rels(2)]
        assert Nchained is Nthird

    def test_bad_rel(self, N, link_resources, rels):
        # Unknown rels raise OffTheRailsException at the failing step.
        with pytest.raises(exc.OffTheRailsException):
            N[rels(1)]
        with pytest.raises(exc.OffTheRailsException):
            N[rels(0), rels(0)]

    def test_bad_name(self, N, link_resources, rels):
        with pytest.raises(exc.OffTheRailsException):
            N[rels(0), rels(1), 'name':'badname']

    def test_bad_index(self, N, link_resources, rels):
        with pytest.raises(exc.OffTheRailsException):
            N[rels(0), rels(1), 100]

    @pytest.fixture
    def template_uri(self, index_uri):
        """URI template with a single {id} variable under the index."""
        return index_uri + 'tpl/{id}'

    @pytest.fixture
    def tpl_rel(self, curify):
        return curify('tpl')

    @pytest.fixture
    def tpl_resources(self, page, tpl_rel, template_uri, index_page):
        """Advertise template_uri as a templated link on the index and
        register three concrete 'tpl' pages it can expand to."""
        index_page['_links'][tpl_rel] = {
            'href': template_uri,
            'templated': True,
            'title': 'Template link',
        }
        for i in range(3):
            resource = page('tpl', i)
            register_hal_page(resource)
        return template_uri

    def test_template_sequence(self, N, tpl_resources, tpl_rel):
        # Expanding the same templated rel with different ids yields three
        # independently fetchable navigators.
        Na = N[tpl_rel](id=0)
        Nb = N[tpl_rel](id=1)
        Nc = N[tpl_rel](id=2)
        Na(), Nb(), Nc()
        assert Na.status == (200, 'OK')
        assert Nb.status == (200, 'OK')
        assert Nc.status == (200, 'OK')
class TestEmbedded:
    '''tests for embedded document features'''

    @pytest.fixture
    def blog_posts(self, http, page):
        '''Posts are both linked and embedded'''
        _posts = [page('post', x) for x in range(3)]
        for post in _posts:
            register_hal_page(post)
        return _posts

    @pytest.fixture
    def comments(self, page):
        '''Comments are embedded only and have no self link'''
        comments = [page('comments', x) for x in range(3)]
        for comment in comments:
            del comment['_links']['self']
        return comments

    @pytest.fixture
    def nested(self, page):
        '''Nested are several layers deep embedded docs. They are not
        linked to, but do have urls.
        '''
        nested = [page('nested', n) for n in range(3)]
        # Embed each doc inside its predecessor: nested[0] contains
        # nested[1], which contains nested[2].
        for (nest1, nest2) in zip(nested[:-1], nested[1:]):
            nest1['_embedded'] = {
                'xx:nested': nest2
            }
            register_hal_page(nest1)
        register_hal_page(nest2)  # register remaining page
        return nested

    @pytest.fixture
    def index(self, index_uri, comments, blog_posts, http, nested):
        """Index document exercising every link/embed combination used by
        the tests below (linked-only, embedded-only, and both)."""
        doc = {
            '_links': {
                'curies': [{
                    'name': 'xx',
                    'href': index_uri + 'rels/{rel}',
                    'templated': True,
                }],
                'self': {'href': index_uri},
                'first': link_to(blog_posts[0]),
                'xx:second': link_to(blog_posts[1]),
                'xx:posts': [link_to(post) for post in blog_posts],
                'xx:nested-links': [link_to(nest) for nest in nested],
                'xx:non-embedded-nest': link_to(nested[0]),
            },
            'data': 'Some data here',
            '_embedded': {
                'xx:posts': blog_posts,
                'xx:comments': comments,
            }
        }
        register_hal_page(doc)
        return doc

    def test_comments_are_orphans(self, N, index):
        '''Checks that all embedded documents that don't have self
        links are OrphanHALNavigators
        '''
        comments = N['xx:comments']
        for comment in comments:
            assert comment.parent is N

    def test_posts_arent_orphans(self, N, index):
        # Embedded posts DO carry self links, so they get real uris.
        posts = N['xx:posts']
        for i, post in enumerate(posts):
            href = index['_embedded']['xx:posts'][i]['_links']['self']['href']
            assert post.uri == href

    def test_length_accurate(self, N, index, comments):
        assert len(N['xx:comments']) == len(comments)

    def test_embedded_only_rel_in_navigator(self, N, index):
        N.fetch()
        assert 'xx:comments' in N

    def test_embedded_only_rel_not_in_links(self, N, index):
        assert 'xx:comments' not in N.links()

    def test_embedded_only_rel_in_embedded(self, N, index):
        assert 'xx:comments' in N.embedded()

    def test_both_rel_in_navigator(self, N, index):
        # 'xx:posts' appears in both _links and _embedded.
        N.fetch()
        assert 'xx:posts' in N

    def test_both_rel_in_links(self, N, index):
        assert 'xx:posts' in N.links()

    def test_both_rel_in_embedded(self, N, index):
        assert 'xx:posts' in N.embedded()

    def test_embedded_default_curie(self, N, index):
        # With a default curie set, the unqualified rel resolves to the
        # same navigator object as the qualified one.
        N._core.default_curie = 'xx'
        p1 = N['posts']
        assert p1 is N['xx:posts']

    def test_nested_works(self, N, index, nested):
        nest1 = N['xx:non-embedded-nest']
        nest2 = nest1['xx:nested']
        nest3 = nest2['xx:nested']
        nest3_chained = N['xx:non-embedded-nest', 'xx:nested', 'xx:nested']
        assert nest3 is nest3_chained

    def test_fetch_then_get_embedded(self, N, index):
        # for this test, nested[0] is linked from index, but not
        # embedded anywhere. nested[1] is embedded in nested[0], but
        # is also its own resource. We want to ensure the same
        # navigator object is used for both
        nested1 = N['xx:nested-links'][1]
        nested1.fetch()
        assert N['xx:non-embedded-nest', 'xx:nested'] is nested1

    def test_get_embedded_then_fetch(self, N, index):
        # reverse order of previous test
        nested1 = N['xx:non-embedded-nest', 'xx:nested']
        nested1_linked = N['xx:nested-links'][1]
        # Nothing we've done to nested1_linked should have resolved it
        # except that we already saw it as an embedded doc.
        assert nested1_linked.resolved
        assert nested1 is nested1_linked

    def test_cached_embedded_requests(self, N, index, http):
        N.fetch()
        main_nav_request = http.last_request
        embedded = N.embedded()['xx:posts'][0]
        # get the cached state of the embedded resource, no additional
        # http requests should be made.
        embedded()
        assert main_nav_request is http.last_request
        # if we explicitly refetch the embedded (not orphaned) Navigator, we
        # expect that a new http request is being made.
        embedded.fetch()
        assert main_nav_request is not http.last_request
class TestCreate:
    """Tests for creating resources via POST (HALNavigator.create)."""

    @pytest.fixture
    def new_resource(self, page):
        """The resource the fake server pretends to create."""
        grelp = page('grelp', 0)
        register_hal_page(grelp)
        return grelp

    @pytest.fixture(params=[302, 303, 202, 202, 303])
    def post_status(self, request):
        """Status codes the POST endpoint may answer with."""
        return request.param

    @pytest.fixture
    def hosts(self, page, http, post_status, new_resource):
        """POST endpoint answering `post_status` with a Location header
        pointing at `new_resource`."""
        host_page = page('hosts', 0)
        register_hal_page(
            host_page,
            method='POST',
            status=post_status,
            location=uri_of(new_resource),
        )
        return host_page

    @pytest.fixture
    def index(self, hosts, index_uri):
        """Index document exposing the POST endpoint as xx:create-hosts."""
        doc = {
            '_links': {
                'self': {'href': index_uri},
                'xx:create-hosts': link_to(hosts),
            }
        }
        register_hal_page(doc)
        return doc

    def test_uses_post(self, N, index, http):
        N['xx:create-hosts'].create({'name': 'foo'})
        last_request_method = http.last_request.method
        assert last_request_method == 'POST'

    def test_content_type_json(self, N, index, http):
        N['xx:create-hosts'].create({'name': 'foo'})
        last_content_type = http.last_request.headers['Content-Type']
        assert last_content_type == 'application/json'

    def test_body_is_correct(self, N, index, http):
        N['xx:create-hosts'].create({'name': 'foo'})
        last_body = http.last_request.body
        assert last_body == b'{"name": "foo"}'

    def test_new_resource_uri_correct(
            self, N, index, new_resource, post_status):
        # 202 Accepted keeps the caller on a fetched child of the POST
        # target; redirects hand back an unfetched navigator at Location.
        N2 = N['xx:create-hosts']
        N3 = N2.create({'name': 'foo'})
        if post_status == 202:
            assert N3.parent.uri == N2.uri
            assert N3.fetched
        else:
            assert N3.uri == uri_of(new_resource)
            assert not N3.fetched

    def test_headers_passed(self, N, index, http):
        headers = {'X-Custom': 'foo'}
        N['xx:create-hosts'].create({'name': 'foo'}, headers=headers)
        custom_header = http.last_request.headers['X-Custom']
        assert custom_header == 'foo'

    def test_files_passed(self, N, index, http):
        headers = {'X-Custom': 'foo'}
        N['xx:create-hosts'].create(files={'file': ('filename', )}, headers=headers)
        # Bug fix: the request OBJECT used to be compared to 'foo'; inspect
        # the forwarded header instead, mirroring test_headers_passed.
        custom_header = http.last_request.headers['X-Custom']
        assert custom_header == 'foo'

    def test_empty_post(self, N, index):
        # Just want to ensure no error is thrown
        N['xx:create-hosts'].create()
| 18,394 | 5,940 |
import os

import vonage
from dotenv import load_dotenv

# Load environment settings from the .env file one directory above this file.
# Bug fix: `join` and `dirname` were called unqualified without ever being
# imported (NameError at runtime); use the os.path-qualified forms.
dotenv_path = os.path.join(os.path.dirname(__file__), "../.env")
load_dotenv(dotenv_path)

VONAGE_APPLICATION_ID = os.environ.get("VONAGE_APPLICATION_ID")
VONAGE_APPLICATION_PRIVATE_KEY_PATH = os.environ.get("VONAGE_APPLICATION_PRIVATE_KEY_PATH")

# Client authenticated via application id + private key (JWT auth).
client = vonage.Client(
    application_id=VONAGE_APPLICATION_ID,
    private_key=VONAGE_APPLICATION_PRIVATE_KEY_PATH,
)
| 405 | 163 |
#!/usr/bin/env python
| 22 | 10 |
import sys
def read(f):
    """Read contest input from path f, solve every case, print results."""
    with open(f) as infile:
        lines = infile.readlines()
        total_cases = int(lines[0])
        cursor = 1
        for case_no in range(1, total_cases + 1):
            N, Q, H, A, UV, cursor = _get_case(cursor, lines)
            answer = solve(N, Q, H, A, UV)
            print('Case #%i: %0.6f' % (case_no, answer))
def _get_case(line, lines):
N, Q = [int(s) for s in lines[line].split()]
H = []
for r in range(N):
row = [int(s) for s in lines[line+1+r].split()]
H.append(row)
line = line + 1 + r
A = []
for r in range(N):
row = [int(s) for s in lines[line+1+r].split()]
A.append(row)
line = line + 1 + r
UV = []
for r in range(Q):
row = [int(s) for s in lines[line+1+r].split()]
UV.append(row)
line = line + 1 + r
return N, Q, H, A, UV, line
def solve(N, Q, H, A, UV):
    """Solve a single case.

    NOTE(review): this is an unfinished stub -- it initialises a BFS-style
    worklist but never traverses it and implicitly returns None, which will
    crash the '%0.6f' formatting in read(). The intended algorithm still
    needs to be written.
    """
    city = 0
    horse = H[0]
    visited = []
    queue = [0]
# Entry point: solves the bundled sample by default; switch to the
# commented line to take the input path from the command line instead.
read('sample.in')
#read(sys.argv[1])
| 931 | 401 |
import django_filters
from .models import Product
class ProductFilter(django_filters.rest_framework.FilterSet):
    """
    Filter class for the Product ("business line") API: allows filtering
    the queryset by the numeric `pid` field.
    """
    pid = django_filters.NumberFilter("pid")

    class Meta:
        model = Product
        fields = ['pid', ]
| 254 | 78 |
import re
import os
import pickle
import sys
from math import log
DICT_NAME = "dict.txt"          # output: word-frequency dictionary
BI_GRAM_FILE = "bi_gram.txt"    # output: bigram counts
HMM_PROB = "hmm_prob"           # output: pickled HMM parameters
SMALL_PROB = 1e-200             # floor applied before log() so probabilities are never 0
# Matches runs of CJK ideographs, ASCII letters/digits, fullwidth characters,
# and the symbols + # & . _ % (note: the '%%' in the class is a redundant
# duplicated '%').
re_ch = re.compile("([\u4E00-\u9FD5a-zA-Z0-9\uFF10-\uFF5A+#&\._%%]+)", re.U)
re_stop = re.compile("([。,]+)", re.U)
# Process a pre-segmented corpus to produce a dictionary and a bigram list.
# Corpus format: one sentence per line, words separated by spaces.
def process(input_file, output_path):
    """Build word-frequency and bigram count files from a segmented corpus.

    Writes DICT_NAME ("word count" per line, sorted) and BI_GRAM_FILE
    ("w1 w2 count" per line, with <BOS>/<EOS> pseudo-words marking sentence
    boundaries) into output_path.
    """
    words = {}
    bi_grams = {}
    max_word_length = 0
    m_w = ''  # the longest word seen, for the summary printout
    with open(input_file, 'r', encoding='utf8') as f:
        for line in f:
            line = line.strip()
            if not line: continue
            word_line = line.split()
            last_word = '<BOS>'
            for w in word_line:
                if re_ch.match(w):
                    words[w] = words.get(w, 0) + 1  # non-matching tokens are punctuation like 、 , etc.
                    if last_word:
                        bg = last_word + ' ' + w
                        bi_grams[bg] = bi_grams.get(bg, 0) + 1
                    last_word = w
                    if len(w) > max_word_length:
                        max_word_length = len(w)
                        m_w = w
                elif re_stop.match(w):
                    # Sentence-ending punctuation: close the current chain
                    # with <EOS> and restart from <BOS>.
                    if last_word:
                        bg = last_word + ' <EOS>'
                        bi_grams[bg] = bi_grams.get(bg, 0) + 1
                    last_word = '<BOS>'
            # End of line also terminates the chain with <EOS>.
            if last_word:
                bg = last_word + ' <EOS>'
                bi_grams[bg] = bi_grams.get(bg, 0) + 1
    print("字典大小:%d" % len(words))
    print("最长词长度:%d %s" % (max_word_length,m_w))
    with open(os.path.join(output_path, DICT_NAME), 'w', encoding='utf8') as f:
        for k in sorted(words):
            f.write("%s %d\n" % (k, words[k]))
    print("2-gram 数量:%d" % len(bi_grams))
    with open(os.path.join(output_path, BI_GRAM_FILE), 'w', encoding='utf8') as f:
        for k in sorted(bi_grams):
            f.write("%s %d\n" % (k, bi_grams[k]))
# Estimate the probabilities of the HMM model.
def process_hmm(input_file, output_path):
    """Estimate HMM log-probabilities from the segmented corpus.

    States: B(egin), M(iddle), E(nd), S(ingle char). Pickles the transition
    matrix A, the emission matrix B, and the initial distribution Pi (in
    that order) into output_path/HMM_PROB.
    """
    line_count = 0
    state_list = ['B', 'M', 'E', 'S']
    A = {}
    B = {}
    Pi = {}
    State_Count = {}
    for s in state_list:
        A[s] = {t: 0. for t in state_list}  # transition counts
        B[s] = {}                           # emission (observation) counts
        Pi[s] = 0.                          # initial-state counts
        State_Count[s] = 0
    print('开始估计HMM概率...')
    with open(input_file, 'r', encoding='utf8') as f:
        for line in f:
            # NOTE(review): line_count is incremented before the blank-line
            # check, so blank lines inflate the Pi normaliser -- confirm.
            line_count += 1
            line = line.strip()
            if not line: continue
            word_list = line.split()
            chars = ''.join(word_list)
            states = []
            # Label every character with its position-in-word state.
            for w in word_list:
                if len(w) == 1: states.append('S')
                else: states += ['B']+['M']*(len(w)-2)+['E']
            assert len(chars) == len(states)
            i = 0
            for s in states:
                State_Count[s] += 1
                if i == 0:
                    Pi[s] += 1.
                else:
                    A[states[i-1]][s] += 1.
                B[s][chars[i]] = B[s].get(chars[i], 0) + 1.
                i += 1
    # Normalise counts to probabilities and take logs, flooring at
    # SMALL_PROB to avoid log(0).
    A = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in A.items()}
    B = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in B.items()}
    Pi = {k: log(max(v/line_count, SMALL_PROB)) for k, v in Pi.items()}
    with open(os.path.join(output_path, HMM_PROB), 'wb') as f:
        pickle.dump(A, f)
        pickle.dump(B, f)
        pickle.dump(Pi, f)
if __name__ == '__main__':
    # Expect: corpus_process.py <corpus_file> <out_dir>
    args = sys.argv[1:]
    if len(args) < 2:
        print('corpus_process.py <corpus_file> <out_dir>')
        sys.exit()
    process(args[0], args[1])
    process_hmm(args[0], args[1])
    print("处理完成")
| 3,724 | 1,526 |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import requests
import time
import os
import csv
root_url = "https://seekingalpha.com"
query = "stock repurchase program"
# Bug fix: 'replcae' -> 'replace' (the typo raised AttributeError on startup).
url = "https://seekingalpha.com/search?q=" + query.replace(" ", "+")

chrome_driver_path = "/usr/lib/chromium-browser/chromedriver"  # add your own driver path
opts = Options()
opts.add_argument("--headless")
opts.add_argument("--no-sandbox")
opts.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")
driver = webdriver.Chrome(chrome_driver_path, options=opts)
driver.get(url)
time.sleep(5)  # let the dynamic search results render

soup = BeautifulSoup(driver.page_source, 'lxml')
result_list = soup.find("div", {"id": "result_list"})
result_page = result_list.find("div", {"class": "result-pages"})

fields = ['Title', 'Link', 'MetaData', 'Summary']
csv_rows = []
# Walk every pagination link and scrape each results page.
for a in result_page.find_all("a"):
    link = a['href']
    # NOTE(review): pagination hrefs are appended to the full search url; if
    # they are site-absolute paths this should be root_url + link -- verify.
    new_url = url + link
    driver.get(new_url)
    time.sleep(5)
    new_soup = BeautifulSoup(driver.page_source, 'lxml')
    new_result_list = new_soup.find("div", {"id": "result_list"})
    items = new_result_list.find_all("li")
    for item in items:
        item_link = item.find("div", {"class": "item-link"})
        item_link_a = item_link.find("a")
        item_meta = item.find("div", {"class": "item-metadata"})
        item_summary = item.find("div", {"class": "item-summary"})
        name = item_link_a.text.replace(" ", "").replace("\n", "")
        link = root_url + item_link_a['href']
        metadata = item_meta.text.replace(" ", "")
        summary = item_summary.text
        csv_rows.append([str(name), str(link), str(metadata), str(summary)])

# newline='' prevents the csv module from emitting blank rows on Windows.
with open("SeekingAlpha.csv", 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    csvwriter.writerows(csv_rows)

print("Done")
import eval_utils
import json
from falx.eval_interface import FalxEvalInterface
from falx.utils import table_utils
from timeit import default_timer as timer
import numpy as np
from pprint import pprint
from falx.visualization.chart import VisDesign, LayeredChart
from falx.visualization.matplotlib_chart import MatplotlibChart
import falx.visualization.visual_trace as visual_trace
np.random.seed(2019)  # fixed seed so trace sampling is reproducible across runs
def get_mark_type(chart):
    """Return the list of mark type names used by a (possibly layered)
    vega-lite chart object."""
    spec = chart.to_vl_obj()
    if "mark" in spec:
        raw_marks = [spec["mark"]]
    else:
        raw_marks = [layer["mark"] for layer in spec["layer"]]
    # A mark is either a bare name or a {"type": ...} record.
    return [mark if isinstance(mark, str) else mark["type"] for mark in raw_marks]
def process_data(bid, num_samples_dict):
    """Load benchmark `bid`, render its ground-truth chart, and attach
    sampled / full visual traces to the benchmark record.

    num_samples_dict maps a sample count k to the list of benchmark ids
    that should use k samples.
    """
    f_in = f"../../benchmarks/{bid}.json"
    # Pick the smallest k in {2,3,4} whose bucket contains bid; if none
    # matches, k (and thus num_samples) remains 4 after the loop.
    for k in [2, 3, 4]:
        if bid in num_samples_dict[k]:
            break
    num_samples = k
    with open(f_in, "r") as f:
        data = json.load(f)
    #print(data)
    input_data = data["input_data"]
    extra_consts = data["constants"] if "constants" in data else []
    # Reconstruct the ground-truth visualization and its full visual trace.
    vis = VisDesign.load_from_vegalite(data["vl_spec"], data["output_data"])
    full_trace = vis.eval()
    partitioned = visual_trace.partition_trace(full_trace)
    sample_trace = []
    raw_sample_trace = []
    raw_full_trace = []
    for key in partitioned:
        ty = "bar" if key in ["BarV","BarH"] else ("line" if key == "Line" else ("point" if key == "Point" else "area"))
        traces = partitioned[key]
        # Halve the sample count for connected marks (line/area).
        # NOTE(review): num_samples is reassigned here, so with several
        # line/area keys the halving compounds across iterations -- confirm
        # that is intended.
        num_samples = int(np.ceil(num_samples / 2.0)) if ty == "line" or ty == "area" else num_samples
        indexes = np.random.choice(list(range(len(traces))), num_samples)
        samples = [traces[i] for i in indexes]
        tr_table = visual_trace.trace_to_table(samples)
        full_tr_table = visual_trace.trace_to_table(traces)
        for tr in full_tr_table[key]:
            raw_full_trace.append({"type": ty, "props": tr})
        for tr in tr_table[key]:
            raw_sample_trace.append({"type": ty, "props": tr})
            # Split two-endpoint marks into single-point samples, renaming
            # endpoint-specific keys (x1/x2 -> x, etc.).
            if ty == "line":
                kreplace = lambda x: "x" if x in ["x1", "x2"] else "y" if x in ["y1", "y2"] else x
                sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "y1", "size", "color", "column"] if k in tr}})
                sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "y2", "size", "color", "column"] if k in tr}})
            elif ty == "bar":
                kreplace = lambda x: "x" if x in ["x1"] else "y" if x in ["y1"] else x
                sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in tr}})
            elif ty == "point":
                sample_trace.append({"type": ty, "props": tr})
            elif ty == "area":
                kreplace = lambda x: "x" if x in ["x1", "x2"] else ("y2" if x in ["yb1", "yb2"] else "y" if x in ["yt1", "yt2"] else x)
                sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "yt1", "yb1", "color", "column"] if k in tr}})
                sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "yt2", "yb2", "color", "column"] if k in tr}})
    data["sample_trace"] = sample_trace
    data["raw_sample_trace"] = raw_sample_trace
    data["raw_full_trace"] = raw_full_trace
    return data
if __name__ == '__main__':
    # All benchmarks to process: 23 synthetic tests plus 60 numbered cases.
    benchmark_ids = [
        "test_1", "test_2", "test_3", "test_4", "test_5", "test_6", "test_7",
        "test_8", "test_9", "test_10", "test_11", "test_12", "test_13", "test_14",
        "test_15", "test_16", "test_17", "test_18", "test_19", "test_20", "test_21",
        "test_22", "test_23",
        "001", "002", "003", "004", "005", "006", "007", "008", "009", "010",
        "011", "012", "013", "014", "015", "016", "017", "018", "019", "020",
        "021", "022", "023", "024", "025", "026", "027", "028", "029", "030",
        "031", "032", "033", "034", "035", "036", "037", "038", "039", "040",
        "041", "042", "043", "044", "045", "046", "047", "048", "049", "050",
        "051", "052", "053", "054", "055", "056", "057", "058", "059", "060",
    ]
    # For each sample count k, the benchmark ids that should use k samples
    # (consumed by process_data; ids absent from buckets 2-4 fall back to 4).
    num_samples_dict = {
        1: ['test_21', '050', '025', '058', '001', '011', 'test_7', '042', '032', '012', 'test_15', 'test_10', '023', 'test_1', '052', 'test_6', '035', '010', '006', '054', '051', 'test_14', '056', '024', '017', '053', '020', '033', '031', 'test_8', '047', '030', '029', 'test_2', 'test_11', 'test_13'],
        2: ['test_21', '025', '050', '001', '011', '058', '012', '032', 'test_7', 'test_10', '010', '017', '023', '042', '052', 'test_15', '035', 'test_6', 'test_1', '006', '054', '051', 'test_14', '024', '053', '056', '009', '020', '033', 'test_8', '031', '047', '030', 'test_16', '029', 'test_2', '034', 'test_13', '014', '037', 'test_12', 'test_11', 'test_23'],
        3: ['test_21', '015', '050', '001', '025', '011', '058', '012', '006', '032', 'test_7', '010', 'test_10', '017', '023', 'test_1', '042', 'test_6', 'test_15', '052', '035', '005', '045', '054', '051', 'test_14', '007', '038', '041', '022', '024', '053', '056', '020', '009', '033', 'test_22', '004', 'test_8', 'test_2', '031', 'test_13', '047', 'test_16', '029', '030', '034', '014', '037', 'test_12', 'test_11', 'test_23', '044'],
        4: ['058', '015', 'test_21', '050', '006', '011', '032', 'test_7', '010', 'test_10', '012', '017', '025', 'test_14', '023', '042', 'test_15', 'test_1', '052', 'test_6', '005', '035', '045', '001', '051', '038', '007', '041', '022', '054', '016', '024', '056', '053', '009', '020', 'test_17', '033', '021', '008', '044', '031', '030', '047', 'test_16', 'test_22', '004', '029', 'test_13', '034', 'test_2', 'test_8', '014', 'test_11', 'test_12', '037', 'test_23']
    }
    #benchmark_ids = ["test_4"]
    full_data = []
    for i, bid in enumerate(benchmark_ids):
        data = process_data(bid, num_samples_dict)
        full_data.append(data)
    # Emit the augmented benchmark records as one JSON array on stdout.
    print(json.dumps(full_data))
| 6,071 | 2,710 |
from collections import deque
def read_matrix(dimensions):
    """Read `dimensions` lines from stdin, each a space-separated row of ints."""
    return [[int(token) for token in input().split(' ')]
            for _ in range(dimensions)]
def get_cells(matrix):
    """Return (count, total) over the strictly positive cells of matrix."""
    alive = 0
    total = 0
    for value in (cell for row in matrix for cell in row):
        if value > 0:
            alive += 1
            total += value
    return alive, total
dimensions = int(input())
matrix = read_matrix(dimensions)
# Bombs arrive as "row,col" pairs separated by spaces; processed FIFO.
bombs = deque([[int(y) for y in x.split(",")] for x in input().split(' ')])

for b in range(len(bombs)):
    bomb_row, bomb_col = bombs.popleft()
    damage = matrix[bomb_row][bomb_col]
    # A bomb only detonates if its own cell is still alive (> 0).
    if matrix[bomb_row][bomb_col] > 0:
        # Subtract `damage` from every living cell in the 3x3 neighbourhood
        # (including the bomb cell itself), clipped to the board edges.
        for row in range(3):
            for col in range(3):
                if 0 <= bomb_row - 1 + row < len(matrix) and 0 <= bomb_col - 1 + col < len(matrix):
                    if matrix[bomb_row - 1 + row][bomb_col - 1 + col] > 0:
                        matrix[bomb_row - 1 + row][bomb_col - 1 + col] -= damage

alive, sum = get_cells(matrix)  # NOTE(review): `sum` shadows the builtin
print(f'Alive cells: {alive}')
print(f'Sum: {sum}')
for el in matrix:
    print(' '.join(str(x) for x in el))
| 1,180 | 443 |
"""
Contains class that runs inferencing
"""
import torch
import numpy as np
from networks.RecursiveUNet import UNet
from utils.utils import med_reshape
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):
        """Create the agent, optionally loading weights from disk.

        Arguments:
            parameter_file_path {str} -- optional path to a saved state_dict
            model -- model to use; a fresh 3-class UNet is built if None
            device {str} -- torch device string
            patch_size {int} -- conformant square patch size (Y and Z dims)
        """
        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        # Pad Y/Z up to the model's square patch size, then delegate.
        padded = med_reshape(
            volume, new_shape=(volume.shape[0], self.patch_size, self.patch_size))
        return self.single_volume_inference(padded)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with per-voxel class predictions (same shape as
            the input volume)
        """
        self.model.eval()

        # Previous revision appended only the first slice and then treated
        # the arrays as run-length-encoded strings, ending with a call to a
        # nonexistent list.asarray() -- it could never run. Instead, feed
        # each slice along the X (0th) axis through the network and argmax
        # over the class dimension to obtain a label mask per slice.
        slices = []
        for ix in range(volume.shape[0]):
            # Shape the slice as a (batch=1, channel=1, Y, Z) float tensor.
            slice_data = volume[ix, :, :].astype(np.single)
            tensor = torch.from_numpy(slice_data).unsqueeze(0).unsqueeze(0).to(self.device)
            with torch.no_grad():
                prediction = self.model(tensor)
            # Class with highest score per voxel.
            mask = torch.argmax(prediction, dim=1).cpu().numpy()[0]
            slices.append(mask)

        return np.array(slices)
| 2,548 | 740 |
# Copyright (c) 2019 Cable Television Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for http_session.py
import time
import requests
import unittest
import logging
from trans_sec.controller.http_server_flask import SDNControllerServer
logging.basicConfig(level=logging.DEBUG)  # verbose output while debugging tests
logger = logging.getLogger('http_server_flask_tests')
class HttpSessionTests(unittest.TestCase):
    """
    Unit tests for utility functions in convert.py
    """
    def setUp(self):
        """
        Start HTTP server
        :return:
        """
        self.http_server = SDNControllerServer(TestSDNController())
        self.http_server.start()

        # TODO - sleeping to wait for the server to start. Look at the
        # http_server class to see if the start() call can block
        time.sleep(1)

    def tearDown(self):
        # Shut the embedded server down after every test.
        self.http_server.stop()

    def test_agg_attack_url_params(self):
        """The aggAttack endpoint accepts the event as query parameters,
        as wrapped JSON, and as bare JSON, answering 201 in all cases."""
        # Test attack with params
        param_attack = {
            'src_mac': '00:00:00:00:00',
            'dst_ip': '10.1.0.1',
            'dst_port': '1234',
        }
        # Note: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack',
                                params=param_attack)
        self.assertEqual(201, ret_val.status_code)

        json_attack = {'event': param_attack}
        ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack',
                                json=json_attack)
        self.assertEqual(201, ret_val.status_code)

        ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack',
                                json=param_attack)
        self.assertEqual(201, ret_val.status_code)
class TestSDNController:
    """Minimal stand-in for the SDN controller that just logs each callback."""

    def __init__(self):
        pass

    @staticmethod
    def add_attacker(body):
        logging.info('Adding an attacker - [%s]', body)

    @staticmethod
    def remove_attacker(body):
        logging.info('Removing an attacker - [%s]', body)

    @staticmethod
    def add_agg_attacker(body):
        # Fixed copy-pasted log text: this is the aggregate-attacker hook.
        logging.info('Adding an agg attacker - [%s]', body)

    @staticmethod
    def remove_agg_attacker(body):
        # Fixed copy-pasted log text: this is the aggregate-attacker hook.
        logging.info('Removing an agg attacker - [%s]', body)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import numpy as np
import os
# Resolve the compiled shared libraries relative to this file's directory.
path = '/'.join(os.path.abspath(__file__).split('/')[:-1])
fftw_path = 'cpp_src/fftw-2.1.5/fftw/.libs/libfftw.so.2'
crv_path = 'cpp_src/curvelet.so'
# libfftw is loaded first -- presumably so curvelet.so's FFTW symbols
# resolve against it; TODO confirm.
ctypes.cdll.LoadLibrary(os.path.join(path,fftw_path))
curlib = ctypes.cdll.LoadLibrary(os.path.join(path,crv_path))
def curvelet(m,r_scale,n_scales=7,n_wedges=10,ac=1):
    """Run the C curvelet transform over 2-D array `m` and return the result.

    The input is copied into a contiguous float64 array which the C routine
    mutates in place; the (modified) copy is returned.

    NOTE(review): the buffer pointer is cast to double** although it points
    at a flat double buffer -- presumably the C side expects row-major data
    accessed this way; confirm against cpp_src/curvelet.c. Also note
    `r_scale` is passed to C as r_scale-1 (zero-based scale index).
    """
    assert r_scale<=n_scales,'Incompatible scale request.'
    m = np.array(m, dtype=np.double)  # fresh copy; the C call writes into it
    nx = m.shape[0]
    ny = m.shape[1]
    aptr = m.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_double)))
    curlib.curvelet(aptr,nx,ny,n_scales,r_scale-1,n_wedges,ac)
    return m
| 764 | 327 |
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.http import JsonResponse
def json_response(data, status_code=200):
    """Serialize `data` as a JSON HTTP response with the given status code.

    Idiom fix: JsonResponse forwards `status` to HttpResponse, so pass it
    directly instead of mutating status_code after construction.
    """
    return JsonResponse(data, status=status_code)
| 296 | 94 |
#texteditor.py
#Description: uses csv-format file 'dictionary' to make a series of replacements in 'oldfile' and create 'newfile'
#Execute as "python texteditor.py oldfile dictionary"
#Creates 'newfile'
#see also README.md
import os
import sys

# Command-line usage:
#   python texteditor.py oldfile newfile dictionary column directory
# Reads the CSV-style `dictionary`, then replaces every occurrence of each
# first-column word in `oldfile` with the word from the requested column,
# writing the result to `newfile` inside `directory`.
arglist = sys.argv
oldfile = str(arglist[1])
newfile = str(arglist[2])
dic = str(arglist[3])
colonne = int(arglist[4])
chosenDirectory = str(arglist[5])
#Todo: default 'newfile=oldfile' if no third argument
os.chdir(chosenDirectory)

# Build the substitution dictionary from the CSV-style file.
# Bug fixes vs. the previous revision:
#   * the file was opened in binary mode ('rb') but parsed as text, which
#     raised TypeError when splitting bytes with a str separator
#   * csv.reader was called on a misspelled name ('mydictionnary') without
#     importing csv, and its result was never used -- removed
d = {}
colonne = colonne - 1  # convert 1-based CLI column to 0-based index
with open(dic, 'r') as mydictionary:
    for line in mydictionary:
        x = line.split(',')
        a = x[0]
        b = x[colonne]
        # '+' in the replacement column stands for a literal comma.
        # NOTE(review): if `colonne` selects the LAST column, b keeps its
        # trailing newline exactly as before -- confirm that is intended.
        b = b.replace('+', ',')
        d[a] = b

# Read the whole source file, apply every substitution, write the result.
with open(oldfile, 'r') as thefile:
    content = thefile.read()

for old_word, new_word in d.items():
    content = content.replace(old_word, new_word)

# Todo: allow editing in place when new and old filenames are identical.
with open(newfile, 'w+') as mynewfile:
    mynewfile.write(content)
# Import all external libraries
import json
import datetime
import time
import os
import io
import errno
from flask import Flask, render_template, send_file, request, session, redirect, render_template_string
from flask_socketio import SocketIO, emit, send
from threading import Thread, Lock
import resources
import sys
print(sys.argv)  # debug: show how the server process was launched

# This is a dictionary of all the settings Flask has. Since I have a complex setup it is best to have the
# config ready at all times so I can quickly change server settings. This dictionary is a default dictionary
# and will be overwritten when the server starts with the settings specified in CONFIG.txt
CONFIG = {
    'JSON_AS_ASCII': True,
    'USE_X_SENDFILE': False,
    'SESSION_COOKIE_PATH': None,
    'SESSION_COOKIE_DOMAIN': None,
    'SESSION_COOKIE_NAME': 'session',
    'DEBUG': False,
    'LOGGER_HANDLER_POLICY': 'always',
    'LOGGER_NAME': None,
    'SESSION_COOKIE_SECURE': False,
    'SECRET_KEY': None,
    'EXPLAIN_TEMPLATE_LOADING': False,
    'MAX_CONTENT_LENGTH': None,
    'PROPAGATE_EXCEPTIONS': None,
    'APPLICATION_ROOT': None,
    'SERVER_NAME': None,
    'PREFERRED_URL_SCHEME': 'http',
    'JSONIFY_PRETTYPRINT_REGULAR': True,
    'TESTING': False,
    'PERMANENT_SESSION_LIFETIME': datetime.timedelta(31),
    'TEMPLATES_AUTO_RELOAD': None,
    'TRAP_BAD_REQUEST_ERRORS': False,
    'JSON_SORT_KEYS': True,
    'JSONIFY_MIMETYPE': 'application/json',
    'SESSION_COOKIE_HTTPONLY': True,
    'SEND_FILE_MAX_AGE_DEFAULT': datetime.timedelta(0, 43200),
    'PRESERVE_CONTEXT_ON_EXCEPTION': None,
    'SESSION_REFRESH_EACH_REQUEST': True,
    'TRAP_HTTP_EXCEPTIONS': False
}

# This library contains game data, such as the name, and whether or not the game is active. A lot of this isn't
# currently implemented; it outlines a plan for future development. Some elements like USER_LIST and
# ACTIVE are used throughout the program.
GAME = {
    'GAME_NAME': None,
    'DESCRIPTION': None,
    'DIFFICULTY': 0,
    'ONLINE_MODE': True,
    'WHITE_LIST': False,
    'BLACK_LIST': False,
    'CHEATS': False,
    'MAX_GAME_LENGTH': None,
    'BANNED_USERNAMES': False,
    'SCINARIO': False,
    'ACTIVE': False,
    'USER_LIST': []
}

users = [] # A list of all the current users in the game.
# This defines the user class.
class User:
    """A player connected to the game, identified by display name."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # Helpful when inspecting user lists while debugging.
        return "User(%r)" % (self.name,)
# Load server settings from CONFIG.txt, creating the file with the defaults on
# first run so the user has a template to edit.
if os.path.exists('CONFIG.txt'):
    with open('CONFIG.txt', 'r') as f:
        for line in f.readlines():
            # Split only at the FIRST '=' so values containing '=' survive;
            # partition also tolerates blank/malformed lines instead of
            # raising IndexError like the old split('=') did.
            key, sep, value = line.strip().partition('=')
            if sep:
                CONFIG[key.strip()] = value.strip()
        # no explicit close needed: the `with` block closes the file
else:
    with open('CONFIG.txt', 'w') as f:
        # write the default config so the user can edit it later
        for key in CONFIG:
            f.write(key + ' = ' + str(CONFIG[key]) + '\n')
# Flask server setup. The template folder points at ../client, so the browser
# only ever sees files from the client directory, never from the server's own
# directory.
app = Flask(__name__, template_folder='../client')
# NOTE(review): the session-signing key is hard-coded; move it to CONFIG.txt
# or an environment variable before deploying anywhere shared.
app.config['SECRET_KEY'] = 'secret'
app.config['DEBUG'] = CONFIG['DEBUG']  # apply the loaded CONFIG value
for i in app.config: print(i, app.config[i])  # dump the effective config
socketio = SocketIO(app)  # wrap the Flask app for websocket support
# Metadata stamped onto every websocket message so the client can verify and
# track messages for debugging, or measure information loss in the stream.
meta = {
    'time': '',   # wall-clock time the message was sent
    'zone': 'GMT',  # timezone the 'time' field is expressed in
    'serv': '',   # how long the server has been running
    'uuid': ''    # unique id for this message
}
start = datetime.datetime.now()  # server start time, used for uptime in meta
#================== RUN =================================
player = resources.setup()  # build the player object via resources.setup()
#================== APP ROUTES - FLASK =================================
# Index route: serves index.html for GET {ip}:{port}/.
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
# /login: receives the username POSTed from index.html, stores it in the
# per-client session (making the session identifiable), registers the user in
# the game roster, then moves the client on to the lobby.
@app.route('/login', methods=['POST'])
def login():
    if request.method == "POST":
        username = request.form['username']
        session['username'] = username
        # SECURITY FIX: the previous code exec()'d the raw username to create
        # a per-user variable, which allows arbitrary remote code execution.
        # A plain User instance provides the same data without eval/exec.
        user = User(username)
        print(session['username'])
        print(user.name)
        GAME['USER_LIST'].append(username)  # track the user in the roster
        print(session)
        return redirect('/lobby')  # move the client's page to /lobby
    # Nothing was POSTed; send the client back to the index page.
    # (The old code called url_for('/'), which was neither imported nor a
    # valid endpoint name.)
    return redirect('/')
@app.route('/lobby')
def lobby():
    """Render the lobby for logged-in users; bounce everyone else to login.

    Uses session.get so a client without a 'username' key no longer raises
    KeyError, and always returns a response (the old code returned None for
    anonymous users, producing a 500).
    """
    if session.get('username') is not None:
        return render_template('lobby.html')
    return redirect('/')
# Serve image files as byte streams with an image mimetype so the browser can
# decode them on arrival.
@app.route('/resources/img/<path:path>')
def img_route(path):
    # BUG FIX: splitext returns (root, ext); the old code called it on
    # path[-1] (the LAST CHARACTER), so the extension was never found, and
    # `ext == '.jpg' or '.png' or '.gif'` was always truthy.
    ext = os.path.splitext(path)[1].lower()
    if ext in ('.jpg', '.png', '.gif'):
        # NOTE(review): `path` is client-supplied; consider rejecting '..'
        # components before opening files under the client directory.
        with open('../client/resources/img/' + str(path), 'rb') as bites:
            return send_file(
                io.BytesIO(bites.read()),
                # strip the leading dot so the mimetype is e.g. 'image/png'
                mimetype='image/' + ext.lstrip('.')
            )
# Catch-all route for client assets (Javascript/CSS). Returns whatever lives
# at that path inside the /client folder, or a 404-style message.
@app.route('/<path:path>')
def route(path):
    # NOTE(review): render_template evaluates the target file as a Jinja
    # template; if the client directory can ever hold untrusted files this
    # becomes template injection. Prefer send_from_directory for raw assets.
    if os.path.exists('../client/' + str(path)):
        return render_template('/' + str(path))
    return "ERROR 404: " + str(path) + " doesn't exist"
#================== APP ROUTES - SOCKETIO ==============================
lock = Lock()  # guards resources.data, which the compute thread also mutates


# Default websocket channel ('/'): stamp the message metadata and reply with
# the latest simulation data to whoever sent the original message.
@socketio.on('message')
def handle_message(message):
    """Answer a client message with meta info plus the current game data."""
    meta['time'] = str(datetime.datetime.now())          # current time
    meta['serv'] = str(datetime.datetime.now() - start)  # server uptime
    meta['uuid'] = str(uuid.uuid4())                     # unique message id
    reply = dict(meta)
    # Bundle the metadata with the server data so everything travels in one
    # message. The lock is held only long enough to copy the newest computed
    # snapshot, so the calculation thread is barely interrupted and we never
    # read half-updated state.
    with lock:
        reply.update(resources.data)
    send(reply)
# /update namespace: carries user input. The message holds updated data which
# is applied through resources.update(); the shared lock keeps the compute
# thread from observing a half-applied update.
@socketio.on('message', namespace='/update')
def handle_incoming_data(message):
    """Apply client-sent data to the simulation state under the lock."""
    print('Incoming Data: ' + str(message))
    with lock:
        resources.update(player, message)
        print(resources.data)
    print('Data updated')
# /lobbu namespace (used by lobby.html): simply reply with the current game
# information to whichever client asked for it.
@socketio.on('message', namespace='/lobbu')
def handle_lobby_message(message):
    """Send the GAME dictionary back to the requesting client."""
    send(GAME)
#================== THREADS ==============================
# Simulation thread: runs separately from the web server so the physics keeps
# ticking in real time without being interrupted by request handling.
def compute(interval):
    """Run the gravity loop forever, sleeping `interval` seconds per pass.

    Fixes over the original: the parameter was named ``time``, shadowing the
    module needed for sleeping (``time.sleep(time)`` crashed on a float), and
    the statement ``global resources.data`` was invalid Python (SyntaxError).
    resources.run mutates resources.data directly, so no global is needed.
    """
    while True:
        for obj in resources.OBJECTS:
            # Only planets exert gravity; other objects (e.g. the player
            # itself) are skipped.
            if obj[1] == 'planet':
                resources.run(obj[0], player, interval)
        time.sleep(interval)  # wait before the next physics pass
if __name__ == '__main__':
    # BUG FIX: `args` must be a tuple — the original `args=(0.1)` is just the
    # float 0.1, which makes Thread fail at start(). daemon=True lets the
    # process exit cleanly when the server stops.
    compute_thread = Thread(target=compute, args=(0.1,), daemon=True)
    compute_thread.start()  # start the physics loop
    socketio.run(app)       # start the web/websocket server (blocks)
| 9,832 | 2,959 |
# -*- coding: utf-8 -*-
"""CountsDistribution class."""
import json
import logging
from collections.abc import Mapping, MappingView
from types import GeneratorType
import numpy
import ndd.fnsb
from ndd.exceptions import NddError
# Module-level logger following the standard `__name__` convention.
logger = logging.getLogger(__name__)
def unique(nk, sort=True):
    """Return (nk, zk): unique counts and their multiplicities."""
    counter = ndd.fnsb.counter
    counter.fit(nk)
    nk_u = counter.nk
    zk_u = counter.zk
    # keep a reference to the fitted counter for callers that want it
    unique.counter = counter
    if sort:
        # fancy indexing with the argsort order also yields fresh copies
        order = numpy.argsort(nk_u)
        return nk_u[order], zk_u[order]
    # numpy.array(...) copies, so callers never alias the counter's arrays
    return numpy.array(nk_u), numpy.array(zk_u)
def as_counts_array(counts):
    """Coerce `counts` into a numpy array of integer counts."""
    if isinstance(counts, (Mapping, MappingView)):
        # mappings (and their views) carry the counts in their values
        return numpy.fromiter(counts.values(), dtype=int)
    lazy = isinstance(counts, (GeneratorType, map, filter))
    if lazy:
        # one-shot iterators must be materialized via fromiter
        return numpy.fromiter(counts, dtype=int)
    return numpy.asarray(counts)
def check_k(k):
    """
    Validate and normalize the alphabet size `k`.

    If k is a scalar, check that it is a positive whole number.
    If k is a 1D array, replace it with prod(k) and check the result.
    If k is None, return None unchanged.

    Raises
    ------
    NddError
        If k is not valid (wrong type, negative, non-integer, too large...)
    """
    MAX_LOGK = 200 * numpy.log(2)  # cap cardinality at 2**200

    if k is None:
        return k
    try:
        k = numpy.float64(k)
    except ValueError:
        raise NddError('%r is not a valid cardinality' % k)
    if k.ndim:
        # if k is a sequence, set k = prod(k)
        if k.ndim > 1:
            raise NddError('k must be a scalar or 1D array')
        # BUG FIX: the original summed a generator expression with numpy.sum,
        # which does not reduce the element-wise logs; operate on the array.
        logk = numpy.sum(numpy.log(k))
        if logk > MAX_LOGK:
            # too large a number; backoff to n_bins?
            # TODO: log warning
            raise NddError('k is too large (%e). '
                           'Must be < 2^200 ' % numpy.exp(logk))
        k = numpy.prod(k)
    else:
        # scalar: check sign, magnitude and integrality
        if k <= 0:
            raise NddError('k must be > 0 (%r)' % k)
        if numpy.log(k) > MAX_LOGK:
            raise NddError('k is too large (%e). Must be < 2^200 ' % k)
        if not k.is_integer():
            raise NddError('k must be a whole number (got %r).' % k)
    return k
class CountsDistribution:
    """
    Contains counts data and statistics.

    Parameters
    ----------
    nk : array-like
        Unique frequencies in a counts array.
    zk : array_like, optional
        Frequencies distribution or "multiplicities".
        Must be len(zk) == len(nk).
    k : int or array-like, optional
        Alphabet size (the number of bins with non-zero probability).
        Must be >= len(nk). A float is a valid input for whole numbers
        (e.g. k=1.e3). If an array, set k = numpy.prod(k).
        Default: k = sum(nk > 0)
    """

    def __init__(self, *, nk=None, zk=None, k=None):
        # nk/zk form the multiplicity representation of a counts array;
        # _n and _k1 are caches backing the `n` and `k1` properties.
        self.nk = None
        self.k = None
        self.zk = None
        self._n = None
        self._k1 = None
        self.counts = None
        if (nk is None) != (zk is None):
            # nk and zk only make sense as a pair
            raise NddError('nk and zk should be passed together.')
        if nk is not None:
            self.nk = as_counts_array(nk)
            self.zk = as_counts_array(zk)
            # total samples and number of occupied bins
            self._n = numpy.sum(self.zk * self.nk)
            self._k1 = numpy.sum(self.zk[self.nk > 0])
        if k is not None:
            self.k = check_k(k)

    def __repr__(self):
        return 'CountsDistribution(nk=%r, k=%r, zk=%r)' % (self.nk, self.k,
                                                           self.zk)

    def __str__(self):
        # JSON rendering; numpy ints are cast so json can serialize them
        return json.dumps(
            {
                'nk': [int(x) for x in self.nk],
                'k': self.k,
                'zk': [int(x) for x in self.zk]
            },
            indent=4)

    def fit(self, counts):
        """Fit nk, zk (multiplicities) from counts array."""
        counts = as_counts_array(counts)
        self.nk, self.zk = unique(counts)
        self._n = numpy.sum(self.zk * self.nk)
        self._k1 = numpy.sum(self.zk[self.nk > 0])
        return self

    @property
    def normalized(self):
        """CountsDistribution are normalized."""
        if self.nk is None:
            return False
        # NOTE(review): requiring the single unique count to be 0 AND
        # sum(nk) to be close to 1 looks contradictory — this property may
        # always be False for real data; confirm the intended definition.
        return (len(self.nk) == 1 and self.nk[0] == 0
                and numpy.isclose(sum(self.nk), 1))

    def random(self, k=1000, n=100):
        """Generate random counts and fit multiplicities."""
        # draw n samples from k bins, then derive counts and multiplicities
        a = numpy.random.randint(k, size=n)
        _, self.counts = numpy.unique(a, return_counts=1)
        self.nk, self.zk = numpy.unique(self.counts, return_counts=1)
        return self

    @staticmethod
    def sorted_are_equal(a, b):
        """True if sorted arrays are equal."""
        def int_sort(x):
            return sorted(x.astype(numpy.int32))
        return int_sort(a) == int_sort(b)

    def __eq__(self, other):
        # equality ignores the ordering of the (nk, zk) pairs
        return (self.sorted_are_equal(self.nk, other.nk)
                and self.sorted_are_equal(self.zk, other.zk))

    @property
    def n(self):
        """Number of samples"""
        if self._n is None:
            self._n = numpy.sum(self.zk * self.nk)
        return self._n

    @property
    def k1(self):
        """Number of bins with counts > 0."""
        if self._k1 is None:
            self._k1 = numpy.sum(self.zk[self.nk > 0])
        return self._k1

    @property
    def coincidences(self):
        """Number of coincidences."""
        return self.n - self.k1

    @property
    def sampling_ratio(self):
        """The strongly undersampled regime is defined as ratio < 0.1"""
        return self.coincidences / self.n

    @property
    def multiplicities(self):
        """Return counts and their frequencies as (counts, frequencies)."""
        return self.nk, self.zk
| 5,676 | 1,936 |
import aiohttp
import asyncio
import base64
import os
from events import EventIn, EventOut, EventUpdate
from fastapi.logger import logger
from db import events, database
# Stripe endpoint and secret key come from the environment; both are None if
# unset, which surfaces as an error on the first payment call.
STRIPE_CHARGES_URL = os.getenv("STRIPE_CHARGES_URL")
STRIPE_API_KEY = os.getenv("STRIPE_API_KEY")
async def add_event(payload: EventIn):
    """Insert a new event row and return its database id."""
    logger.debug(f"Service: Adding event with {payload}")
    stmt = events.insert().values(**payload.dict())
    return await database.execute(query=stmt)
async def get_all_events():
    """Fetch every event row."""
    logger.debug("Service: Getting all events")
    return await database.fetch_all(query=events.select())
async def get_event(id):
    """Fetch a single event by primary key (None if absent)."""
    logger.debug(f"Service: Getting event {id}")
    stmt = events.select(events.c.id == id)
    return await database.fetch_one(query=stmt)
async def delete_event(id: int):
    """Delete the event with the given id; returns the driver's result."""
    logger.debug(f"Service: Deleting event {id}")
    stmt = events.delete().where(events.c.id == id)
    return await database.execute(query=stmt)
async def update_event(id: int, payload: EventIn):
    """Overwrite all columns of event `id` with the payload's values."""
    logger.debug(f"Service: Updating event {id} with {payload}")
    stmt = events.update().where(events.c.id == id).values(**payload.dict())
    return await database.execute(query=stmt)
async def add_stripe_payment(payload: "PaymentIn"):
    """Create a Stripe charge for `payload` and return Stripe's raw response.

    NOTE(review): `PaymentIn` is not imported anywhere in this module, so the
    unquoted annotation raised NameError when the module loaded; the quoted
    (deferred) annotation keeps the module importable — add the real import.
    """
    logger.debug(f"Service: Adding stripe payment with {payload}")
    # Stripe expects integer amounts in the currency's smallest unit.
    stripe_amount = int(payload.amount * 100)
    stripe_payload = {
        "amount": stripe_amount,
        "currency": payload.currency,
        "source": payload.source,
        "description": payload.description,
    }
    # HTTP Basic auth: API key as the username, empty password. b64encode
    # output is ASCII, so .decode() replaces the old chr()-join loop.
    token = base64.b64encode(f"{STRIPE_API_KEY}:".encode()).decode("ascii")
    headers = {"Authorization": "Basic " + token}
    async with aiohttp.ClientSession() as session:
        async with session.post(STRIPE_CHARGES_URL, data=stripe_payload, headers=headers) as resp:
            return await resp.text()
| 1,999 | 627 |
# coding: utf-8
from fr.hayj.util.number import *;
from fr.hayj.util.duration import *;
import unittest
import re
# The level allows the unit test execution to pick only the top-level tests.
unittestLevel = 1
if unittestLevel <= 1:
    class UtilTest(unittest.TestCase):
        """Checks truncateFloat keeps at most the requested decimal places."""

        def test1(self):
            # truncating below the value's precision shortens it...
            self.assertTrue(truncateFloat(0.00002000002, 2) == 0.00)
            self.assertTrue(truncateFloat(0.00002000002, 8) == 0.00002)
            # ...and leaves it unchanged when enough digits are allowed
            self.assertTrue(truncateFloat(0.00002000002, 20) == 0.00002000002)
            self.assertTrue(truncateFloat(0.02, 8) == 0.02)
            self.assertTrue(truncateFloat(0.02, 1) == 0.0)
            # scientific-notation inputs behave the same way
            self.assertTrue(truncateFloat(5e-5, 1) == 0.0)
            self.assertTrue(truncateFloat(5e-5, 10) == 0.00005)
| 776 | 335 |
import os
from flask import Flask, send_from_directory, request, jsonify
# Path settings: serve files from an 'ftp' directory next to this script.
SERVER_PATH = os.path.abspath(os.path.dirname(__file__))
FTP_PATH = os.path.abspath(os.path.join(SERVER_PATH, 'ftp/'))
# Flask app settings
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # allow non-ASCII filenames in JSON
@app.route('/api/getFilesList', methods=['POST', 'GET'])
def api_get_files_list():
    """Return the FTP directory listing, sorted alphabetically."""
    return jsonify({"list": sorted(os.listdir(FTP_PATH))})
@app.route('/api/uploadFile', methods=['POST', 'GET'])
def api_upload_file():
    """Store an uploaded file inside FTP_PATH.

    SECURITY FIX: the client-supplied filename is reduced to its basename so
    names like '../../etc/passwd' cannot escape the FTP directory.
    """
    file = request.files['file']
    safe_name = os.path.basename(file.filename)
    file.save(os.path.join(FTP_PATH, safe_name))
    return jsonify({})
@app.route('/api/downloadFile/<path:filename>', methods=['POST', 'GET'])
def api_download_file(filename):
    """Stream a file from FTP_PATH as an attachment.

    send_from_directory rejects paths that escape FTP_PATH.
    """
    return send_from_directory(FTP_PATH, filename, as_attachment=True)
if __name__ == "__main__":
if not os.path.exists(FTP_PATH):
os.makedirs(FTP_PATH)
app.run(debug=True)
| 1,006 | 371 |
# This file is empty | 20 | 6 |
class Solution:
    """LeetCode 468: classify a string as 'IPv4', 'IPv6' or 'Neither'."""

    def validIPAddress(self, IP: str) -> str:
        """Dispatch on the separator: 4 dot-groups -> IPv4, 8 colon-groups -> IPv6."""
        if len(IP.split('.')) == 4:
            return self.validIPv4(IP.split('.'))
        if len(IP.split(':')) == 8:
            return self.validIPv6(IP.split(':'))
        return "Neither"

    def validIPv4(self, ips):
        """Each group must be a decimal 0-255 with no leading zero."""
        for number in ips:
            # reject leading zeros such as '01'
            if len(number) > 1 and number[0] == '0':
                return "Neither"
            # BUG FIX: str.isdigit also accepts unicode digits (e.g. '²')
            # which int() rejects; restrict to ASCII digits explicitly.
            if not number or not all(c in '0123456789' for c in number):
                return "Neither"
            if not 0 <= int(number) <= 255:
                return "Neither"
        return "IPv4"

    def validIPv6(self, ips):
        """Each group must be 1-4 hexadecimal digits."""
        hex_digits = set('0123456789abcdef')
        for group in ips:
            # empty group or group longer than 4 characters
            if not 1 <= len(group) <= 4:
                return "Neither"
            # every character must be a hex digit (case-insensitive);
            # the original's debug print is removed
            if any(c.lower() not in hex_digits for c in group):
                return "Neither"
        return "IPv6"
| 1,240 | 379 |
"""Utilities for calculating directions and distances given coords."""
import math
# Mean Earth radius in meters (IUGG value).
EARTH_RADIUS = 6.3710088e6


def get_distance(fix1, fix2):
    """Great-circle distance between two lat/lon fixes in meters (haversine)."""
    lat1, lon1 = math.radians(fix1.latitude), math.radians(fix1.longitude)
    lat2, lon2 = math.radians(fix2.latitude), math.radians(fix2.longitude)
    sin_dlat = math.sin((lat2 - lat1) / 2)
    sin_dlon = math.sin((lon2 - lon1) / 2)
    hav = (sin_dlat * sin_dlat
           + math.cos(lat1) * math.cos(lat2) * sin_dlon * sin_dlon)
    central_angle = 2 * math.atan2(math.sqrt(hav), math.sqrt(1 - hav))
    return EARTH_RADIUS * central_angle
def get_forward_angle(fix1, fix2):
    """Initial (forward-azimuth) bearing from fix1 to fix2, radians in [0, 2*pi)."""
    lat1 = math.radians(fix1.latitude)
    lon1 = math.radians(fix1.longitude)
    lat2 = math.radians(fix2.latitude)
    lon2 = math.radians(fix2.longitude)
    dlon = lon2 - lon1
    east = math.sin(dlon) * math.cos(lat2)
    north = (math.cos(lat1) * math.sin(lat2)
             - math.sin(lat1) * math.cos(lat2) * math.cos(dlon))
    bearing = math.atan2(east, north)
    # shift atan2's (-pi, pi] range into [0, 2*pi)
    return (bearing + 2 * math.pi) % (2 * math.pi)
| 1,158 | 449 |
from iot_api import socketio
def emit_alert_event(event, recipient):
    """Emit `event` on the 'new_alert' channel to `recipient`'s socket room."""
    socketio.emit('new_alert', event, room=recipient)
from contextlib import contextmanager
import pickle
import numpy
import pandas
from ..base import Connection
class DataFrameConnection(Connection):
    """Connection to a pandas DataFrame.

    This connection is meant when it is not possible to use the file system
    or other type of traditional database (e.g. a `Kaggle <http://kaggle.com>`_
    scripts) and absolutely not in concurrent processes. In fact, using this
    connection in different processes will result in two independent searches
    **not** sharing any information.

    Args:
        from_file: The name of a file containing a pickled data frame
            connection.

    Using this connection requires small adjustments to the proposed main
    script. When the main process finishes, all data will vanish if not
    explicitly written to disk. Thus, instead of doing a single evaluation,
    the main process will incorporate a loop calling the search/sample
    ``next`` method multiple times. Additionally, at the end of the
    experiment, either extract the best configuration using
    :meth:`results_as_dataframe` or write all the data using :mod:`pickle`.
    """

    def __init__(self, from_file=None):
        if from_file is not None:
            # NOTE(review): unpickling executes arbitrary code — only load
            # files you created yourself.
            with open(from_file, "rb") as f:
                # BUG FIX: pickle.load expects the file object itself, not
                # the bytes returned by f.read().
                conn = pickle.load(f)
            if not isinstance(conn, DataFrameConnection):
                raise TypeError("Unpickled connection is not of type DataFrameConnection")
            self.results = conn.results
            self.complementary = conn.complementary
            self.space = conn.space
        else:
            self.results = pandas.DataFrame()
            self.complementary = pandas.DataFrame()
            self.space = None

    @contextmanager
    def lock(self, *args, **kwargs):
        """This function does not lock anything. Do not use in concurrent
        processes.
        """
        yield

    def all_results(self):
        """Get a list of all entries of the result table. The order is
        undefined.
        """
        return list(self.results.T.to_dict().values())

    def find_results(self, filter):
        """Get a list of all results associated with *filter*. The order is
        undefined.
        """
        selection = self.results
        for k, v in filter.items():
            selection = selection[selection[k] == v]
        return list(selection.T.to_dict().values())

    def insert_result(self, document):
        """Insert a new *document* in the result data frame. The columns do
        not need to be defined nor all present. Any new column will be added
        to the database and any missing column will get value None.
        """
        # DataFrame.append was removed in pandas 2.0; concat is equivalent.
        self.results = pandas.concat(
            [self.results, pandas.DataFrame([document])], ignore_index=True)

    def update_result(self, document, value):
        """Update or add *value* of given rows in the result data frame.

        Args:
            document: An identifier of the rows to update.
            value: A mapping of values to update or add.
        """
        size = len(self.results.index)
        selection = [True] * size
        for k, v in document.items():
            selection = numpy.logical_and(self.results[k] == v, selection)
        for k, v in value.items():
            if k not in self.results:
                # create the missing column filled with None
                self.results[k] = pandas.Series([None] * size)
            self.results.loc[selection, k] = v

    def count_results(self):
        """Get the total number of entries in the result table."""
        return len(self.results.index)

    def all_complementary(self):
        """Get all entries of the complementary information table as a list.
        The order is undefined.
        """
        return list(self.complementary.T.to_dict().values())

    def insert_complementary(self, document):
        """Insert a new document (row) in the complementary information data frame."""
        self.complementary = pandas.concat(
            [self.complementary, pandas.DataFrame([document])],
            ignore_index=True)

    def find_complementary(self, filter):
        """Find a document (row) from the complementary information data frame.

        Raises IndexError when no row matches *filter*.
        """
        selection = self.complementary
        for k, v in filter.items():
            selection = selection[selection[k] == v]
        return list(selection.T.to_dict().values())[0]

    def get_space(self):
        """Returns the space used for previous experiments."""
        return self.space

    def insert_space(self, space):
        """Insert a space in the database.

        Raises:
            AssertionError: If a space is already present.
        """
        assert self.space is None, "Space table cannot contain more than one space, clear table first."
        self.space = space

    def clear(self):
        """Clear all data."""
        self.results = pandas.DataFrame()
        self.complementary = pandas.DataFrame()
        self.space = None

    def pop_id(self, document):
        """Pops the database unique id from the document (no-op here)."""
        return document
| 4,968 | 1,268 |
from alphaorm.AlphaORM import AlphaORM,AlphaRecord
# Configure AlphaORM against a local MySQL instance.
# NOTE(review): credentials are hard-coded (root with empty password) — fine
# for a local demo, not for anything shared.
AlphaORM.setup('mysql', {
    'host': 'localhost',
    'user': 'root',
    'password': '',
    'database': 'alphaorm'
})
# Create a sample 'python' record, set two fields, and persist it.
m = AlphaORM.create('python')
m.name = 'Alpha'
m.age = 10
AlphaORM.store(m)
"""Data schema for a player's state."""
import collections
# Immutable snapshot of everything a single player owns during a game.
PlayerState = collections.namedtuple(
    "PlayerState",
    [
        "gems",                     # dict of Gems held by the player
        "purchased_cards",          # list of purchased DevelopmentCards
        "unhidden_reserved_cards",  # reserved DevelopmentCards, face up
        "hidden_reserved_cards",    # reserved DevelopmentCards, face down
                                    # (reserved cards are typically hidden
                                    # when topdecked)
        "noble_tiles",              # NobleTiles obtained
    ],
)
| 523 | 158 |
from configparser import ConfigParser, NoOptionError
from typing import Dict # pylint: disable=unused-import
import os
from sheepdoge.exception import (
SheepdogeConfigurationAlreadyInitializedException,
SheepdogeConfigurationNotInitializedException,
)
# Built-in defaults for every supported configuration key; None means the
# option stays unset unless a cfg file or command-line option provides it.
DEFAULTS = {
    "kennel_playbook_path": "kennel.yml",
    "kennel_roles_path": ".kennel_roles",
    "pupfile_path": "pupfile.yml",
    "vault_password_file": None,
}  # type: Dict[str, str]
class Config(object):
    """Process-wide configuration singleton.

    The instance is created once via `initialize_config_singleton` and its
    values are frozen at that point; every consumer then reads the same
    instance through `get_config_singleton`.
    """

    _config = None  # type: Config

    def __init__(self, config_dict):
        # type: (Dict[str, str]) -> None
        self._config_dict = config_dict

    @classmethod
    def clear_config_singleton(cls):
        # type: () -> None
        """Drop the current singleton so a new one may be initialized.

        Predominantly useful during tests.
        """
        cls._config = None

    @classmethod
    def get_config_singleton(cls):
        # type: () -> Config
        """Return the singleton, which must already be initialized.

        :return: The singleton instance.
        """
        if cls._config is None:
            raise SheepdogeConfigurationNotInitializedException
        return cls._config

    @classmethod
    def initialize_config_singleton(
        cls, config_file_contents=None, config_options=None
    ):
        # type: (str, Dict[str, str]) -> None
        """Create the singleton, layering configuration sources.

        Priority, lowest to highest: built-in defaults, then the contents of
        a *.cfg file, then an explicit options dict (typically derived from
        the command line). Derived values are computed last.

        :param config_file_contents: The str contents of the .cfg file
            containing kennel configuration.
        :param config_options: The dict specifying the highest priority
            configuration values.
        """
        if cls._config is not None:
            raise SheepdogeConfigurationAlreadyInitializedException()

        settings = {}  # type: Dict[str, str]
        cls._set_config_default_values(settings)
        if config_file_contents:
            cls._set_config_file_values(settings, config_file_contents)
        if config_options:
            cls._set_config_option_values(settings, config_options)
        cls._set_calculated_config_values(settings)

        cls._config = cls(settings)

    @classmethod
    def _set_config_default_values(cls, config_dict):
        # type: (Dict[str, str]) -> None
        """Seed every known key with its default; later steps overwrite."""
        config_dict.update(DEFAULTS)

    @classmethod
    def _set_config_file_values(cls, config_dict, config_file_contents):
        # type: (Dict[str, str], str) -> None
        """Overlay values found in the [kennel] section of the cfg file."""
        parser = ConfigParser()
        parser.read_string(config_file_contents)
        section = "kennel"
        for key in config_dict.keys():
            try:
                config_dict[key] = parser.get(section, key)
            except NoOptionError:
                # absent keys keep their current (default) value
                continue

    @classmethod
    def _set_config_option_values(cls, config_dict, config_options):
        # type: (Dict[str, str], Dict[str, str]) -> None
        """Overlay the highest-priority (command-line) options."""
        config_dict.update(config_options)

    @classmethod
    def _set_calculated_config_values(cls, config_dict):
        # type: (Dict[str, str]) -> None
        """Derive absolute paths from the configured relative ones."""
        config_dict["abs_pupfile_dir"] = os.path.dirname(
            os.path.realpath(config_dict["pupfile_path"])
        )
        config_dict["abs_kennel_roles_dir"] = os.path.realpath(
            config_dict["kennel_roles_path"]
        )

    def get(self, key):
        # type: (str) -> str
        """Retrieve the value for the given configuration key.

        :param key: One of the available configuration options.
        """
        return self._config_dict[key]
| 4,871 | 1,357 |
from argparse import ArgumentParser
from USTCHelper import config
import json, base64
import os
class ArgumentError(Exception):
    """Raised when mutually exclusive command-line options are combined."""

    def __init__(self, text):
        self.text = text

    def __str__(self):
        return "ArgumentError: " + self.text
def ArgParser():
    """Build the command-line parser for the USTC helper."""
    p = ArgumentParser()
    p.add_argument("--daily", help="run your daily schedule", action='store_true')
    p.add_argument("-s", "--service", help="service to run", metavar="SERVICE", dest="service")
    p.add_argument("--silence", help="run in silence", action='store_true')
    p.add_argument("-u", "--username", help="your student ID", metavar="ID", dest="stuid")
    p.add_argument("--store-password", help="store password in config", action='store_true')
    p.add_argument("--config", help="config for services", metavar="CONF")
    return p
def ArgConflictCheck(args):
    """Reject option combinations that cannot be used together."""
    if args.daily and args.service:
        raise ArgumentError("Conflict arguments: --daily, --service")
def ArgInit(args):
    """Apply command-line overrides to the global service config.

    The --config value is expected to be base64-encoded JSON text.
    """
    if args.config:
        config["in-command"]["state"] = True
        # NOTE(review): the payload is decoded as GBK before JSON parsing —
        # presumably for Chinese-locale tooling; confirm the encoding.
        config["in-command"]["config"] = json.loads(base64.b64decode(args.config.encode()).decode('gbk'))
| 1,202 | 361 |
from .abacus import Abacus
from .draw_pad import DrawPad
from .ledger import Ledger
# Public names re-exported by this package.
__all__ = ('Abacus', 'DrawPad', 'Ledger')
| 127 | 48 |
import os
import sys
import argparse
from cuttsum.readers import gold_reader
from cuttsum.summarizers import RankSummarizer
def main():
    """Run the rank summarizer over the gold data and write summaries.

    Python 2 module: uses print statements and the `unicode` type.
    """
    args = parse_args()
    # parse_args returns an 8-tuple; unpack it positionally
    bow_file, lvec_file, sim_file = args[0:3]
    dbg_sim_mode, odir, n_return, use_temp, penalty_mode = args[3:]
    if sim_file is None:
        # map the debug similarity mode to its column index in the gold data
        if dbg_sim_mode == u'max':
            sim_idx = 3
        elif dbg_sim_mode == u'min':
            sim_idx = 4
        elif dbg_sim_mode == u'avg':
            sim_idx = 5
        data_reader = gold_reader(bow_file, lvec_file, sim_idx)
    else:
        # loading precomputed similarities is not implemented yet
        print "IMPLEMENT SIM FILE LOADER"
        sys.exit()
    ts_system = RankSummarizer(use_temp, vec_dims=100)
    ts_system.run(data_reader, odir, n_return, penalty_mode)
    print "Run complete!"
def parse_args():
    """Parse and validate command-line options (Python 2 syntax).

    Exits the process with a message on stderr when validation fails.
    Returns the 8-tuple consumed positionally by main().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--bow-file',
                        help=u'BOW file',
                        type=unicode, required=True)
    parser.add_argument('-l', '--lvec-file',
                        help=u'latent vector file',
                        type=unicode, required=True)
    parser.add_argument('--debug-sim-mode',
                        help=u'max, min, or avg',
                        type=unicode,
                        default=u'max',
                        required=False)
    parser.add_argument('-s', '--sim-file',
                        help=u'sim file',
                        type=unicode, required=False)
    parser.add_argument('-o', '--output-dir',
                        help=u'Location iterative summaries for rouge',
                        type=unicode, required=True)
    parser.add_argument('-n', '--num-return',
                        help=u'Return top-n updates at each time interval',
                        type=int, required=True)
    parser.add_argument('--temp', dest='use_temp', action='store_true')
    parser.add_argument('--no-temp', dest='use_temp', action='store_false')
    parser.set_defaults(use_temp=True)
    parser.add_argument('-p', '--penalty-mode',
                        help=u'agg or max',
                        type=unicode, required=True)
    args = parser.parse_args()
    bow_file = args.bow_file
    lvec_file = args.lvec_file
    sim_file = args.sim_file
    dbg_sim_mode = args.debug_sim_mode
    odir = args.output_dir
    n_return = args.num_return
    use_temp = args.use_temp
    penalty_mode = args.penalty_mode
    # --penalty-mode only matters when temporal mode is enabled
    if use_temp is True and penalty_mode not in [u'agg', u'max']:
        import sys
        sys.stderr.write(u'Bad --penalty-mode argument: \'agg\' or \'max\'\n')
        sys.stderr.flush()
        sys.exit()
    # create the output directory on demand
    if odir != '' and not os.path.exists(odir):
        os.makedirs(odir)
    if dbg_sim_mode not in [u'max', u'min', u'avg']:
        sys.stderr.write(u'Bad argument for --debug-sim-mode: ')
        sys.stderr.write(u'max, min or avg are legal args\n')
        sys.stderr.flush()
        sys.exit()
    # input files must exist and be regular files, not directories
    if not os.path.exists(bow_file) or os.path.isdir(bow_file):
        sys.stderr.write((u'--bow-file argument {} either does not exist' \
            + u' or is a directory!\n').format(bow_file))
        sys.stderr.flush()
        sys.exit()
    if not os.path.exists(lvec_file) or os.path.isdir(lvec_file):
        sys.stderr.write((u'--lvec-file argument {} either does not exist' \
            + u' or is a directory!\n').format(lvec_file))
        sys.stderr.flush()
        sys.exit()
    if sim_file is not None:
        if not os.path.exists(sim_file) or os.path.isdir(sim_file):
            sys.stderr.write((u'--sim-file argument {} either does not exist' \
                + u' or is a directory!\n').format(sim_file))
            sys.stderr.flush()
            sys.exit()
    return (bow_file, lvec_file, sim_file, dbg_sim_mode, odir, n_return, use_temp, penalty_mode)
# Script entry point.
if __name__ == '__main__':
    main()
| 3,920 | 1,304 |
"""
This file is property of the Ingram Micro Cloud Blue.
Copyright (c) 2019 Ingram Micro. All Rights Reserved.
"""
import json
from requests import api
from connect.logger import logger
from urllib.parse import urlencode, quote_plus
class Client:
    """Thin wrapper around `requests` for calling the Connect API."""

    @staticmethod
    def send_request(verb, uri, config, body=None):
        """Send an HTTP request described by `config` and return parsed JSON.

        config keys: 'Content-Type' (required) plus either 'bearer' or
        'basic' for the Authorization header. Form-encoded bodies are
        urlencoded; everything else is sent as JSON.

        Raises
        ------
        Exception
            On a non-2xx response, with the API's 'error' field as message.
        """
        # Routine request tracing belongs at debug level, not error.
        logger.debug("REQUEST------------------->")
        logger.debug('Request: %s %s' % (verb, uri))
        logger.debug(body)
        options = {'url': uri, 'headers': {'Content-Type': config['Content-Type']}}
        if 'bearer' in config:
            options['headers']['Authorization'] = 'Bearer ' + config['bearer']
        elif 'basic' in config:
            options['headers']['Authorization'] = 'Basic ' + config['basic']
        if body:
            if config['Content-Type'] == 'application/x-www-form-urlencoded':
                options['data'] = urlencode(body, quote_via=quote_plus)
            else:
                options['data'] = json.dumps(body)
        response = api.request(verb, **options)
        # BUG FIX: success is 2xx only; the old range also accepted 300.
        if 200 <= response.status_code < 300:
            logger.debug(str(response))
            if response.content:
                return response.json()
        else:
            logger.error('Response')
            logger.error(str(response))
            raise Exception(response.json()['error'])
| 1,378 | 373 |
import argparse
import csv
import re
# Read the shard count (== number of distributed nodes) from the command line.
def parse_args():
    """Parse command-line arguments into the module-level ARGS namespace."""
    global ARGS
    parser = argparse.ArgumentParser()
    parser.add_argument('node_tot', type=int,
                        help='total number of distributed-nodes/shards in the system')
    ARGS = parser.parse_args()
parse_args()
node_tot = ARGS.node_tot

# read clustering result of the graph, and bind them to distributed nodes
# (ensure the total vertex to each node is as equal as possible)
## first, read in total number of vertices and clusters
## from the first line of the input file
fp = open("./results/clustered_venmo_dataset_7024852.txt", "r")  # , encoding='utf-8')
firstline_str = fp.readline()
firstline_pattern = re.compile(r"Clustering with (\d+) elements and (\d+) clusters")
firstline_match = firstline_pattern.match(firstline_str)
if not firstline_match:
    raise Exception('Failed to identify total vertex number and cluster number in input file')
# int() instead of eval(): the regex groups are guaranteed digit strings,
# and eval() on file contents is slow and unsafe.
vertex_tot = int(firstline_match.group(1))
cluster_tot = int(firstline_match.group(2))
print('%d vertices, %d clusters, %d shards' % (vertex_tot, cluster_tot, node_tot))
## binding initialization
curline_str = fp.readline()
cur_cluster = -1
expected_node_size = vertex_tot / node_tot  # float target: vertices per shard
vertex_cluster_no = [-1] * vertex_tot
vertex_node_no = [-1] * vertex_tot
cluster_size = [0] * cluster_tot
cluster_vertices = [[] for i in range(cluster_tot)]  # '[[]] * cluster_tot' is wrong, because it is shallow copy
cluster_node_no = [-1] * cluster_tot
## read in each line and count
while curline_str:
    if curline_str[0] == '[':  # beginning of a new cluster
        cur_cluster += 1
        # remove the preceding '[ cluster_no]' of the string
        curline_str = curline_str[curline_str.find(']')+1:]
    strs = curline_str[:-1].split(',')  # slice off newline character, and split by comma
    for cur_str in strs:
        if not cur_str:
            continue  # ignore empty substrings (in the case the last char is a comma)
        # bind vertex to its cluster written in the input file
        # int() instead of eval(): tokens are plain integers, and eval()
        # on file contents is slow and unsafe.
        cur_vertex = int(cur_str)
        vertex_cluster_no[cur_vertex] = cur_cluster
        cluster_vertices[cur_cluster].append(cur_vertex)
        cluster_size[cur_cluster] += 1
    curline_str = fp.readline()  # next line
## bind clusters to distributed nodes according to counting result
cur_node = 0
cur_node_size = 0
for cur_cluster in range(cluster_tot):
    if cur_node + 1 < node_tot \
            and cur_node_size + cluster_size[cur_cluster] / 2 > expected_node_size \
            and cur_node_size != 0:
        # should bind to the next node, unless this cluster is the first cluster on the current node
        cur_node += 1
        cur_node_size = 0
    cur_node_size += cluster_size[cur_cluster]
    cluster_node_no[cur_cluster] = cur_node
    for cur_vertex in cluster_vertices[cur_cluster]:
        vertex_node_no[cur_vertex] = cur_node
fp.close()
print('-- Binding result:')
node_size = [0] * node_tot
for cur_vertex in range(vertex_tot):
    node_size[vertex_node_no[cur_vertex]] += 1
for cur_node in range(node_tot):
    print('Shard %d size:' % cur_node, node_size[cur_node])
# read txes and sort by time
def takeThird(elem):
    """Sort key: the third field (timestamp string) of a tx record.

    Dates are assumed to order correctly under plain string comparison.
    """
    return elem[2]
# Load all transactions (sender id, receiver id, timestamp) and sort by time.
fp = open("../venmo_dataset_normalized_shorted.csv", "r")  # , encoding='utf-8')
csv_file = csv.reader(fp)
all_tx = []
for row in csv_file:
    all_tx.append([int(row[0]), int(row[1]), row[2]])
# all_tx = all_tx[:4000000] # we only used the first 4000000 lines of txes
all_tx.sort(key=takeThird)
fp.close()
# simulate txs, calculate ratio of remote tx
tx_remote_cnt = 0
tx_cnt = 0
for tx in all_tx:
    tx_cnt += 1
    if vertex_node_no[tx[0]] != vertex_node_no[tx[1]]:
        tx_remote_cnt += 1
        # Dynamic rebinding: after a remote tx, migrate the sender onto the
        # receiver's shard so a repeated pair becomes local next time.
        vertex_node_no[tx[0]] = vertex_node_no[tx[1]]
print('-- Result:')
print('tx total:', tx_cnt)
print('remote:', tx_remote_cnt)
print('remote ratio:', tx_remote_cnt / tx_cnt)
| 4,048 | 1,367 |
"""Ampio Systems Platform."""
import asyncio
import json
import logging
from typing import Any, Dict, Optional
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_FRIENDLY_NAME,
CONF_ICON,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, callback
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
event,
template,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import EntityRegistry, async_get_registry
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .client import AmpioAPI, async_setup_discovery
from .const import (
AMPIO_CONNECTED,
AMPIO_DISCOVERY_UPDATED,
AMPIO_MODULE_DISCOVERY_UPDATED,
COMPONENTS,
CONF_BROKER,
CONF_STATE_TOPIC,
CONF_UNIQUE_ID,
DATA_AMPIO,
DATA_AMPIO_API,
DATA_AMPIO_DISPATCHERS,
DATA_AMPIO_PLATFORM_LOADED,
PROTOCOL_311,
SIGNAL_ADD_ENTITIES,
)
from .models import AmpioModuleInfo
_LOGGER = logging.getLogger(__name__)
DOMAIN = "ampio"
# MQTT topics used to request/receive the module firmware version.
VERSION_TOPIC_FROM = "ampio/from/info/version"
VERSION_TOPIC_TO = "ampio/to/info/version"
# MQTT topics used to request/receive the CAN device list (discovery).
DISCOVERY_TOPIC_FROM = "ampio/from/can/dev/list"
DISCOVERY_TOPIC_TO = "ampio/to/can/dev/list"
ATTR_DEVICES = "devices"
CONF_KEEPALIVE = "keepalive"
PROTOCOL_31 = "3.1"
# Broker connection defaults, applied by CONFIG_SCHEMA below.
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
# Validates the broker connection options; async_setup_entry feeds the
# stored config-entry data through this schema.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            vol.Schema(
                {
                    vol.Optional(CONF_CLIENT_ID): cv.string,
                    # MQTT keepalive must be at least 15 seconds.
                    vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
                        vol.Coerce(int), vol.Range(min=15)
                    ),
                    vol.Optional(CONF_BROKER): cv.string,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                    vol.Optional(CONF_USERNAME): cv.string,
                    vol.Optional(CONF_PASSWORD): cv.string,
                    vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
                        cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
                    ),
                },
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Stub to allow setting up this component.

    Configuration through YAML is not supported at this time; the
    integration is set up via config entries (see async_setup_entry).
    """
    return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
    """Set up the Ampio component from a config entry.

    Validates the stored entry data against CONFIG_SCHEMA, creates the
    AmpioAPI client, forwards setup to each supported platform and wires
    the connected/stop handlers.
    """
    ampio_data = hass.data.setdefault(DATA_AMPIO, {})
    for component in COMPONENTS:
        ampio_data.setdefault(component, [])
    conf = CONFIG_SCHEMA({DOMAIN: dict(config_entry.data)})[DOMAIN]
    # Plain assignment: the original annotated a subscript target, which
    # Python ignores and which only obscures the statement.
    ampio_data[DATA_AMPIO_API] = AmpioAPI(hass, config_entry, conf)
    ampio_data[DATA_AMPIO_DISPATCHERS] = []
    ampio_data[DATA_AMPIO_PLATFORM_LOADED] = []
    for component in COMPONENTS:
        coro = hass.config_entries.async_forward_entry_setup(config_entry, component)
        ampio_data[DATA_AMPIO_PLATFORM_LOADED].append(hass.async_create_task(coro))
    await ampio_data[DATA_AMPIO_API].async_connect()

    async def async_connected():
        """Start discovery on connected."""
        await async_setup_discovery(hass, conf, config_entry)

    # Keep the unsubscribe callback so async_unload_entry (which iterates
    # DATA_AMPIO_DISPATCHERS) can disconnect it; the original discarded it,
    # leaking the dispatcher connection on unload.
    unsub = async_dispatcher_connect(hass, AMPIO_CONNECTED, async_connected)
    ampio_data[DATA_AMPIO_DISPATCHERS].append(unsub)

    async def async_stop_ampio(_event: Event):
        """Disconnect from the broker when Home Assistant stops."""
        await ampio_data[DATA_AMPIO_API].async_disconnect()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_ampio)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload an Ampio config entry: drop dispatchers, unload platforms."""
    for unsubscribe in hass.data[DATA_AMPIO].get(DATA_AMPIO_DISPATCHERS, []):
        unsubscribe()
    for platform in COMPONENTS:
        await hass.config_entries.async_forward_entry_unload(config_entry, platform)
    return True
| 4,363 | 1,562 |
#!/usr/bin/env python
# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: nova
short_description: OpenStack external inventory script
description:
- Generates inventory that Ansible can understand by making API request to OpenStack endpoint using the novaclient library.
- |
When run against a specific host, this script returns the following variables:
os_os-ext-sts_task_state
os_addresses
os_links
os_image
os_os-ext-sts_vm_state
os_flavor
os_id
os_rax-bandwidth_bandwidth
os_user_id
os_os-dcf_diskconfig
os_accessipv4
os_accessipv6
os_progress
os_os-ext-sts_power_state
os_metadata
os_status
os_updated
os_hostid
os_name
os_created
os_tenant_id
os__info
os__loaded
where some item can have nested structure.
- All information are set on B(nova.ini) file
version_added: None
options:
version:
description:
- OpenStack version to use.
required: true
default: null
choices: [ "1.1", "2" ]
username:
description:
- Username used to authenticate in OpenStack.
required: true
default: null
api_key:
description:
- Password used to authenticate in OpenStack, can be the ApiKey on some authentication system.
required: true
default: null
auth_url:
description:
- Authentication URL required to generate token.
- To manage RackSpace use I(https://identity.api.rackspacecloud.com/v2.0/)
required: true
default: null
auth_system:
description:
- Authentication system used to login
- To manage RackSpace install B(rackspace-novaclient) and insert I(rackspace)
required: true
default: null
region_name:
description:
- Region name to use in request
- In RackSpace some value can be I(ORD) or I(DWF).
required: true
default: null
project_id:
description:
- Project ID to use in connection
- In RackSpace use OS_TENANT_NAME
required: false
default: null
endpoint_type:
description:
- The endpoint type for novaclient
- In RackSpace use 'publicUrl'
required: false
default: null
service_type:
description:
- The service type you are managing.
- In RackSpace use 'compute'
required: false
default: null
service_name:
description:
- The service name you are managing.
- In RackSpace use 'cloudServersOpenStack'
required: false
default: null
insicure:
description:
- To no check security
required: false
default: false
choices: [ "true", "false" ]
author: Marco Vito Moscaritolo
notes:
- This script assumes Ansible is being executed where the environment variables needed for novaclient have already been set on nova.ini file
- For more details, see U(https://github.com/openstack/python-novaclient)
examples:
- description: List instances
code: nova.py --list
- description: Instance property
code: nova.py --instance INSTANCE_IP
'''
import sys
import re
import os
import ConfigParser
from novaclient import client as nova_client
try:
import json
except:
import simplejson as json
###################################################
# executed with no parameters, return the list of
# all groups and hosts
def nova_load_config_file():
    """Locate and parse nova.ini.

    Search order: current working directory, $ANSIBLE_CONFIG (or ~/nova.ini),
    then /etc/ansible/nova.ini. Returns the parser for the first file found,
    or None when no candidate exists.
    """
    parser = ConfigParser.SafeConfigParser()
    candidates = [
        os.getcwd() + "/nova.ini",
        os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
        "/etc/ansible/nova.ini",
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            parser.read(candidate)
            return parser
    return None
# Load connection settings; returns None when no nova.ini exists, in which
# case the .get() calls below will fail.  (Python 2 script throughout.)
config = nova_load_config_file()
client = nova_client.Client(
    version = config.get('openstack', 'version'),
    username = config.get('openstack', 'username'),
    api_key = config.get('openstack', 'api_key'),
    auth_url = config.get('openstack', 'auth_url'),
    region_name = config.get('openstack', 'region_name'),
    project_id = config.get('openstack', 'project_id'),
    auth_system = config.get('openstack', 'auth_system')
)
if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
    groups = {}
    # Cycle on servers
    for f in client.servers.list():
        # Only the first address block is inspected (itervalues().next());
        # 'fixed' addresses are private, 'floating' are public.
        private = [ x['addr'] for x in getattr(f, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
        public = [ x['addr'] for x in getattr(f, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
        # Define group (or set to empty string)
        group = f.metadata['group'] if f.metadata.has_key('group') else 'undefined'
        # Create group if not exist
        if group not in groups:
            groups[group] = []
        # Append group to list, preferring accessIPv4, then public, then private.
        if f.accessIPv4:
            groups[group].append(f.accessIPv4)
            continue
        if public:
            groups[group].append(''.join(public))
            continue
        if private:
            groups[group].append(''.join(private))
            continue
    # Return server list
    print json.dumps(groups)
    sys.exit(0)
#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
    results = {}
    ips = []
    for instance in client.servers.list():
        private = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
        public = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
        ips.append( instance.accessIPv4)
        ips.append(''.join(private))
        ips.append(''.join(public))
        if sys.argv[2] in ips:
            for key in vars(instance):
                # Extract value
                value = getattr(instance, key)
                # Generate sanitized key
                key = 'os_' + re.sub("[^A-Za-z0-9\-]", "_", key).lower()
                # Att value to instance result (exclude manager class)
                #TODO: maybe use value.__class__ or similar inside of key_name
                if key != 'os_manager':
                    results[key] = value
    print json.dumps(results)
    sys.exit(0)
else:
    print "usage: --list ..OR.. --host <hostname>"
    sys.exit(1)
| 7,096 | 2,184 |
# Scratch script: a 3x3 nested list plus two short lists; prints the
# matrix and the second short list for inspection.
l = [[1, 2, 3], [2, 2, 3], [3, 3, 3]]
print(l)
a = [2, 3]
b = [4, 5]
print(b)
"""
Representation of a recursive automaton
"""
from typing import AbstractSet
from pyformlang.finite_automaton.finite_automaton import to_symbol
from pyformlang.finite_automaton.symbol import Symbol
from pyformlang.regular_expression import Regex
from pyformlang.cfg import CFG, Epsilon
from pyformlang.rsa.box import Box
def remove_repetition_of_nonterminals_from_productions(grammar_in_text: str):
    """ Remove nonterminal repeats on the left side of the rule

    For example:
    grammar: S -> a S b
             S -> a b
    grammar after function execution: S -> a S b | a b
    """
    merged = {}
    for line in grammar_in_text.splitlines():
        # Lines without an arrow (comments, blanks) carry no production.
        if "->" not in line:
            continue
        head, body = line.split(" -> ")
        merged[head] = merged[head] + " | " + body if head in merged else body
    # Insertion order of dict keys preserves first-seen nonterminal order.
    return "\n".join(f"{head} -> {body}" for head, body in merged.items())
class RecursiveAutomaton:
    """ Represents a recursive automaton

    This class represents a recursive automaton.

    Parameters
    ----------
    labels : set of :class:`~pyformlang.finite_automaton.Symbol`, optional
        A finite set of labels for boxes
    initial_label : :class:`~pyformlang.finite_automaton.Symbol`, optional
        A start label for automaton
    boxes : set of :class:`~pyformlang.rsa.Box`, optional
        A finite set of boxes

    Raises
    ------
    ValueError
        If some label has no corresponding box.
    """

    def __init__(self,
                 labels: AbstractSet[Symbol] = None,
                 initial_label: Symbol = None,
                 boxes: AbstractSet[Box] = None):
        if labels is not None:
            labels = {to_symbol(x) for x in labels}
        self._labels = labels or set()
        if initial_label is not None:
            initial_label = to_symbol(initial_label)
            # set.add is idempotent, so no membership check is needed.
            self._labels.add(initial_label)
        self._initial_label = initial_label or Symbol("")
        self._boxes = dict()
        if boxes is not None:
            for box in boxes:
                # Normalize keys through to_symbol so lookups are consistent
                # with get_box/add_box regardless of the label's raw type.
                self._boxes.update({to_symbol(box.label): box})
                self._labels.add(to_symbol(box.label))
        # Every label must be backed by a box.
        for label in self._labels:
            if self.get_box(label) is None:
                raise ValueError(
                    "RSA must have the same number of labels and DFAs")

    def get_box(self, label: Symbol):
        """ Box by label, or None when the label is unknown """
        return self._boxes.get(to_symbol(label))

    def add_box(self, new_box: Box):
        """ Set a box

        Parameters
        -----------
        new_box : :class:`~pyformlang.rsa.Box`
            The new box
        """
        # Key through to_symbol for consistency with __init__ and get_box
        # (the original stored the raw label here, so string labels could
        # become unreachable through get_box).
        self._boxes.update({to_symbol(new_box.label): new_box})
        self._labels.add(to_symbol(new_box.label))

    def get_number_of_boxes(self):
        """ Size of set of boxes """
        return len(self._boxes)

    def change_initial_label(self, new_initial_label: Symbol):
        """ Set an initial label

        Parameters
        -----------
        new_initial_label : :class:`~pyformlang.finite_automaton.Symbol`
            The new initial label

        Raises
        ------
        ValueError
            If the label is not one of the automaton's labels.
        """
        new_initial_label = to_symbol(new_initial_label)
        if new_initial_label not in self._labels:
            raise ValueError(
                "New initial label not in set of labels for boxes")
        # Bug fix: the original validated the label but never assigned it,
        # so the initial label silently stayed unchanged.
        self._initial_label = new_initial_label

    @property
    def labels(self) -> set:
        """ The set of labels """
        return self._labels

    @property
    def boxes(self) -> dict:
        """ The mapping from label to box """
        return self._boxes

    @property
    def initial_label(self) -> Symbol:
        """ The initial label """
        return self._initial_label

    @classmethod
    def from_regex(cls, regex: Regex, initial_label: Symbol):
        """ Create a recursive automaton from regular expression

        Parameters
        -----------
        regex : :class:`~pyformlang.regular_expression.Regex`
            The regular expression
        initial_label : :class:`~pyformlang.finite_automaton.Symbol`
            The initial label for the recursive automaton

        Returns
        -----------
        rsa : :class:`~pyformlang.rsa.RecursiveAutomaton`
            The new recursive automaton built from regular expression
        """
        initial_label = to_symbol(initial_label)
        box = Box(regex.to_epsilon_nfa().minimize(), initial_label)
        return RecursiveAutomaton({initial_label}, initial_label, {box})

    @classmethod
    def from_cfg(cls, cfg: CFG):
        """ Create a recursive automaton from context-free grammar

        Parameters
        -----------
        cfg : :class:`~pyformlang.cfg.CFG`
            The context-free grammar

        Returns
        -----------
        rsa : :class:`~pyformlang.rsa.RecursiveAutomaton`
            The new recursive automaton built from context-free grammar
        """
        initial_label = to_symbol(cfg.start_symbol)
        grammar_in_true_format = \
            remove_repetition_of_nonterminals_from_productions(cfg.to_text())
        boxes = set()
        labels = set()
        notation_for_epsilon = Epsilon().to_text()
        for production in grammar_in_true_format.splitlines():
            head, body = production.split(" -> ")
            labels.add(to_symbol(head))
            # An empty body denotes an epsilon production.
            if body == "":
                body = notation_for_epsilon
            boxes.add(Box(Regex(body).to_epsilon_nfa().minimize(),
                          to_symbol(head)))
        return RecursiveAutomaton(labels, initial_label, boxes)

    def is_equivalent_to(self, other):
        """ Check whether two recursive automata are equivalent

        Parameters
        ----------
        other : :class:`~pyformlang.rsa.RecursiveAutomaton`
            The input recursive automaton

        Returns
        ----------
        are_equivalent : bool
            Whether the two recursive automata are equivalent or not
        """
        if not isinstance(other, RecursiveAutomaton):
            return False
        if self._labels != other._labels:
            return False
        # Compare the boxes label by label.
        return all(self.get_box(label) == other.get_box(label)
                   for label in self._labels)

    def __eq__(self, other):
        return self.is_equivalent_to(other)
| 6,477 | 1,803 |
from bisect import bisect
import ColladaMaterial
# Named array of points.
# Named array of points.
class PointArray():
    """A named, flat list of point components with stride/offset metadata."""

    def __init__(self, n, ofs):
        self.name = n            # attribute name, e.g. "vertexPositions"
        self.offset = int(ofs)   # index offset within an interleaved <p> tuple
        self.stride = 1          # components per point until overridden
        self.points = []         # flat list of component values
def GetFirstChildElement(node):
    """Return the first DOM child of *node* that is an element, or None."""
    for candidate in node.childNodes:
        if candidate.nodeType == candidate.ELEMENT_NODE:
            return candidate
    return None
def GetChildElements(node):
    """Return every direct DOM child of *node* that is an element node."""
    return [child for child in node.childNodes
            if child.nodeType == child.ELEMENT_NODE]
def GetSourceArray(parent, srcId):
    """Resolve a Collada <source> reference ('#id') under *parent*.

    Follows the technique_common/accessor indirection to the backing data
    node and returns its values cast according to the accessor's param
    type ('name' -> str, 'float'/'float4x4' -> float; otherwise the raw
    whitespace-normalized string). Returns [] when nothing matches.
    """
    wanted = srcId[1:]
    for src in parent.getElementsByTagName('source'):
        if src.getAttribute('id') != wanted:
            continue
        technique = src.getElementsByTagName('technique_common')[0]
        accessor = GetFirstChildElement(technique)
        sourceURL = accessor.getAttribute('source')
        count = int(accessor.getAttribute('count'))
        param = GetFirstChildElement(accessor)
        paramType = param.getAttribute('type')
        for node in GetChildElements(src):
            if node.getAttribute('id') != sourceURL[1:]:
                continue
            data = node.firstChild.data.strip().replace('\n', ' ')
            if paramType == 'name':
                data = [str(s) for s in data.split(' ')]
            elif paramType in ('float', 'float4x4'):
                data = [float(s) for s in data.split(' ')]
            return data
    return []
def GetChildArray(parent, tag, typecast):
    """Return the space-separated values of the first child <tag> element,
    each converted via *typecast*; [] when no such child exists."""
    for child in GetChildElements(parent):
        if child.tagName == tag:
            tokens = child.firstChild.data.strip().split(' ')
            return [typecast(tok) for tok in tokens]
    return []
class Skin:
    """Per-vertex skinning data pulled from a Collada <skin> element.

    Collects the bind-shape matrix, joint names, inverse bind matrices and,
    for every output vertex, exactly numWeights (weight, joint-index) pairs
    normalized to sum to 1.  (Python 2 code: zip() returns a list.)
    """

    def __init__(self, skin, numWeights, origPosMap):
        # 4x4 matrix applied to the mesh before skinning.
        self.bindShapeMatrix = GetChildArray(skin, 'bind_shape_matrix', float)
        jointURL = ""
        # <joints>: joint name array and inverse bind matrices.
        for joints in skin.getElementsByTagName('joints'):
            for inp in joints.getElementsByTagName('input'):
                semantic = inp.getAttribute('semantic')
                sourceURL = inp.getAttribute('source')
                if semantic == 'JOINT':
                    self.jointNames = GetSourceArray(skin, sourceURL)
                    jointURL = sourceURL
                elif semantic == 'INV_BIND_MATRIX':
                    self.invBindMatrices = GetSourceArray(skin, sourceURL)
                else:
                    print('Skipping input with unknown semantic ' + semantic)
        # <vertex_weights>: per-vertex joint/weight assignments.
        for vertexWeights in skin.getElementsByTagName('vertex_weights'):
            jointOffset = 0
            weightOffset = 0
            weightURL = ''
            for inp in vertexWeights.getElementsByTagName('input'):
                semantic = inp.getAttribute('semantic')
                offset = int(inp.getAttribute('offset'))
                sourceURL = inp.getAttribute('source')
                if semantic == 'JOINT':
                    if sourceURL != jointURL:
                        print('TODO: multiple jointURLs specified, need to match up indices.')
                    jointOffset = offset
                elif semantic == 'WEIGHT':
                    weightURL = sourceURL
                    weightOffset = offset
                else:
                    print('Skipping input with unknown semantic ' + semantic)
            weights = GetSourceArray(skin, weightURL)
            vertexCount = GetChildArray(vertexWeights, 'vcount', int)
            v = GetChildArray(vertexWeights, 'v', int)
            # Stride of the interleaved <v> list: one slot per input offset.
            vstride = max(jointOffset, weightOffset) + 1
            self.vertexWeightCount = numWeights
            self.vertexWeights = []
            self.jointIndices = []
            index = 0
            for vc in vertexCount:
                tempWeights = []
                tempIndices = []
                for c in range(vc):
                    tempWeights.append(weights[v[(index + c) * vstride + weightOffset]])
                    tempIndices.append(v[(index + c) * vstride + jointOffset])
                # Keep only the numWeights largest influences (sort descending).
                temp = zip(tempWeights, tempIndices)
                temp.sort()
                temp.reverse()
                tempWeights = [s[0] for s in temp][:numWeights]
                tempIndices = [s[1] for s in temp][:numWeights]
                # Pad with zeros when a vertex has fewer than numWeights influences.
                for n in range(len(tempWeights), numWeights):
                    tempWeights.append(0)
                    tempIndices.append(0)
                # Renormalize the kept weights so they sum to 1.
                weightSum = 0
                for n in range(numWeights):
                    weightSum = weightSum + tempWeights[n]
                for n in range(numWeights):
                    tempWeights[n] = tempWeights[n] / weightSum
                self.vertexWeights.extend(tempWeights)
                self.jointIndices.extend(tempIndices)
                index = index + vc
            #Expand vertex weights and joint indices according to origPosMap
            # origPosMap maps each deduplicated vertex index back to its
            # original Collada position index.
            newVertexWeights = []
            newJointIndices = []
            for i in range(len(origPosMap)):
                origIndex = int(origPosMap[i])
                for j in range(numWeights):
                    newVertexWeights.append(self.vertexWeights[origIndex * numWeights + j])
                    newJointIndices.append(self.jointIndices[origIndex * numWeights + j])
            self.vertexWeights = newVertexWeights
            self.jointIndices = newJointIndices

    def Write(self, fileHandle):
        """Append the skin fields as JSON key/value lines to fileHandle."""
        fileHandle.write(' "bindShapeMatrix" : ')
        fileHandle.write(str(self.bindShapeMatrix) + ",\n")
        fileHandle.write(' "jointNames" : ')
        fileHandle.write(str(self.jointNames).replace("'", '"') + ",\n")
        fileHandle.write(' "invBindMatrices" : ')
        fileHandle.write(str(self.invBindMatrices) + ",\n")
        fileHandle.write(' "vertexWeights" : ')
        fileHandle.write(str(self.vertexWeights) + ",\n")
        fileHandle.write(' "jointIndices" : ')
        fileHandle.write(str(self.jointIndices) + ",\n")
#TODO: passing in the doc is like begging for trouble, pass in the needed elements instead.
class Mesh:
def __init__(self, doc, node):
self.materialLUT = dict()
instanceMaterials = node.getElementsByTagName('instance_material')
for mat in instanceMaterials:
self.materialLUT[mat.getAttribute('symbol')] = mat.getAttribute('target')
geometry = None
instanceGeometryURL = ''
if node.tagName == 'instance_controller':
instanceControllerURL = node.getAttribute('url')
controllers = doc.getElementsByTagName('controller')
controller = None
for c in controllers:
if c.getAttribute('id') == instanceControllerURL[1:]:
controller = c
break
if c == None:
print("Couldn't find the controller with id '" + instanceControllerURL + "', skipping")
return
skins = c.getElementsByTagName('skin')
if len(skins) != 1:
print("Controller doesn't contain exactly one skin, skipping.")
return
instanceGeometryURL = skins[0].getAttribute('source');
elif node.tagName == 'instance_geometry':
instanceGeometryURL = node.getAttribute('url')
if len(instanceGeometryURL) != 0:
if instanceGeometryURL[0] != '#':
print('Geometry URL pointing outside of this document, skipping.')
return
geometries = doc.getElementsByTagName('geometry')
geometry = None
for g in geometries:
if g.getAttribute('id') == instanceGeometryURL[1:]:
geometry = g
break
if geometry == None:
print("Couldn't find the geometry with id '" + instanceGeometryURL + "', skipping")
return
self.faces = []
self.materials = []
self.verts = []
geometryId = geometry.getAttribute("id")
self.uniqueVerts = dict()
self.sourceArrays = dict()
self.outFileName = geometry.getAttribute('name') + '.json'
self.skin = None
self.origPosMap = dict()
self.skinNode = None
# Check if there's a skin node for this mesh.
for controller in doc.getElementsByTagName("controller"):
if self.skinNode != None:
break
controllerId = controller.getAttribute("id")
for skin in controller.getElementsByTagName("skin"):
if skin.getAttribute("source")[1:] == geometryId:
self.skinNode = skin
break
for mesh in geometry.getElementsByTagName("mesh"):
# TODO: This assumes there's only one <mesh> per <geometry>, check spec.
# Only export normals and uv's if they're required by the material.
self.needsNormals = False
self.needsUV = False
# Get all the triangles and polygons in the mesh.
polygons = mesh.getElementsByTagName("polygons")
triangles = mesh.getElementsByTagName("triangles")
for tri in triangles:
polygons.append(tri)
# Get all the materials in the mesh.
self.BuildMaterials(doc, polygons)
# Create a list of all the sources
sourceList = self.BuildSourceList(mesh, polygons)
# Look up the source and pull the data.
for srcItem in sourceList:
sourceURL = srcItem[0]
offset = srcItem[1]
targetAttr = srcItem[2]
foundSource = False
for source in mesh.getElementsByTagName('source'):
if source.getAttribute('id') == sourceURL[1:]:
foundSource = True
if not self.sourceArrays.has_key(targetAttr):
self.sourceArrays[targetAttr] = []
self.GetSrcArray(source, targetAttr, offset)
break
if not foundSource:
print("Couldn't find matching source.")
break
# Get unique indices.
for polygon in polygons:
for p in polygon.getElementsByTagName("p"):
face = p.firstChild.data.strip().split(' ');
stride = len(face) / (int(polygon.getAttribute("count")) * 3)
for i in range(0, len(face), stride):
posArr = self.sourceArrays["vertexPositions"][0]
fIndex = int(face[i+posArr.offset])
px = posArr.points[fIndex*3]
py = posArr.points[fIndex*3+1]
pz = posArr.points[fIndex*3+2]
vert = (px,py,pz)
if self.needsNormals:
for nc in range(0, len(self.sourceArrays["vertexNormals"])):
normArr = self.sourceArrays["vertexNormals"][nc]
fIndex = int(face[i+normArr.offset])
nx = normArr.points[fIndex*3]
ny = normArr.points[fIndex*3+1]
nz = normArr.points[fIndex*3+2]
vert = vert + (nx,ny,nz)
if self.needsUV:
for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])):
texArr = self.sourceArrays["vertexTextureCoords"][tn]
fIndex = int(face[i+texArr.offset])
u = texArr.points[fIndex*2]
v = texArr.points[fIndex*2+1]
vert = vert + (u,v)
index = self.GetUniqueVertexIndex(vert)
self.origPosMap[index] = face[i]
self.faces.append(index)
self.vertArrays = [None]*len(self.uniqueVerts)
for v in self.uniqueVerts.iteritems():
self.vertArrays[v[1]] = v[0]
offs = 0
arr = PointArray("vertexPositions", 0)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
arr.points.append(v[offs+2])
self.verts.append([arr])
offs += 3
if (self.needsNormals):
self.verts.append([])
for nc in range(0, len(self.sourceArrays["vertexNormals"])):
arr = PointArray("vertexNormals", 3)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
arr.points.append(v[offs+2])
self.verts[-1].append(arr)
offs += 3
if (self.needsUV):
self.verts.append([])
for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])):
arr = PointArray("vertexTextureCoords", offs)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
self.verts[-1].append(arr)
offs += 2
# If there's a skin node set, create the skin.
if self.skinNode:
self.skin = Skin(self.skinNode, 4, self.origPosMap)
def BuildSourceList(self, mesh, polygons):
# Build a list of (sourceURL, offset, targetAttr) tuples to extract.
srcArray = []
for polygon in polygons:
for input in polygon.getElementsByTagName("input"):
semantic = input.getAttribute("semantic")
offset = input.getAttribute('offset')
sourceURL = input.getAttribute('source')
targetAttr = 'vertexPositions'
if semantic == 'NORMAL':
targetAttr = 'vertexNormals'
if not self.needsNormals:
continue
elif semantic == 'TEXCOORD':
targetAttr = 'vertexTextureCoords'
if not self.needsUV:
continue
# There's an extra level of indirection for vertex semantics.
if semantic == 'VERTEX':
for vertex in mesh.getElementsByTagName('vertices'):
for input in vertex.getElementsByTagName('input'):
sourceURL = input.getAttribute('source')
semantic = input.getAttribute('semantic')
if semantic == 'NORMAL':
if not self.needsNormals:
continue
targetAttr = 'vertexNormals'
elif semantic == 'POSITION':
targetAttr = 'vertexPositions'
if [sourceURL, offset, targetAttr] not in srcArray:
srcArray.append([sourceURL, offset, targetAttr])
else:
if [sourceURL, offset, targetAttr] not in srcArray:
srcArray.append([sourceURL, offset, targetAttr])
return srcArray
def BuildMaterials(self, doc, polygons):
# Get all the materials in the mesh.
gcount = 0
for polygon in polygons:
materialSymbol = polygon.getAttribute('material')
if materialSymbol == '':
continue
materialURL = self.materialLUT[materialSymbol]
material = None
for mat in doc.getElementsByTagName('material'):
if mat.getAttribute('id') == materialURL[1:]:
material = mat;
break
if material == None:
print("Couldn't find material '" + materialURL + "'.")
instanceEffects = material.getElementsByTagName('instance_effect')
if len(instanceEffects) == 0:
print('No instance effects')
effectURL = instanceEffects[0].getAttribute('url')
if effectURL[0] != '#':
print('Effect URL points outside document.')
for fx in doc.getElementsByTagName("effect"):
fxId = fx.getAttribute('id')
if fxId == effectURL[1:]:
mat = ColladaMaterial.Material(fx, doc, self.skinNode != None)
mat.count = gcount
self.materials.append(mat)
if mat.materialType != "matte":
self.needsNormals = True
if mat.HasTextureChannel():
self.needsUV = True
break
gcount += int(polygon.getAttribute("count")) * 3
def WriteToScene(self, fileHandle, indent, outFolder):
for i in range(indent):
fileHandle.write(' ')
fileHandle.write('{ "type" : "mesh", "file" : "' + outFolder + '/' + self.outFileName + '" }')
# Write the mesh as a JSON file.
def Write(self, outFolder):
print('Writing mesh ' + outFolder + '/' + self.outFileName)
fileHandle = open(outFolder + '/' + self.outFileName, 'w')
fileHandle.write('{\n')
fileHandle.write(' "materials" : \n [\n')
for m in range(len(self.materials)):
self.materials[m].Write(outFolder)
fileHandle.write(' { "file" : "' + outFolder + '/' + self.materials[m].name + '.json", "start" : ' + str(self.materials[m].count) + ' }')
if m != len(self.materials) - 1:
fileHandle.write(',')
fileHandle.write('\n')
fileHandle.write(' ],\n')
fileHandle.write(' "indices" : ')
fileHandle.write(str(self.faces))
fileHandle.write(',\n')
if self.skin != None:
self.skin.Write(fileHandle)
for pa in range(len(self.verts)):
fileHandle.write(' "' + self.verts[pa][0].name + '" : [')
for vsi in range(len(self.verts[pa])):
fileHandle.write(str(self.verts[pa][vsi].points).replace("'", ""))
if vsi != len(self.verts[pa]) - 1:
fileHandle.write(', ')
fileHandle.write(']')
if pa != len(self.verts) - 1:
fileHandle.write(',')
fileHandle.write('\n')
fileHandle.write('\n}')
fileHandle.close()
def GetUniqueVertexIndex(self, a):
    """Return the index for vertex key `a`, assigning the next free index
    the first time a key is seen."""
    # setdefault evaluates len() before inserting, matching the original
    # two-step membership check.
    return self.uniqueVerts.setdefault(a, len(self.uniqueVerts))
# Pull a float_array from the Collada format and store it as a PointArray in the mesh.
def GetSrcArray(self, source, dstName, offset):
    """Parse the <float_array> under `source` into a PointArray named
    `dstName` and register it in self.sourceArrays[dstName].

    The accessor's `stride` attribute is honored, defaulting to 1 when
    absent.
    """
    the_array = source.getElementsByTagName("float_array")[0]
    raw = the_array.firstChild.data
    newArray = PointArray(dstName, offset)
    # BUG FIX: split() handles any run of whitespace (tabs, newlines,
    # doubled spaces); the old replace('\n', ' ').split(' ') produced
    # empty '' tokens whenever separators repeated.
    newArray.points = raw.split()
    stride_attr = source.getElementsByTagName("accessor")[0].getAttribute("stride")
    newArray.stride = int(stride_attr) if stride_attr != "" else 1
    #print("Got source " + newArray.name + ", count " + str(len(newArray.points)) + ", stride " + str(newArray.stride) + ", offset " + str(newArray.offset))
    self.sourceArrays[dstName].append(newArray)
| 17,080 | 5,163 |
import doctest
import unittest
from zope.site.folder import Folder
from zope.site.testing import siteSetUp, siteTearDown, checker
from zope.site.tests.test_site import TestSiteManagerContainer
def setUp(test=None):
    """Doctest setup: initialize the test site via zope.site.testing."""
    siteSetUp()
def tearDown(test=None):
    """Doctest teardown: undo the setup done by siteSetUp()."""
    siteTearDown()
class FolderTest(TestSiteManagerContainer):
    """Run the shared site-manager-container test suite against Folder."""

    def makeTestObject(self):
        # Hook used by TestSiteManagerContainer to build the object under test.
        return Folder()
class TestRootFolder(unittest.TestCase):
    """Both rootFolder() instances and IRootFolder must list IRoot before
    IContainer in their interface resolution order."""

    def _assert_iroot_precedes_icontainer(self, provides):
        # Shared assertion: IRoot must come earlier than IContainer.
        from zope.location.interfaces import IRoot
        from zope.container.interfaces import IContainer
        self.assertLess(provides.index(IRoot), provides.index(IContainer))

    def test_IRoot_before_IContainer_rootFolder(self):
        from zope.site.folder import rootFolder
        from zope.interface import providedBy
        provided = list(providedBy(rootFolder()).flattened())
        self._assert_iroot_precedes_icontainer(provided)

    def test_IRoot_before_IContainer_IRootFolder(self):
        from zope.site.interfaces import IRootFolder
        self._assert_iroot_precedes_icontainer(list(IRootFolder.__iro__))
def test_suite():
    """Assemble the unit tests plus the module and file doctests."""
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    suites = [
        unittest.defaultTestLoader.loadTestsFromName(__name__),
        doctest.DocTestSuite('zope.site.folder',
                             setUp=setUp, tearDown=tearDown),
        doctest.DocFileSuite("folder.txt",
                             setUp=setUp, tearDown=tearDown,
                             checker=checker, optionflags=flags),
    ]
    return unittest.TestSuite(suites)
| 1,757 | 530 |
# -*- coding: utf-8 -*-
from collections import deque, namedtuple
from functools import lru_cache
import funcy as fn
from lenses import lens, bind
import stl
def flatten_binary(phi, op, dropT, shortT):
    """Simplify an n-ary boolean node `phi` of operator class `op`.

    - operands identical to `dropT` (the identity element) are removed;
    - any operand identical to `shortT` (the absorbing element) short-circuits
      the whole node to `shortT`;
    - zero / one remaining operands collapse to `dropT` / the operand itself;
    - otherwise nested nodes of the same operator are spliced in
      (associative flattening) and a new `op` node is returned.
    """
    def expand(node):
        # Children of the same operator class are spliced in (associativity).
        return node.args if isinstance(node, op) else [node]

    args = [arg for arg in phi.args if arg is not dropT]
    if any(arg is shortT for arg in args):
        return shortT
    if not args:
        return dropT
    if len(args) == 1:
        return args[0]
    # BUG FIX: flatten the filtered `args`, not phi.args — the original
    # re-introduced the dropT operands it had just filtered out.
    return op(tuple(child for arg in args for child in expand(arg)))
class AST(object):
    """Base class for STL formula nodes.

    Provides boolean operators (|, &, ~), time shift (>>), evaluation via
    stl.pointwise_sat, and lens-based traversal helpers.  Subclasses are
    immutable namedtuples, so hashing is delegated to repr().
    """
    __slots__ = ()

    def __or__(self, other):
        # Disjunction with identity BOT and absorbing TOP.
        return flatten_binary(Or((self, other)), Or, BOT, TOP)

    def __and__(self, other):
        # Conjunction with identity TOP and absorbing BOT.
        return flatten_binary(And((self, other)), And, TOP, BOT)

    def __invert__(self):
        # Double negation elimination: ~~phi == phi.
        if isinstance(self, Neg):
            return self.arg
        return Neg(self)

    def __rshift__(self, t):
        """Prefix the formula with `t` Next operators; constants are fixed points."""
        if self in (BOT, TOP):
            return self
        phi = self
        for _ in range(t):
            phi = Next(phi)
        return phi

    def __call__(self, trace, time=0):
        # Evaluate the formula on a trace at the given time.
        return stl.pointwise_sat(self)(trace, time)

    @property
    def children(self):
        # Leaf nodes have no children; subclasses override.
        return tuple()

    def walk(self):
        """Depth-first walk of the AST, yielding every node."""
        pop = deque.pop
        children = deque([self])
        while len(children) > 0:
            node = pop(children)
            yield node
            children.extend(node.children)

    @property
    def params(self):
        """Set of Param placeholders appearing in intervals or constants."""
        def get_params(leaf):
            if isinstance(leaf, ModalOp):
                if isinstance(leaf.interval[0], Param):
                    yield leaf.interval[0]
                if isinstance(leaf.interval[1], Param):
                    yield leaf.interval[1]
            elif isinstance(leaf, LinEq):
                if isinstance(leaf.const, Param):
                    yield leaf.const
        return set(fn.mapcat(get_params, self.walk()))

    def set_params(self, val):
        """Return a copy with Params replaced by float values from `val`
        (looked up by Param object or by its string name)."""
        phi = param_lens(self)
        return phi.modify(lambda x: float(val.get(x, val.get(str(x), x))))

    @property
    def lineqs(self):
        # All linear-equality leaves of the formula.
        return set(lineq_lens.collect()(self))

    @property
    def atomic_predicates(self):
        # All atomic-proposition leaves of the formula.
        return set(AP_lens.collect()(self))

    @property
    def var_names(self):
        """Set of identifiers used by LinEq terms and atomic predicates."""
        symbols = set(bind(self.lineqs).Each().terms.Each().collect())
        symbols |= self.atomic_predicates
        return set(bind(symbols).Each().id.collect())

    def inline_context(self, context):
        """Repeatedly substitute atomic predicates via `context` until the
        formula reaches a fixed point (handles chained definitions)."""
        phi, phi2 = self, None

        def update(ap):
            return context.get(ap, ap)

        while phi2 != phi:
            phi2, phi = phi, AP_lens.modify(update)(phi)
        return phi

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class _Top(AST):
    """The "true" constant; use the module-level singleton TOP."""
    __slots__ = ()

    def __repr__(self):
        return "⊤"

    def __invert__(self):
        return BOT
class _Bot(AST):
    """The "false" constant; use the module-level singleton BOT."""
    __slots__ = ()

    def __repr__(self):
        return "⊥"

    def __invert__(self):
        return TOP
# Module-wide singleton truth constants.
TOP = _Top()
BOT = _Bot()
class AtomicPred(namedtuple("AP", ["id"]), AST):
    """A named atomic proposition identified by `id`."""
    __slots__ = ()

    def __repr__(self):
        return f"{self.id}"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))

    @property
    def children(self):
        return tuple()
class LinEq(namedtuple("LinEquality", ["terms", "op", "const"]), AST):
    """Linear (in)equality leaf: sum(terms) <op> const."""
    __slots__ = ()

    def __repr__(self):
        return " + ".join(map(str, self.terms)) + f" {self.op} {self.const}"

    @property
    def children(self):
        return tuple()

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class Var(namedtuple("Var", ["coeff", "id"])):
    """A scaled variable `coeff * id` inside a LinEq term list."""
    __slots__ = ()

    def __repr__(self):
        # Suppress coefficients of +/-1 so terms read like "x" / "-x".
        if self.coeff == 1:
            prefix = ""
        elif self.coeff == -1:
            prefix = "-"
        else:
            prefix = str(self.coeff)
        return prefix + str(self.id)
class Interval(namedtuple('I', ['lower', 'upper'])):
    """Closed time interval [lower, upper] used by modal operators."""
    __slots__ = ()

    def __repr__(self):
        return "[{},{}]".format(self.lower, self.upper)
class NaryOpSTL(namedtuple('NaryOp', ['args']), AST):
    """Base class for n-ary connectives; OP is the display symbol."""
    __slots__ = ()
    OP = "?"

    def __repr__(self):
        return f" {self.OP} ".join(f"({x})" for x in self.args)

    @property
    def children(self):
        return tuple(self.args)
class Or(NaryOpSTL):
    """N-ary disjunction."""
    __slots__ = ()
    OP = "∨"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class And(NaryOpSTL):
    """N-ary conjunction."""
    __slots__ = ()
    OP = "∧"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class ModalOp(namedtuple('ModalOp', ['interval', 'arg']), AST):
    """Base class for unary temporal operators bounded by an Interval."""
    __slots__ = ()
    OP = '?'

    def __repr__(self):
        return f"{self.OP}{self.interval}({self.arg})"

    @property
    def children(self):
        return (self.arg,)
class F(ModalOp):
    """Eventually (finally) operator."""
    __slots__ = ()
    OP = "◇"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class G(ModalOp):
    """Globally (always) operator."""
    __slots__ = ()
    OP = "□"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class Until(namedtuple('ModalOp', ['arg1', 'arg2']), AST):
    """Binary until operator: arg1 U arg2."""
    __slots__ = ()

    def __repr__(self):
        return f"({self.arg1}) U ({self.arg2})"

    @property
    def children(self):
        return (self.arg1, self.arg2)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class Neg(namedtuple('Neg', ['arg']), AST):
    """Logical negation (see AST.__invert__ for double-negation handling)."""
    __slots__ = ()

    def __repr__(self):
        return f"¬({self.arg})"

    @property
    def children(self):
        return (self.arg,)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class Next(namedtuple('Next', ['arg']), AST):
    """One-step time shift (see AST.__rshift__)."""
    __slots__ = ()

    def __repr__(self):
        return f"◯({self.arg})"

    @property
    def children(self):
        return (self.arg,)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
class Param(namedtuple('Param', ['name']), AST):
    """Named placeholder substituted later via AST.set_params."""
    __slots__ = ()

    def __repr__(self):
        return self.name

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))
@lru_cache()
def param_lens(phi, *, getter=False):
    # Lens selecting every Param node in phi; memoized per formula.
    # NOTE(review): `getter` is accepted but currently unused — confirm
    # whether callers still pass it.
    return bind(phi).Recur(Param)
def type_pred(*args):
    """Return a predicate that tests *exact* type membership in `args`
    (subclasses do not match, unlike isinstance)."""
    allowed = frozenset(args)

    def pred(obj):
        return type(obj) in allowed

    return pred
# Reusable lenses over a formula's LinEq and AtomicPred leaves.
lineq_lens = lens.Recur(LinEq)
AP_lens = lens.Recur(AtomicPred)
| 6,712 | 2,298 |
# coding: utf-8
from __future__ import unicode_literals
import logging
from spacy.kb import KnowledgeBase
from train_descriptions import EntityEncoder
import wiki_io as io
logger = logging.getLogger(__name__)
def create_kb(
    nlp,
    max_entities_per_alias,
    min_entity_freq,
    min_occ,
    entity_def_path,
    entity_descr_path,
    entity_alias_path,
    entity_freq_path,
    prior_prob_path,
    entity_vector_length,
):
    """Build a KnowledgeBase from Wikidata entity definitions and
    Wikipedia-derived alias prior probabilities."""
    kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=entity_vector_length)
    entity_list, filtered_title_to_id = _define_entities(
        nlp,
        kb,
        entity_def_path,
        entity_descr_path,
        min_entity_freq,
        entity_freq_path,
        entity_vector_length,
    )
    _define_aliases(
        kb,
        entity_alias_path,
        entity_list,
        filtered_title_to_id,
        max_entities_per_alias,
        min_occ,
        prior_prob_path,
    )
    return kb
def _define_entities(nlp, kb, entity_def_path, entity_descr_path, min_entity_freq, entity_freq_path, entity_vector_length):
    """Filter entities by mention frequency, train the description encoder,
    and load the resulting entity vectors into `kb`.

    Returns (entity_list, filtered_title_to_id) for the alias step.
    Raises ValueError when `nlp` has no pretrained word vectors.
    """
    # read the mappings from file
    title_to_id = io.read_title_to_id(entity_def_path)
    id_to_descr = io.read_id_to_descr(entity_descr_path)

    # check the length of the nlp vectors
    if "vectors" in nlp.meta and nlp.vocab.vectors.size:
        input_dim = nlp.vocab.vectors_length
        logger.info("Loaded pretrained vectors of size %s" % input_dim)
    else:
        raise ValueError(
            "The `nlp` object should have access to pretrained word vectors, "
            " cf. https://spacy.io/usage/models#languages."
        )

    logger.info("Filtering entities with fewer than {} mentions".format(min_entity_freq))
    entity_frequencies = io.read_entity_to_count(entity_freq_path)
    # filter the entities for in the KB by frequency, because there's just too much data (8M entities) otherwise
    filtered_title_to_id, entity_list, description_list, frequency_list = get_filtered_entities(
        title_to_id,
        id_to_descr,
        entity_frequencies,
        min_entity_freq
    )
    logger.info("Kept {} entities from the set of {}".format(len(description_list), len(title_to_id.keys())))

    logger.info("Training entity encoder")
    encoder = EntityEncoder(nlp, input_dim, entity_vector_length)
    encoder.train(description_list=description_list, to_print=True)

    logger.info("Getting entity embeddings")
    embeddings = encoder.apply_encoder(description_list)

    logger.info("Adding {} entities".format(len(entity_list)))
    kb.set_entities(
        entity_list=entity_list, freq_list=frequency_list, vector_list=embeddings
    )
    return entity_list, filtered_title_to_id
def _define_aliases(kb, entity_alias_path, entity_list, filtered_title_to_id, max_entities_per_alias, min_occ, prior_prob_path):
    """Load alias -> entity prior probabilities into the KB.

    NOTE(review): `entity_alias_path` and `entity_list` are accepted but not
    forwarded to _add_aliases — confirm whether they are still needed.
    """
    logger.info("Adding aliases from Wikipedia and Wikidata")
    _add_aliases(
        kb,
        entity_list=entity_list,
        title_to_id=filtered_title_to_id,
        max_entities_per_alias=max_entities_per_alias,
        min_occ=min_occ,
        prior_prob_path=prior_prob_path,
    )
def get_filtered_entities(title_to_id, id_to_descr, entity_frequencies,
                          min_entity_freq: int = 10):
    """Keep only entities that have a non-empty description and strictly more
    than `min_entity_freq` mentions.

    Returns (filtered_title_to_id, entity_ids, descriptions, frequencies),
    with the three lists index-aligned.
    """
    kept_title_to_id = dict()
    entity_ids = []
    descriptions = []
    frequencies = []
    for title, entity_id in title_to_id.items():
        frequency = entity_frequencies.get(title, 0)
        description = id_to_descr.get(entity_id, None)
        if description and frequency > min_entity_freq:
            entity_ids.append(entity_id)
            descriptions.append(description)
            frequencies.append(frequency)
            kept_title_to_id[title] = entity_id
    return kept_title_to_id, entity_ids, descriptions, frequencies
def _add_aliases(kb, entity_list, title_to_id, max_entities_per_alias, min_occ, prior_prob_path):
    """Add aliases with prior probabilities to `kb` from the prior-prob file.

    The file is sorted by alias and then by count, so each alias's candidate
    entities are accumulated and flushed when the next alias starts.
    Per alias, at most `max_entities_per_alias` entities are kept and rows
    with fewer than `min_occ` occurrences are skipped; probabilities are
    normalized by the alias's total count across *all* rows.
    """
    wp_titles = title_to_id.keys()

    def _flush(alias, counts, entities, total_count):
        # Register one completed alias group with its prior probabilities.
        if not entities:
            return
        selected_entities = []
        prior_probs = []
        for ent_count, ent_string in zip(counts, entities):
            if ent_string in wp_titles:
                selected_entities.append(title_to_id[ent_string])
                prior_probs.append(ent_count / total_count)
        if selected_entities:
            try:
                kb.add_alias(
                    alias=alias,
                    entities=selected_entities,
                    probabilities=prior_probs,
                )
            except ValueError as e:
                logger.error(e)

    # adding aliases with prior probabilities
    # we can read this file sequentially, it's sorted by alias, and then by count
    logger.info("Adding WP aliases")
    with prior_prob_path.open("r", encoding="utf8") as prior_file:
        prior_file.readline()  # skip header
        previous_alias = None
        total_count = 0
        counts = []
        entities = []
        for line in prior_file:
            splits = line.replace("\n", "").split(sep="|")
            new_alias = splits[0]
            count = int(splits[1])
            entity = splits[2]
            if new_alias != previous_alias and previous_alias:
                # done reading the previous alias --> output
                _flush(previous_alias, counts, entities, total_count)
                total_count = 0
                counts = []
                entities = []
            total_count += count
            if len(entities) < max_entities_per_alias and count >= min_occ:
                counts.append(count)
                entities.append(entity)
            previous_alias = new_alias
        # BUG FIX: the original never flushed the final alias group in the
        # file, silently dropping its entries from the KB.
        _flush(previous_alias, counts, entities, total_count)
def read_kb(nlp, kb_file):
    """Load a previously serialized KnowledgeBase from `kb_file` using
    the vocab of `nlp`."""
    kb = KnowledgeBase(vocab=nlp.vocab)
    kb.load_bulk(kb_file)
    return kb
| 5,958 | 1,864 |
from collections import deque, defaultdict
# Intended DFS node colors (the implementation tracks visit state with
# False/True rather than these constants).
GRAY, BLACK = 0, 1
class Graph:
    """Directed graph with a DFS-based topological sort."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # dictionary containing adjacency List
        self.V = vertices  # No. of vertices

    def addEdge(self, u, v):
        ''' Function to add an edge to graph '''
        self.graph[u].append(v)

    def topological(self):
        """Return a deque of nodes in topological order.

        On detecting a cycle, prints "No valid ordering exists." and
        abandons the current DFS branch (best-effort, like the original).
        """
        order, enter, state = deque(), set(self.graph), {}

        def dfs(node):
            state[node] = False  # GRAY: node is on the current DFS path
            for k in self.graph.get(node, ()):
                sk = state.get(k, None)
                # BUG FIX: the original treated unvisited children
                # (state None) the same as in-progress ones, so any edge
                # to a not-yet-visited node falsely reported a cycle and
                # aborted the traversal.
                if sk is False:
                    # Back edge to a node on the current path -> cycle.
                    print("No valid ordering exists.")
                    return
                if sk is True:
                    continue  # already fully processed
                enter.discard(k)
                dfs(k)
            order.appendleft(node)
            state[node] = True  # BLACK: node is finished

        while enter:
            dfs(enter.pop())
        return order
if __name__ == "__main__":
    # Small demo: one root with two children.
    demo = Graph(3)
    for src, dst in ((1, 2), (1, 3)):
        demo.addEdge(src, dst)
    print(demo.topological())
| 1,045 | 313 |
import threaded_printer
class Printer(threaded_printer.Printer):
    """Thin subclass that defers all behaviour to the threaded base printer."""

    def __init__(self, profile, usb_info):
        super(Printer, self).__init__(profile, usb_info)
| 178 | 63 |
# -*- coding: utf-8 -*-
import os
from flask import Flask, request, abort, jsonify
import requests
# Facebook Send API endpoint used for all outbound messages.
SEND_API = 'https://graph.facebook.com/v2.6/me/messages'

# Required configuration; the process fails fast at import time (KeyError)
# if any of these environment variables is missing.
WEBHOOK_VERIFY_TOKEN = os.environ['WEBHOOK_VERIFY_TOKEN']
PAGE_ACCESS_TOKEN = os.environ['PAGE_ACCESS_TOKEN']
APP_SECRET = os.environ['APP_SECRET']

app = Flask(__name__)
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
    """Messenger webhook: GET performs verification, POST delivers events."""
    if request.method == 'GET':  # GET for webhook verification
        return verify_webhook()

    # POST: Messenger delivers batched page events as JSON.
    assert request.is_json
    data = request.get_json()
    if data['object'] != 'page':
        abort(400)  # Bad Request
    for entry in data['entry']:
        for event in entry['messaging']:
            timestamp = event['timestamp']
            sender_id = event['sender']['id']
            recipient_id = event['recipient']['id']  # read but currently unused
            if 'message' in event:
                on_message_event(timestamp, sender_id, event['message'])
            elif 'postback' in event:
                on_postback_event(timestamp, sender_id, event['postback'])
            else:
                abort(400)  # Unknown event
    return ''
def verify_webhook(mode=None, verify_token=None):
    """Answer Messenger's webhook-verification GET request.

    BUG FIX: the route handler calls verify_webhook() with no arguments,
    but the old signature required (mode, verify_token), so every
    verification GET raised TypeError.  Both values actually come from the
    request query string; the parameters now default to None and are kept
    only for backward compatibility.

    Returns the hub.challenge value on success; aborts with 403 when the
    verify token does not match.
    """
    query_params = request.args
    hub_mode = query_params.get('hub.mode')
    hub_verify_token = query_params.get('hub.verify_token')
    if hub_verify_token != WEBHOOK_VERIFY_TOKEN:
        abort(403)  # Forbidden
    elif hub_mode == 'subscribe':
        return query_params['hub.challenge']
    # NOTE(review): falls through returning None (HTTP 500 in Flask) when
    # the token matches but hub.mode != 'subscribe' — confirm intended.
def on_message_event(timestamp, sender_id, message):
    """Dispatch an incoming message event by payload type."""
    if 'text' in message:
        handle_text_message(sender_id, message['text'])
        return
    if 'attachments' in message:
        return  # attachments are accepted but ignored
    abort(400)
def on_postback_event(timestamp, sender_id, postback):
    """Acknowledge a postback by echoing the selected payload."""
    send_text(sender_id, 'Thanks for selecting %s' % postback['payload'])
def send_text(recipient_id, text):
    # Convenience wrapper: wrap plain text in a Send API message object.
    send_message(recipient_id, {'text': text})
def send_message(recipient_id, message):
    """POST one Send API request for `recipient_id` and log the response."""
    body = {
        'recipient': {
            'id': recipient_id
        },
        'message': message,
    }
    resp = requests.post(
        SEND_API,
        params={'access_token': PAGE_ACCESS_TOKEN},
        json=body,
    )
    app.logger.info('Message posted: message = %s, response = %s', body, resp.json())
def handle_text_message(sender_id, text):
    """Route a plain-text message to one of the canned demo responses."""
    if u'吃什麼' in text:
        send_text(sender_id, u'Judy 爸爸說:不知道')
        return
    if text == 'generic':
        send_message(sender_id, demo_generic_template(sender_id, text))
        return
    send_text(sender_id, text)  # default: echo the text back
def demo_generic_template(sender_id, message):
    """Return a two-bubble generic-template payload (Oculus demo content)."""
    def bubble(title, subtitle, url, image_url, payload):
        # One carousel card with a web-URL button and a postback button.
        return {
            'title': title,
            'subtitle': subtitle,
            'item_url': url,
            'image_url': image_url,
            'buttons': [
                {
                    'type': 'web_url',
                    'url': url,
                    'title': 'Open Web URL',
                },
                {
                    'type': 'postback',
                    'title': 'Call Postback',
                    'payload': payload
                }
            ],
        }

    elements = [
        bubble('rift',
               'Next-generation virtual reality',
               'https://www.oculus.com/en-us/rift/',
               'http://messengerdemo.parseapp.com/img/rift.png',
               'Payload for first bubble'),
        bubble('touch',
               'Your Hands, Now in VR',
               'https://www.oculus.com/en-us/touch/',
               'http://messengerdemo.parseapp.com/img/touch.png',
               'Payload for second bubble'),
    ]
    return {
        'attachment': {
            'type': 'template',  # structured message
            'payload': {
                'template_type': 'generic',
                'elements': elements,
            }
        }
    }
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production.
    app.run()
| 4,661 | 1,350 |
"""Config flow for Midea Dehumidifier (Local) integration."""
from __future__ import annotations
import ipaddress
import logging
from typing import Any, Final
from homeassistant import data_entry_flow
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import (
CONF_API_VERSION,
CONF_DEVICES,
CONF_ID,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_TYPE,
CONF_UNIQUE_ID,
CONF_USERNAME,
)
import voluptuous as vol
from midea_beautiful.appliance import AirConditionerAppliance, DehumidifierAppliance
from midea_beautiful.cloud import MideaCloud
from midea_beautiful.exceptions import (
AuthenticationError,
CloudAuthenticationError,
CloudError,
MideaError,
MideaNetworkError,
ProtocolError,
RetryLaterError,
)
from midea_beautiful.lan import LanDevice
from midea_beautiful.midea import DEFAULT_APP_ID, DEFAULT_APPKEY, SUPPORTED_APPS
from custom_components.midea_dehumidifier_lan import MideaClient
from .const import ( # pylint: disable=unused-import
CONF_ADVANCED_SETTINGS,
CONF_APPID,
CONF_APPKEY,
CONF_DETECT_AC_APPLIANCES,
CONF_MOBILE_APP,
CONF_BROADCAST_ADDRESS,
CONF_TOKEN_KEY,
CONF_USE_CLOUD,
CONF_WHAT_TO_DO,
CURRENT_CONFIG_VERSION,
DEFAULT_APP,
DEFAULT_PASSWORD,
DEFAULT_USERNAME,
DOMAIN,
IGNORED_IP_ADDRESS,
TAG_CAUSE,
TAG_ID,
TAG_NAME,
)
_LOGGER = logging.getLogger(__name__)
# Values for CONF_WHAT_TO_DO when an appliance was not discovered on LAN.
IGNORE = "IGNORE"  # skip the appliance entirely
USE_CLOUD = "CLOUD"  # poll the appliance through the Midea cloud API
LAN = "LAN"  # the user supplies the appliance's IPv4 address
def _unreachable_appliance_schema(
    name: str,
):
    """Form schema for resolving an appliance that was not found on LAN:
    ignore it, provide its IPv4 address (optionally with token/key), or
    fall back to cloud polling.  `name` pre-fills the appliance name."""
    return vol.Schema(
        {
            vol.Optional(CONF_WHAT_TO_DO, default=LAN): vol.In(
                {
                    IGNORE: "Ignore appliance",
                    LAN: "Provide appliance's IPv4 address",
                    USE_CLOUD: "Use cloud API to poll devices",
                }
            ),
            vol.Optional(
                CONF_IP_ADDRESS,
                description={"suggested_value": IGNORED_IP_ADDRESS},
            ): str,
            vol.Optional(CONF_NAME, default=name): str,
            vol.Optional(CONF_TOKEN): str,
            vol.Optional(CONF_TOKEN_KEY): str,
        }
    )
# pylint: disable=too-many-arguments
def _advanced_settings_schema(
    username: str,
    password: str,
    appkey: str,
    appid: int,
    broadcast_address: str,
    use_cloud: bool,
):
    """Form schema for the advanced-settings step; each argument pre-fills
    the corresponding field's default value."""
    return vol.Schema(
        {
            vol.Required(CONF_USERNAME, default=username): str,
            vol.Required(CONF_PASSWORD, default=password): str,
            vol.Required(CONF_APPKEY, default=appkey): str,
            vol.Required(CONF_APPID, default=appid): int,
            vol.Optional(CONF_BROADCAST_ADDRESS, default=broadcast_address): str,
            vol.Required(CONF_USE_CLOUD, default=use_cloud): bool,
            vol.Required(CONF_DETECT_AC_APPLIANCES, default=False): bool,
        }
    )
def _reauth_schema(
    password: str,
    appkey: str,
    appid: int,
):
    """Form schema for re-authentication (password and app credentials)."""
    return vol.Schema(
        {
            vol.Required(CONF_PASSWORD, default=password): str,
            vol.Required(CONF_APPKEY, default=appkey): str,
            vol.Required(CONF_APPID, default=appid): int,
        }
    )
def _user_schema(username: str, password: str, app: str):
    """Form schema for the initial step: cloud credentials, mobile-app
    selection, and an opt-in for the advanced-settings step."""
    return vol.Schema(
        {
            vol.Required(CONF_USERNAME, default=username): str,
            vol.Required(CONF_PASSWORD, default=password): str,
            vol.Optional(CONF_MOBILE_APP, default=app): vol.In(SUPPORTED_APPS.keys()),
            vol.Required(CONF_ADVANCED_SETTINGS, default=False): bool,
        }
    )
class _FlowException(Exception):
def __init__(self, message, cause: str = None) -> None:
super().__init__()
self.message = message
self.cause = cause
# pylint: disable=too-many-instance-attributes
class MideaLocalConfigFlow(ConfigFlow, domain=DOMAIN):
    """
    Configuration flow for Midea dehumidifiers on local network uses discovery based on
    Midea cloud, so it first requires credentials for it.
    If some appliances are registered in the cloud, but not discovered, configuration
    flow will prompt for additional information.
    """

    VERSION = CURRENT_CONFIG_VERSION

    # Flow state shared between the steps (class-level defaults).
    cloud: MideaCloud | None = None  # type: ignore  # active cloud session
    appliance_idx = -1  # index of the appliance currently being resolved
    appliances: list[LanDevice] = []  # appliances known from cloud/LAN discovery
    devices_conf: list[dict] = []  # per-appliance configuration built by the flow
    conf = {}  # accumulated global configuration
    advanced_settings = False  # whether the advanced-settings form is active
    client: Final = MideaClient()  # wrapper around the midea_beautiful API
    error_cause: str = ""  # detail string exposed via the TAG_CAUSE placeholder
    errors: dict = {}  # form errors for the current step

    def _supported_appliance(self, appliance: LanDevice) -> bool:
        """Checks if appliance is supported by integration"""
        aircon = False
        if self.conf.get(CONF_DETECT_AC_APPLIANCES, False):
            aircon = AirConditionerAppliance.supported(appliance.type)
        return aircon or DehumidifierAppliance.supported(appliance.type)

    def _validate_appliance(self, appliance: LanDevice, conf: dict):
        """
        Validates that appliance configuration is correct and matches physical
        device.  Raises _FlowException with a form error key on failure.
        """
        assert self.cloud
        use_cloud = conf.get(CONF_USE_CLOUD, False)
        # Explicitly ignored appliances, and address-less ones not using the
        # cloud, are skipped without validation.
        if appliance.address == IGNORED_IP_ADDRESS or (
            appliance.address is None and not use_cloud
        ):
            _LOGGER.debug("Ignored appliance with id=%s", appliance.appliance_id)
            return
        try:
            if use_cloud:
                discovered = self.client.appliance_state(
                    cloud=self.cloud,
                    use_cloud=use_cloud,
                    appliance_id=appliance.appliance_id,
                )
            else:
                # Validate the address syntactically before contacting it.
                try:
                    ipaddress.IPv4Address(appliance.address)
                except Exception as ex:
                    raise _FlowException(
                        "invalid_ip_address", appliance.address
                    ) from ex
                discovered = self.client.appliance_state(
                    address=appliance.address,
                    cloud=self.cloud,
                )
        # Map library errors onto form error keys for the UI.
        except ProtocolError as ex:
            raise _FlowException("connection_error", str(ex)) from ex
        except AuthenticationError as ex:
            raise _FlowException("invalid_auth", str(ex)) from ex
        except MideaNetworkError as ex:
            raise _FlowException("cannot_connect", str(ex)) from ex
        except MideaError as ex:
            raise _FlowException("not_discovered", str(ex)) from ex
        if discovered is None:
            raise _FlowException("not_discovered", appliance.address)
        appliance.update(discovered)

    def _connect_and_discover(self: MideaLocalConfigFlow):
        """Validates that cloud credentials are valid and discovers local appliances"""
        cloud = self.client.connect_to_cloud(
            account=self.conf[CONF_USERNAME],
            password=self.conf[CONF_PASSWORD],
            appkey=self.conf[CONF_APPKEY],
            appid=self.conf[CONF_APPID],
        )
        addresses = self.conf.get(CONF_BROADCAST_ADDRESS, [])
        if isinstance(addresses, str):
            addresses = [addresses]
        if appliances := self.client.find_appliances(cloud, addresses=addresses):
            # One config dict per discovered appliance, filled in later.
            self.devices_conf = [{} for _ in appliances]
        else:
            self.devices_conf = []
        self.appliances = appliances
        self.cloud = cloud

    async def _validate_discovery_phase(self, user_input: dict[str, Any] | None):
        """Merge `user_input` into self.conf, run cloud discovery, and
        route to the next step (advanced settings / unreachable appliance /
        entry creation)."""
        assert user_input is not None
        if self.advanced_settings:
            assert self.conf is not None
            self.conf[CONF_APPKEY] = user_input[CONF_APPKEY]
            self.conf[CONF_APPID] = user_input[CONF_APPID]
            if address := user_input.get(CONF_BROADCAST_ADDRESS):
                try:
                    ipaddress.IPv4Address(address)
                except Exception as ex:
                    raise _FlowException("invalid_ip_address", address) from ex
                self.conf[CONF_BROADCAST_ADDRESS] = address
            self.conf[CONF_USE_CLOUD] = user_input[CONF_USE_CLOUD]
            self.conf[CONF_DETECT_AC_APPLIANCES] = user_input[CONF_DETECT_AC_APPLIANCES]
        else:
            self.conf = user_input
            self.conf[CONF_USE_CLOUD] = False
            self.conf[CONF_DETECT_AC_APPLIANCES] = False
            # Resolve the mobile app selection into appkey/appid values.
            app = user_input.get(CONF_MOBILE_APP, DEFAULT_APP)
            self.conf.update(SUPPORTED_APPS.get(app, SUPPORTED_APPS[DEFAULT_APP]))
            if user_input.get(CONF_ADVANCED_SETTINGS):
                return await self.async_step_advanced_settings()

        self.appliance_idx = -1
        await self.hass.async_add_executor_job(self._connect_and_discover)
        if self.conf[CONF_USE_CLOUD]:
            # Cloud polling: every appliance uses the cloud connection.
            for i, appliance in enumerate(self.appliances):
                self.devices_conf[i][CONF_USE_CLOUD] = True
        else:
            # Find the first supported appliance without a LAN address.
            for i, appliance in enumerate(self.appliances):
                if self._supported_appliance(appliance):
                    if not appliance.address:
                        self.appliance_idx = i
                        break
        if self.appliance_idx >= 0:
            return await self.async_step_unreachable_appliance()
        return await self._async_add_entry()

    def _process_exception(self, ex: Exception):
        """Translate an exception into form errors; re-raise if unknown."""
        if isinstance(ex, _FlowException):
            self.error_cause = str(ex.cause)
            self.errors["base"] = ex.message
        elif isinstance(ex, CloudAuthenticationError):
            self.error_cause = f"{ex.error_code} - {ex.message}"
            self.errors["base"] = "invalid_auth"
        elif isinstance(ex, CloudError):
            self.error_cause = f"{ex.error_code} - {ex.message}"
            self.errors["base"] = "midea_client"
        elif isinstance(ex, RetryLaterError):
            self.error_cause = f"{ex.error_code} - {ex.message}"
            self.errors["base"] = "retry_later"
        elif isinstance(ex, MideaError):
            self.error_cause = f"{ex.message}"
            self.errors["base"] = "midea_client"
        else:
            raise ex

    async def _do_validate(self, user_input: dict[str, Any]):
        """Run the discovery phase, converting exceptions into form errors.
        Returns a flow result or None when validation failed."""
        try:
            return await self._validate_discovery_phase(user_input)
        except Exception as ex:  # pylint: disable=broad-except
            self._process_exception(ex)
        return None

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> data_entry_flow.FlowResult:
        """Initial step: collect Midea cloud credentials and app choice."""
        self.advanced_settings = False
        # Only a single instance of the integration is supported.
        if self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        self.errors = {}
        self.error_cause = ""

        username = DEFAULT_USERNAME
        password = DEFAULT_PASSWORD
        app = DEFAULT_APP
        if user_input is not None:
            username = user_input.get(CONF_USERNAME, username)
            password = user_input.get(CONF_PASSWORD, password)
            app = user_input.get(CONF_MOBILE_APP, app)
            res = await self._do_validate(user_input)
            if res:
                return res

        return self.async_show_form(
            step_id="user",
            data_schema=_user_schema(username=username, password=password, app=app),
            description_placeholders=self._placeholders(),
            errors=self.errors,
        )

    async def async_step_advanced_settings(
        self, user_input: dict[str, Any] | None = None
    ):
        """Step for managing advanced settings"""
        self.errors = {}
        self.error_cause = ""
        self.advanced_settings = True
        if user_input is not None:
            res = await self._do_validate(user_input)
            if res:
                return res
        else:
            user_input = {}

        # Pre-fill the form from the last submission or the current config.
        username = user_input.get(
            CONF_USERNAME, self.conf.get(CONF_USERNAME, DEFAULT_USERNAME)
        )
        password = user_input.get(
            CONF_PASSWORD, self.conf.get(CONF_PASSWORD, DEFAULT_PASSWORD)
        )
        appkey = user_input.get(CONF_APPKEY, DEFAULT_APPKEY)
        appid = user_input.get(CONF_APPID, DEFAULT_APP_ID)
        broadcast_address = user_input.get(
            CONF_BROADCAST_ADDRESS, self.conf.get(CONF_BROADCAST_ADDRESS, "")
        )
        use_cloud = user_input.get(CONF_USE_CLOUD, self.conf.get(CONF_USE_CLOUD, False))

        return self.async_show_form(
            step_id="advanced_settings",
            data_schema=_advanced_settings_schema(
                username=username,
                password=password,
                appkey=appkey,
                appid=appid,
                broadcast_address=broadcast_address,
                use_cloud=use_cloud,
            ),
            description_placeholders=self._placeholders(),
            errors=self.errors,
        )

    async def async_step_unreachable_appliance(
        self, user_input: dict[str, Any] | None = None
    ):
        """Manage the appliances that were not discovered automatically on LAN."""
        errors: dict = {}
        self.error_cause = ""
        appliance = self.appliances[self.appliance_idx]
        device_conf = self.devices_conf[self.appliance_idx]

        if user_input is not None:
            what_to_do = user_input.get(CONF_WHAT_TO_DO, LAN)
            appliance.address = (
                user_input.get(CONF_IP_ADDRESS, IGNORED_IP_ADDRESS)
                if what_to_do == LAN
                else IGNORED_IP_ADDRESS
            )
            appliance.name = user_input.get(CONF_NAME, appliance.name)
            appliance.token = user_input.get(CONF_TOKEN, "")
            appliance.key = user_input.get(CONF_TOKEN_KEY, "")
            device_conf[CONF_USE_CLOUD] = what_to_do == USE_CLOUD
            try:
                await self.hass.async_add_executor_job(
                    self._validate_appliance,
                    appliance,
                    device_conf,
                )
                # Find next unreachable appliance
                self.appliance_idx = self.appliance_idx + 1
                while self.appliance_idx < len(self.appliances):
                    # NOTE(review): this checks the *previous* `appliance`
                    # object rather than self.appliances[self.appliance_idx]
                    # — confirm whether the current candidate was intended.
                    if self._supported_appliance(appliance):
                        if self.appliances[self.appliance_idx].address is None:
                            return await self.async_step_unreachable_appliance()
                    self.appliance_idx = self.appliance_idx + 1
                # If no unreachable appliances, create entry
                if self.appliance_idx >= len(self.appliances):
                    return await self._async_add_entry()
                appliance = self.appliances[self.appliance_idx]
            except _FlowException as ex:
                self.error_cause = str(ex.cause)
                errors["base"] = ex.message

        name = appliance.name
        return self.async_show_form(
            step_id="unreachable_appliance",
            data_schema=_unreachable_appliance_schema(name),
            description_placeholders=self._placeholders(appliance=appliance),
            errors=errors,
        )

    def _placeholders(self, appliance: LanDevice = None):
        """Build description placeholders (error cause, appliance id/name)."""
        placeholders = {
            TAG_CAUSE: self.error_cause or "",
        }
        if appliance:
            placeholders[TAG_ID] = appliance.unique_id
            placeholders[TAG_NAME] = appliance.name
        return placeholders

    async def _async_add_entry(self):
        """Finalize the flow: persist device configs and create (or update
        and reload) the config entry."""
        assert self.conf is not None
        for i, appliance in enumerate(self.appliances):
            if not self._supported_appliance(appliance):
                continue
            # Record only appliances polled via cloud or with a usable address.
            if self.devices_conf[i].get(CONF_USE_CLOUD, False) or (
                appliance.address and appliance.address != IGNORED_IP_ADDRESS
            ):
                self.devices_conf[i].update(
                    {
                        CONF_IP_ADDRESS: appliance.address,
                        CONF_UNIQUE_ID: appliance.unique_id,
                        CONF_ID: appliance.appliance_id,
                        CONF_NAME: appliance.name,
                        CONF_TYPE: appliance.type,
                        CONF_TOKEN: appliance.token,
                        CONF_TOKEN_KEY: appliance.key,
                        CONF_API_VERSION: appliance.version,
                    }
                )
        self.conf[CONF_DEVICES] = self.devices_conf

        # The cloud account name is the integration's unique id.
        existing_entry = await self.async_set_unique_id(self.conf[CONF_USERNAME])
        if existing_entry:
            self.hass.config_entries.async_update_entry(
                entry=existing_entry,
                data=self.conf,
            )
            # Reload the config entry otherwise devices will remain unavailable
            self.hass.async_create_task(
                self.hass.config_entries.async_reload(existing_entry.entry_id)
            )
            return self.async_abort(reason="reauth_successful")
        if len(self.devices_conf) == 0:
            return self.async_abort(reason="no_configured_devices")
        return self.async_create_entry(
            title="Midea Dehumidifiers",
            data=self.conf,
        )

    async def async_step_reauth(self, config):
        """Handle a reauthorization request for the Midea cloud account."""
        self.conf = {**config}
        return await self.async_step_reauth_confirm()

    async def async_step_reauth_confirm(self, user_input: dict[str, Any] | None = None):
        """Handle reauthorization flow."""
        self.errors = {}
        username = self.conf.get(CONF_USERNAME, DEFAULT_USERNAME)
        password = ""
        appkey = self.conf.get(CONF_APPKEY, DEFAULT_APPKEY)
        appid = self.conf.get(CONF_APPID, DEFAULT_APP_ID)
        if user_input is not None:
            password = user_input.get(CONF_PASSWORD, "")
            appkey = user_input.get(CONF_APPKEY, DEFAULT_APPKEY)
            appid = user_input.get(CONF_APPID, DEFAULT_APP_ID)
            try:
                # Verify the new credentials before storing them.
                self.client.connect_to_cloud(
                    account=username,
                    password=password,
                    appkey=appkey,
                    appid=appid,
                )
            except Exception as ex:  # pylint: disable=broad-except
                self._process_exception(ex)
            else:
                self.conf[CONF_USERNAME] = username
                self.conf[CONF_PASSWORD] = password
                self.conf[CONF_APPKEY] = appkey
                self.conf[CONF_APPID] = appid
                return await self._async_add_entry()

        return self.async_show_form(
            step_id="reauth_confirm",
            data_schema=_reauth_schema(
                password=password,
                appkey=appkey,
                appid=appid,
            ),
            description_placeholders=self._placeholders(),
            errors=self.errors,
        )
| 18,847 | 5,667 |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
#######################################################################################################################
# DESCRIPTION:
#######################################################################################################################
# Solver for N-Queens using Minisat
#######################################################################################################################
# AUTHORS:
#######################################################################################################################
# Carlos Serrada, 13-11347, <cserradag96@gmail.com>
# Juan Ortiz, 13-11021 <ortiz.juan14@gmail.com>
#######################################################################################################################
# PATH:
#######################################################################################################################
from sys import path # System path
from os import getcwd # Current path
from os.path import join # Join paths
# Add custom lib path to application path
path.append(join(getcwd(), "lib"))
#######################################################################################################################
# DEPENDENCIES:
#######################################################################################################################
import sys
from nqueens import *
#######################################################################################################################
# MAIN:
#######################################################################################################################
if __name__ == "__main__":
size = sys.argv[1]
printStatus("Generando CNF")
puzzle = NQueens(readNQ(size))
printStatus("Guardando CNF")
writeFile(puzzle.cnf, "input.txt")
printStatus("Ejecutando minisat")
minisat("input.txt", "output.txt")
printStatus("Generando imagen")
writeFile(puzzle.genBitmap("output.txt"), namePBM(size))
#######################################################################################################################
# :)
#######################################################################################################################
| 2,297 | 471 |
"""
Django settings for codango project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from __future__ import absolute_import
import os
import cloudinary
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
from django.contrib.messages import constants as message_constants
from celery.schedules import crontab
# Project root: three directory levels above this settings module.
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Front-end packages installed/managed through django-bower.
BOWER_INSTALLED_APPS = (
    'mdi',
    'moment',
    'jquery',
    'bootstrap',
    'ace-builds',
)
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'static')
# context processor for django-endless-pagination
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
# Spinner markup shown by endless-pagination while the next page loads.
ENDLESS_PAGINATION_LOADING = """<img src="/static/img/ajax-loader.gif" alt="loading"/>"""
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # Lets collectstatic pick up bower components as static files.
    'djangobower.finders.BowerFinder',
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): evaluates to None when the env var is unset — confirm the
# deployment always provides SECRET_KEY.
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'account',
    'resources',
    'userprofile',
    'comments',
    'votes',
    # Third-party apps
    'bootstrapform',
    'cloudinary',
    'djangobower',
    'endless_pagination',
    'djcelery'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'codango.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# BUG FIX: the setting Django actually reads is AUTHENTICATION_BACKENDS
# (plural); the misspelled AUTHENTICATION_BACKEND was silently ignored and
# Django fell back to its default value.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
WSGI_APPLICATION = 'codango.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATIC_URL = '/static/'
APPEND_SLASH = False
STATIC_ROOT = 'staticfiles'
# WhiteNoise serves gzipped, hash-named static files in production.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Cloudinary credentials come from the environment; never hard-code them.
cloudinary.config(
    cloud_name=os.getenv('cloud_name'),
    api_key=os.getenv('api_key'),
    api_secret=os.getenv('api_secret')
)
# custom message tag for django messaging middleware
# (maps Django's ERROR level onto Bootstrap's "danger" CSS class)
MESSAGE_TAGS = {
    message_constants.ERROR: 'danger'
}
# Custom Email
ADMIN_EMAIL = 'olufunmilade.oshodi@andela.com'
CODANGO_EMAIL = 'noreply@codango.com'
# Celery configuration
# The backend used to store task results using RabbitMQ as a broker
# This sends results back as AMQP messages
CELERY_RESULT_BACKEND = 'amqp'
# Scheduling periodic task with Celery
CELERYBEAT_SCHEDULE = {
    # Executes every sunday midnight
    'popular-post-updates': {
        'task': 'resources.tasks.send_recent_posts',
        'schedule': crontab(),
        'args': (ADMIN_EMAIL,),
    },
}
# Celery Test Runner for unit tests
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
| 4,793 | 1,659 |
#
# PySNMP MIB module DGS1100-24P-MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DGS1100-24P-MGMT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:30:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: this module is pysmi-generated. `mibBuilder` is injected by the
# PySNMP MIB loading machinery when the module is executed; it is not
# imported here. Do not hand-edit symbol names — they must match the MIB.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
dgs1100_24P, = mibBuilder.importSymbols("DGS1100PRIMGMT-MIB", "dgs1100-24P")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, ObjectIdentity, MibIdentifier, NotificationType, IpAddress, TimeTicks, Unsigned32, Gauge32, Counter32, Bits, Counter64, Integer32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "NotificationType", "IpAddress", "TimeTicks", "Unsigned32", "Gauge32", "Counter32", "Bits", "Counter64", "Integer32", "iso")
TextualConvention, DisplayString, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "MacAddress", "RowStatus")
# Module identity for the D-Link DGS1100-24P L2 management MIB subtree.
swL2MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 134, 9, 9))
if mibBuilder.loadTexts: swL2MgmtMIB.setLastUpdated('201404260000Z')
if mibBuilder.loadTexts: swL2MgmtMIB.setOrganization('D-Link Corp.')
# Octet string encoding a set of ports; limited to 127 octets.
class PortList(OctetString):
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 127)
# Unconstrained VLAN index value.
class VlanIndex(Unsigned32):
    pass
# IEEE 802.1Q VLAN ID: valid range 1..4094.
class VlanId(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094)
mibBuilder.exportSymbols("DGS1100-24P-MGMT-MIB", VlanId=VlanId, swL2MgmtMIB=swL2MgmtMIB, VlanIndex=VlanIndex, PYSNMP_MODULE_ID=swL2MgmtMIB, PortList=PortList)
| 2,343 | 950 |
from .metrics import _get_score_metric | 38 | 12 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for the wappalyze framework-identifier tool."""
import os

import setuptools

# Long description shown on PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Read install requirements from requirements.txt next to this file, if it
# exists. os.path.join replaces the original fragile string concatenation.
thelibFolder = os.path.dirname(os.path.realpath(__file__))
requirementPath = os.path.join(thelibFolder, 'requirements.txt')
install_requires = []  # Examples: ["gunicorn", "docutils>=0.3", "lxml==0.5a7"]
if os.path.isfile(requirementPath):
    with open(requirementPath, encoding="utf-8") as f:
        install_requires = f.read().splitlines()

setuptools.setup(
    name="wappalyze",
    version="1.6",
    author="Shaddy Garg",
    author_email="shaddygarg1@gmail.com",
    description="Framework Identifier tool",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/shaddygarg/framework-identifier",
    packages=setuptools.find_packages(),
    package_dir={'wappalyze': 'wappalyze'},
    # Ship the fingerprint database alongside the package code.
    package_data={'wappalyze': ['apps.json']},
    install_requires=install_requires,
    scripts=['wapp'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 1,136 | 388 |
import math
import sqlite3
import re
DATABASE_PATH = "line1.db"
class Convert:
    """Parse pick-and-place exports (Altium, KiCad, Ultiboard, .mnt) into a
    common list-of-dicts form and provide board-geometry helpers."""

    # Column maps per source format: index (or index pair) of each field.
    model_altium = {"desc": 0, "designator": (1, 10), "position": (2, 3), "rotation": 9}
    model_kicad = {"desc": 0, "designator": (1, 2), "position": (3, 4), "rotation": 5}
    model_mnt = {"desc": 0, "designator": (4, 5), "position": (1, 2), "rotation": 3}
    model_ultiboard = {"desc": 0, "designator": (1, 7), "position": (2, 3), "rotation": 4}

    def __init__(self, path: str):
        self.path = path

    def parse(self) -> list:
        """Parse data from pick and place file.

        Returns:
            A list of dicts with keys "desc", "designator", "position"
            (list of two floats) and "rotation" (float).

        Raises:
            ValueError: when the file format is not recognized.
        """
        # TODO Work only for 4 types of file
        # TODO Read already generated file
        with open(self.path, "r") as file:
            lines = file.readlines()
        lines = [line.replace("\n", "") for line in lines]  # Remove new line
        if lines[0].split() == ['Designator', 'Footprint', 'Mid', 'X', 'Mid', 'Y',
                                'Ref', 'X', 'Ref', 'Y', 'Pad', 'X', 'Pad', 'Y', 'TB', 'Rotation', 'Comment']:
            file_model = self.model_altium
            lines = [[s.strip() for s in line.split(" ") if s] for line in lines]
            lines = lines[2:]  # Remove header
        # BUG FIX: newlines are stripped above, so the original comparison
        # against "...,Side\n" could never match; compare without the newline.
        elif lines[0] == "Ref,Val,Package,PosX,PosY,Rot,Side":
            file_model = self.model_kicad
            lines = [line.replace('"', "") for line in lines]
            lines = [line.split(",") for line in lines]
            lines = lines[1:]  # Remove header
        elif lines[0] == "Ultiboard Information Export File":
            file_model = self.model_ultiboard
            lines = lines[9:]  # Remove header
            lines = [line.split() for line in lines]
        elif self.path.endswith("mnt"):
            file_model = self.model_mnt
            lines = [line.replace("-", " ") for line in lines]
            lines = [line.split() for line in lines]
        else:
            raise ValueError("Unknown file model")
        data = list()
        for line in lines:
            if len(line) > 0:
                entry = dict()
                # Letter prefix of the designator (e.g. "C" from "C12") ...
                entry["desc"] = "".join(re.findall("[a-zA-Z]", line[file_model["desc"]]))
                digits = re.findall(r"\d+", line[file_model["desc"]])
                if len(digits) > 0:
                    # ... plus the zero-padded number for stable sorting.
                    entry["desc"] += digits[0].zfill(3)
                entry["designator"] = f"{line[int(file_model['designator'][0])]} {line[int(file_model['designator'][1])]}"
                entry["position"] = [float(re.findall(r"\d+\.\d+|\d+", line[i])[0]) for i in file_model["position"]]
                entry["rotation"] = float(line[file_model["rotation"]])
                data.append(entry)
        return data

    @staticmethod
    def search(designator: str) -> list:
        """Look up a designator in the chip library database.

        Splits the designator on '_', ' ' and ':' and successively narrows
        the chip_lib rows to those whose name contains each token.
        """
        tokens = re.split("[_ :]", designator)
        conn = sqlite3.connect(DATABASE_PATH)
        try:
            rows = conn.cursor().execute("SELECT * FROM chip_lib;").fetchall()
        finally:
            # BUG FIX: the connection was never closed (resource leak).
            conn.close()
        for token in tokens:
            # Keep only rows whose chip name (column 2) contains this token;
            # if nothing matches, keep the previous candidate set.
            hits = [row for row in rows if token.upper() in row[2].upper()]
            if len(hits) > 0:
                rows = hits
        return rows

    @staticmethod
    def panel_dimensions(points: list) -> tuple:
        """Return (width, height) of the bounding box of *points*, rounded to 3 decimals."""
        x = [p[0] for p in points]
        y = [p[1] for p in points]
        x_min, y_min = min(x), min(y)
        x_max, y_max = max(x), max(y)
        return round(abs(x_max - x_min), 3), round(abs(y_max - y_min), 3)

    @staticmethod
    def rotate(origin: tuple, points: list, angle: int) -> list:
        """Rotate a list of points clockwise by a given angle (degrees) around *origin*."""
        new_pos = list()
        angle = math.radians(-angle)  # negate: clockwise rotation
        for p in points:
            ox, oy = origin
            px, py = p
            qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
            qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
            new_pos.append([qx, qy])
        return new_pos
| 4,089 | 1,369 |
from logging import Logger
import paho.mqtt.client as mqtt
from ircodec.command import CommandSet
import os
from exceptions import CommandNotFound, \
CommandFileAccess
class Device():
    """
    MQTT-connected IR remote device.

    Loads (or creates) an ircodec CommandSet for the device, connects to the
    MQTT broker, listens for command messages on the device command topic and
    emits the corresponding IR packets, publishing the outcome on the result
    topic. Device status is published on the status topic with a last-will
    OFFLINE message.
    """
    # Topic suffixes appended to the device base topic.
    STATUS_TOPIC = 'status'
    CMD_TOPIC = 'command'
    RESULT_TOPIC = 'result'
    # Status payloads.
    ONLINE_MSG = 'ONLINE'
    OFFLINE_MSG = 'OFFLINE'
    # Command result payloads.
    SUCCESS_MSG = 'done'
    ERROR_MSG = 'unsupported'

    def __init__(self, logger, appConfig, devConfig, isNew=False):
        """
        Constructor.

        Params:
            logger: The logger.
            appConfig: The application configuration.
            devConfig: The device configuration.
            isNew: The flag indicating if the device is a new one,
                   or an existing command set exists.

        Raise:
            CommandFileAccess if an existing command set file cannot be read.
        """
        self.config = devConfig
        self.logger = logger.getLogger(f"{devConfig['location']}."
                                       f"{devConfig['name']}")
        if isNew:
            self.logger.info('Creating new device')
            name = self.config['commandSet']['model']
            emitter = self.config['commandSet']['emitterGpio']
            receiver = self.config['commandSet']['receiverGpio']
            description = self.config['commandSet']['description']
            self.commandSet = CommandSet(name, emitter_gpio=emitter,
                                         receiver_gpio=receiver,
                                         description=description)
        else:
            self.logger.info('Loading existing device')
            manufacturer = self.config['commandSet']['manufacturer']
            model = self.config['commandSet']['model']
            try:
                self.commandSet = CommandSet.load(os.path.join('./commandSets',
                                                 manufacturer, f"{model}."
                                                 f"json"))
            except Exception as err:
                # Chain the original cause for easier debugging.
                raise CommandFileAccess('unable to access the '
                                        'command file.') from err
        self.baseTopic = f"{self.config['topicPrefix']}/{self.config['location']}/{self.config['name']}/"  # noqa: E501
        self._initMqttClient(appConfig.getUserName(),
                             appConfig.getUserPassword(),
                             appConfig.getBrokerHostname(),
                             appConfig.getBrokerPort())

    def _initMqttClient(self, userName, userPassword,
                        brokerIp, brokerPort):
        """
        Initialize the MQTT client and connect to the broker.

        Params:
            userName: The user name for connecting to the broker.
            userPassword: The user password for connecting to the broker.
            brokerIp: The broker hostname or IP address.
            brokerPort: The broker port.
        """
        self.client = mqtt.Client(client_id=f"{self.config['location']}."
                                            f"{self.config['name']}")
        self.client.on_connect = self._on_connect
        self.client.on_disconnect = self._on_disconnect
        self.client.on_message = self._on_message
        self.client.on_publish = self._on_publish
        self.client.on_subscribe = self._on_subscribe
        self.client.on_log = self._on_log
        # Last will: broker marks the device OFFLINE if the connection drops.
        willTopic = self.baseTopic + self.STATUS_TOPIC
        self.client.will_set(willTopic, self.OFFLINE_MSG,
                             self.config['lastWill']['qos'],
                             self.config['lastWill']['retain'])
        self.client.username_pw_set(userName, userPassword)
        # TODO: Implement switch for secure or not.
        # self.client.tls_set()
        # self.client.tls_insecure_set(True)
        self.logger.info(f"Connecting to {brokerIp}:{brokerPort}")
        # NOTE(review): logging the password, even at debug level, is a
        # security risk — consider removing.
        self.logger.debug(f"Connecting as {userName} with password "
                          f"{userPassword}")
        self.client.connect(brokerIp, port=brokerPort)

    def _publishCmdResult(self, success):
        """
        Publish a command result on the result topic.

        Params:
            success: The flag indicating to send success
                     or fail result.
        """
        resultTopic = self.baseTopic + self.RESULT_TOPIC
        if success:
            self.logger.info('Command sent')
            self.client.publish(resultTopic, payload=self.SUCCESS_MSG)
        else:
            self.logger.warning('Command unsupported')
            self.client.publish(resultTopic, payload=self.ERROR_MSG)

    def _on_connect(self, client, usrData, flags, rc):
        """
        The on connect callback: publish ONLINE and subscribe to commands.

        Params:
            client: The mqtt client.
            usrData: User data.
            flags: The connection flags.
            rc: The connection result.
        """
        self.logger.info('Connected')
        self.logger.debug(f"rc {rc}")
        # Retained so late subscribers immediately see the device status.
        statusTopic = self.baseTopic + self.STATUS_TOPIC
        self.client.publish(statusTopic, payload=self.ONLINE_MSG,
                            qos=1, retain=True)
        cmdTopic = self.baseTopic + self.CMD_TOPIC
        self.client.subscribe(cmdTopic)

    def _on_disconnect(self, client, usrData, rc):
        """
        The on disconnect callback.

        Params:
            client: The mqtt client.
            usrData: User data.
            rc: The disconnection result.
        """
        self.logger.info('Disconnected')
        self.logger.debug(f"rc {rc}")

    def _on_message(self, client, usrData, msg):
        """
        The on message callback: emit the received IR command.

        Params:
            client: The mqtt client.
            usrData: User data.
            msg: The message data.
        """
        result = True
        receivedMsg = msg.payload.decode('utf-8')
        self.logger.info(f"Message received {receivedMsg}")
        try:
            # Hoisted out of the loop: the gap setting is loop-invariant.
            gap = self.config['commandSet']['packetGap']
            # Repeat the IR packet 4 times for transmission reliability.
            for i in range(0, 4):
                self.logger.debug(f"Sending packet #{i}")
                self.commandSet.emit(receivedMsg, emit_gap=gap)
        except KeyError as e:
            # Unknown command name (or missing config key): report failure.
            self.logger.warning(str(e))
            result = False
        self._publishCmdResult(result)

    def _on_publish(self, client, usrData, mid):
        """
        The on publish callback.

        Params:
            client: The mqtt client.
            usrData: User data.
            mid: The message ID that have been published.
        """
        self.logger.info('Message published')
        self.logger.debug(f"mid {mid}")

    def _on_subscribe(self, client, usrData, mid, grantedQoS):
        """
        The on subscribe callback.

        Params:
            client: The mqtt client.
            usrData: User data.
            mid: The message ID that have been published.
            grantedQoS: The granted QoS for the subscription.
        """
        self.logger.info(f"Subscribed with QoS {grantedQoS}")
        self.logger.debug(f"mid {mid}")

    def _on_log(self, client, usrData, logLevel, logMsg):
        """
        The on log callback: route paho log levels to the device logger.

        Params:
            client: The mqtt client.
            usrData: User data.
            logLevel: The level of the log message.
            logMsg: The log message.
        """
        switcher = {
            mqtt.MQTT_LOG_INFO: self.logger.info,
            mqtt.MQTT_LOG_NOTICE: self.logger.info,
            mqtt.MQTT_LOG_WARNING: self.logger.warning,
            mqtt.MQTT_LOG_ERR: self.logger.error,
            mqtt.MQTT_LOG_DEBUG: self.logger.debug,
        }
        switcher[logLevel](logMsg)

    def startLoop(self):
        """
        Start the network loop.
        """
        self.client.loop_start()

    def stopLoop(self):
        """
        Stop the network loop and disconnect from the broker.
        """
        self.client.loop_stop()
        self.client.disconnect()

    def getName(self):
        """
        Get the device name.

        Return:
            The device name.
        """
        return self.config['name']

    def getLocation(self):
        """
        Get the device location.

        Return:
            The device location.
        """
        return self.config['location']

    def getConfig(self):
        """
        Get the device configuration.

        Return:
            The device configuration.
        """
        self.logger.debug('Getting device config')
        return self.config

    def setConfig(self, config):
        """
        Set the device configuration.

        Params:
            config: The device configuration.
        """
        self.logger.debug(f"Setting device config to {config}")
        self.config = config

    def getCommandList(self):
        """
        Get the device command list.

        Return:
            The device command list.
        """
        self.logger.debug('Getting command list')
        cmdSetJson = self.commandSet.to_json()
        return cmdSetJson['commands'].keys()

    def addCommand(self, command, description):
        """
        Add a command to the device.

        Params:
            command: The command name.
            description: The command description.
        """
        self.logger.debug(f"Adding command {command} to command set")
        self.commandSet.add(command, description=description)

    def deleteCommand(self, command):
        """
        Delete a command from the device.

        Params:
            command: The command name.

        Raise:
            CommandNotFound if the requested command is not supported.
        """
        self.logger.debug(f"Deleting command {command} from command set")
        try:
            self.commandSet.remove(command)
        except KeyError:
            raise CommandNotFound(command)

    def saveCommandSet(self):
        """
        Save the device command set.

        Raise:
            CommandFileAccess if the save operation fail.
        """
        try:
            self.commandSet.save_as(os.path.join('./commandSets',
                                    self.config['commandSet']['manufacturer'],
                                    f"{self.config['commandSet']['model']}"
                                    f".json"))
        except Exception as err:
            raise CommandFileAccess('unable to access the '
                                    'command file.') from err
| 10,490 | 2,823 |
inventory_var = "from_inventory"
hosts = ["host1"]
| 52 | 21 |
import os
import datetime
from fitparse import *
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime
import re
# import matplotlib.pyplot as plt (not needed I guess)
# Folder (relative to the working directory) that is scanned for .fit files.
directory = 'fitfiles' # may want to make this more flexible--ie: not just in the directory of the code...works for now tho and not bad.
#Note^: may need to rename, perhaps make easy tkinter interface for picking a local directory or have part of the program move the fit files to the project directory automatically
# if files need renaming INTEGRATE IN THE FUTURE!!!! ESPECIALLY WITH FULL GARMIN->OUTPUT WORKFLOW AUTO!!!
def fitfile_decapitalization(target_dir=None):
    """Rename '*.FIT' files to '*.fit' inside a directory.

    Params:
        target_dir: Directory to scan; defaults to the module-level
            `directory` when None, keeping the original zero-argument
            call backward compatible.
    """
    folder = directory if target_dir is None else target_dir
    for filename in os.listdir(folder):
        base_name, ext = os.path.splitext(filename)
        # Only the exact upper-case '.FIT' extension is normalized.
        if ext == '.FIT':
            os.rename(os.path.join(folder, filename),
                      os.path.join(folder, base_name + '.fit'))
fitfile_decapitalization()
# HRSS Calc--PERSONAL INFO--REQUIRED FOR CALCULATIONS TO BE ACCURATE
lthr = 191.0 # heart rate(bpm) at lactate threshold
my_maxhr = 212 # max heart rate(bpm)
my_rhr = 50 # resting heart rate(bpm)
my_sex = "MALE"
eulersNum = 2.7182818 # duh
# Sex-specific coefficients for the exponential TRIMP weighting term.
if my_sex == "MALE":
    my_baseconstant = .64
    my_yvalue = 1.92
else:
    my_yvalue = 1.67
    my_baseconstant = .86
# component calcs of the multi-part exponential HRSS equation:
# Fraction of heart-rate reserve at lactate threshold.
my_hrrAtLT = ((lthr - my_rhr) / (my_maxhr - my_rhr))
# Stress accumulated in 60 minutes at lactate-threshold heart rate.
sixtyatLTHR_SS = 60 * my_hrrAtLT * my_baseconstant * (
    eulersNum ** (my_yvalue * my_hrrAtLT))  # aka "N" in relevant equations
N_ova_hundy = sixtyatLTHR_SS / 100
hundy_ova_N = 100 / sixtyatLTHR_SS
def load_workout(workout_file):
    """
    Load a .fit file and return its 'record' messages
    as a numpy array of field-name -> value dicts.
    NaN handling notes from the original implementation
    are preserved below for reference.
    """
    fitfile = FitFile(workout_file)
    # Ugly timing hack preserved from the original: spin until the
    # message list can be read without raising.
    while True:
        try:
            fitfile.messages
        except KeyError:
            continue
        else:
            break
    # Collect every "record" message as a dict of its data fields.
    records = []
    for message in fitfile.get_messages('record'):
        entry = {field.name: field.value for field in message}
        records.append(entry)
    # (dropped dead code kept in the original as a comment:
    #  a pandas DataFrame with ffill/backfill that was never used)
    return np.array(records)
def get_date(workout_df):
    """Return the calendar date of the first record in the workout."""
    first_timestamp = workout_df['timestamp'][0]
    return first_timestamp.date()
def gett_date(string):
    """Parse a 'YYYY-MM-DD HH:MM:SS<tail>' style string into a datetime.

    Splits on '-', 'T', ':' and spaces and drops the final token (as the
    original did). Uses int() instead of eval(): eval on input tokens was
    unsafe, and its one-leading-zero strip still broke on tokens such as
    '008' (SyntaxError under Python 3).
    """
    parts = [int(tok) for tok in re.split("-|T|:| ", string)[:-1]]
    return datetime(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5])
def difference_between_dates(date1, date2):
    """Return the minutes elapsed from date1 to date2, rounded to 4 decimals.

    Uses timedelta.total_seconds() so gaps of one day or more are counted;
    the original '.seconds' attribute silently dropped whole days (it is
    always in 0..86399) and misbehaved for negative differences.
    """
    mins = (date2 - date1).total_seconds() / 60
    return round(mins, 4)
# Loop through fitfile directory, load hr data, calculate HRSS
for filename in tqdm(os.listdir(directory)):
    if filename.endswith('.fit'):
        workout = load_workout((os.path.join(directory, filename)))
        # Only process files whose first record carries heart-rate data.
        if 'heart_rate' in workout[0]:
            # printing first 2 rows to manually check presence/forms
            print(workout[0])
            print(workout[1])
            print(filename)
            # for HRSS: form is SUM (Ti*HRRi*baseconst * e^(yval*HRRi) ) * 100/(60*HHRlt*basconst * e^(yval*HRRlt) )
            # simplified: SUM (ATERM) * BTERM
            # workflow is: calc aterm*bterm indiv, then sum
            instantChT = []  # list of "instantaneous" changes in time
            # Minutes between consecutive record timestamps.
            for i in range(len(workout) - 1):
                # print(workout[i])
                instantChT.append(difference_between_dates(workout[i]["timestamp"], workout[i + 1]["timestamp"]))
            print(instantChT)  # this works
            instantHr = []  # list of (hopefully corresponding) instantaneous heart rate readings
            for i in range(len(workout) - 1):
                instantHr.append(workout[i]["heart_rate"])
            print(instantHr)
            HRRi = []  # list of instantaneous heart rate reserve values
            for i in range(len(instantHr)):
                HRRi.append((instantHr[i] - my_rhr) / (my_maxhr - my_rhr))
            print(HRRi)
            AtermBterm = []  # see simplified equation roughly 20 lines above
            # Per-interval stress contribution, normalized by N (hundy_ova_N).
            for i in range(len(instantChT)):
                AtermBterm.append(
                    (instantChT[i] * HRRi[i] * my_baseconstant * (eulersNum ** (my_yvalue * HRRi[i]))) * hundy_ova_N)
            print(AtermBterm)
            # Total HRSS for this workout is the sum of the contributions.
            print(sum(AtermBterm))
        else:
            print("issue w HR in: " + filename + " :_(...either lacking HR data or is mislabeled, i think.")
            continue
| 5,459 | 1,829 |
from behave import fixture
@fixture
def pit(context):
    """
    Define the structures and metadata to perform PIT load.

    Populates the behave context with hashed/derived column mappings,
    hub/satellite/PIT definitions and seed column types for a three-satellite
    customer scenario. Pure configuration data — no logic.
    """
    context.vault_structure_type = "pit"
    # Hash-key / hashdiff derivations per staging model.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_LOGIN": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        },
        "STG_CUSTOMER_PROFILE": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"]
                         }
        }
    }
    # Derived columns added to each staging model.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_LOGIN": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_PROFILE": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        }
    }
    # Hub, satellite and PIT definitions (Data Vault style metadata).
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             "STG_CUSTOMER_LOGIN",
                             "STG_CUSTOMER_PROFILE"],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN": {
            "source_model": "STG_CUSTOMER_LOGIN",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_PROFILE": {
            "source_model": "STG_CUSTOMER_PROFILE",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        # The PIT table snapshots each satellite per as-of date.
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_LOGIN": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_PROFILE": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                    "STG_CUSTOMER_LOGIN": "LOAD_DATE",
                    "STG_CUSTOMER_PROFILE": "LOAD_DATE"
                },
            "src_ldts": "LOAD_DATE"
        }
    }
    # Column order for the raw staging seed files.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"]
        ,
        "RAW_STAGE_LOGIN":
            ["CUSTOMER_ID",
             "LAST_LOGIN_DATE",
             "DEVICE_USED",
             "LOAD_DATE",
             "SOURCE"]
        ,
        "RAW_STAGE_PROFILE":
            ["CUSTOMER_ID",
             "DASHBOARD_COLOUR",
             "DISPLAY_NAME",
             "LOAD_DATE",
             "SOURCE"]
    }
    # Explicit column types for the dbt seed loads.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_LOGIN": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_PROFILE": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "DASHBOARD_COLOUR": "VARCHAR",
                "DISPLAY_NAME": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_LOGIN": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_PROFILE": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DASHBOARD_COLOUR": "VARCHAR",
                "DISPLAY_NAME": "VARCHAR",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME",
                "SAT_CUSTOMER_PROFILE_PK": "BINARY(16)",
                "SAT_CUSTOMER_PROFILE_LDTS": "DATETIME"
            }
        }
    }
@fixture
def pit_one_sat(context):
    """
    Define the structures and metadata to perform PIT load.

    Single-satellite variant: one customer-details satellite, duplicated with
    DATE ("LOAD_DATE") and DATETIME ("LOAD_DATETIME", *_TS models) load
    timestamps, plus low/high-granularity PIT variants over the same hubs.
    Pure configuration data — no logic.
    """
    context.vault_structure_type = "pit"
    # Hash-key / hashdiff derivations per staging model.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        }
    }
    # Derived columns: _TS models use the DATETIME load column.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "EFFECTIVE_FROM": "LOAD_DATETIME"
        }
    }
    # Hub, satellite and PIT definitions (Data Vault style metadata).
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "HUB_CUSTOMER_TS": {
            "source_model": ["STG_CUSTOMER_DETAILS_TS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "source_model": "STG_CUSTOMER_DETAILS_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        },
        "PIT_CUSTOMER_TS": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_LG": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_HG": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        }
    }
    # Column order for the raw staging seed files.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"],
        "RAW_STAGE_DETAILS_TS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATETIME",
             "SOURCE"]
    }
    # Explicit column types for the dbt seed loads.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_TS": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_LG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_HG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        }
    }
@fixture
def pit_two_sats(context):
    """
    Define the structures and metadata to perform PIT load

    Two satellites (details + login) feed each PIT table, in both
    LOAD_DATE and LOAD_DATETIME (_TS) variants.
    """
    context.vault_structure_type = "pit"

    # Hashdiff input columns per satellite subject area.
    details_cols = ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
    login_cols = ["DEVICE_USED", "LAST_LOGIN_DATE"]

    def hashed(cols):
        # Standard PK + HASHDIFF hashing config for a staging layer.
        return {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": list(cols)}
        }

    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": hashed(details_cols),
        "STG_CUSTOMER_DETAILS_TS": hashed(details_cols),
        "STG_CUSTOMER_LOGIN": hashed(login_cols),
        "STG_CUSTOMER_LOGIN_TS": hashed(login_cols)
    }

    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_DETAILS_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"},
        "STG_CUSTOMER_LOGIN": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_LOGIN_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"}
    }

    def hub(stage, ldts):
        # Hub metadata loaded from a single staging model.
        return {
            "source_model": [stage],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def sat(stage, payload, ldts):
        # Satellite keyed on CUSTOMER_PK with a HASHDIFF change check.
        return {
            "source_model": stage,
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": list(payload),
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def pit(hub_name, sats, stages, ldts):
        # PIT over the given hub, tracking each satellite's PK/LDTS pair.
        return {
            "source_model": hub_name,
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                name: {"pk": {"PK": "CUSTOMER_PK"},
                       "ldts": {"LDTS": ldts}}
                for name in sats
            },
            "stage_tables": {stage: ldts for stage in stages},
            "src_ldts": ldts
        }

    date_sats = ["SAT_CUSTOMER_DETAILS", "SAT_CUSTOMER_LOGIN"]
    ts_sats = ["SAT_CUSTOMER_DETAILS_TS", "SAT_CUSTOMER_LOGIN_TS"]
    date_stages = ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN"]
    ts_stages = ["STG_CUSTOMER_DETAILS_TS", "STG_CUSTOMER_LOGIN_TS"]

    context.vault_structure_columns = {
        "HUB_CUSTOMER": hub("STG_CUSTOMER_DETAILS", "LOAD_DATE"),
        "HUB_CUSTOMER_TS": hub("STG_CUSTOMER_DETAILS_TS", "LOAD_DATETIME"),
        "SAT_CUSTOMER_DETAILS": sat(
            "STG_CUSTOMER_DETAILS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "LOAD_DATE"),
        "SAT_CUSTOMER_DETAILS_TS": sat(
            "STG_CUSTOMER_DETAILS_TS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "LOAD_DATETIME"),
        "SAT_CUSTOMER_LOGIN": sat(
            "STG_CUSTOMER_LOGIN",
            ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "LOAD_DATE"),
        "SAT_CUSTOMER_LOGIN_TS": sat(
            "STG_CUSTOMER_LOGIN_TS",
            ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "LOAD_DATETIME"),
        "PIT_CUSTOMER": pit("HUB_CUSTOMER", date_sats, date_stages,
                            "LOAD_DATE"),
        "PIT_CUSTOMER_TS": pit("HUB_CUSTOMER_TS", ts_sats, ts_stages,
                               "LOAD_DATETIME"),
        "PIT_CUSTOMER_LG": pit("HUB_CUSTOMER_TS", ts_sats, ts_stages,
                               "LOAD_DATETIME"),
        "PIT_CUSTOMER_HG": pit("HUB_CUSTOMER", date_sats, date_stages,
                               "LOAD_DATE")
    }

    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                              "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                              "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                                 "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                                 "LOAD_DATETIME", "SOURCE"],
        "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                            "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE",
                               "DEVICE_USED", "LOAD_DATETIME", "SOURCE"]
    }

    def types(**columns):
        # dbt seed column-type override block.
        return {"+column_types": dict(columns)}

    context.seed_config = {
        "RAW_STAGE_DETAILS": types(
            CUSTOMER_ID="VARCHAR", CUSTOMER_NAME="VARCHAR",
            CUSTOMER_ADDRESS="VARCHAR", CUSTOMER_DOB="DATE",
            LOAD_DATE="DATE", SOURCE="VARCHAR"),
        "RAW_STAGE_DETAILS_TS": types(
            CUSTOMER_ID="VARCHAR", CUSTOMER_NAME="VARCHAR",
            CUSTOMER_ADDRESS="VARCHAR", CUSTOMER_DOB="DATE",
            LOAD_DATETIME="DATETIME", SOURCE="VARCHAR"),
        "RAW_STAGE_LOGIN": types(
            CUSTOMER_ID="VARCHAR", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="VARCHAR", LOAD_DATE="DATE", SOURCE="VARCHAR"),
        "RAW_STAGE_LOGIN_TS": types(
            CUSTOMER_ID="VARCHAR", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="VARCHAR", LOAD_DATETIME="DATETIME",
            SOURCE="VARCHAR"),
        "HUB_CUSTOMER": types(
            CUSTOMER_PK="BINARY(16)", CUSTOMER_ID="VARCHAR",
            LOAD_DATE="DATE", SOURCE="VARCHAR"),
        "HUB_CUSTOMER_TS": types(
            CUSTOMER_PK="BINARY(16)", CUSTOMER_ID="VARCHAR",
            LOAD_DATETIME="DATETIME", SOURCE="VARCHAR"),
        "SAT_CUSTOMER_DETAILS": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            CUSTOMER_NAME="VARCHAR", CUSTOMER_ADDRESS="VARCHAR",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATE",
            LOAD_DATE="DATE", SOURCE="VARCHAR"),
        "SAT_CUSTOMER_DETAILS_TS": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            CUSTOMER_NAME="VARCHAR", CUSTOMER_ADDRESS="VARCHAR",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATETIME",
            LOAD_DATETIME="DATETIME", SOURCE="VARCHAR"),
        "SAT_CUSTOMER_LOGIN": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            DEVICE_USED="VARCHAR", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATE", LOAD_DATE="DATE", SOURCE="VARCHAR"),
        "SAT_CUSTOMER_LOGIN_TS": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            DEVICE_USED="VARCHAR", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATETIME", LOAD_DATETIME="DATETIME",
            SOURCE="VARCHAR"),
        "AS_OF_DATE": types(AS_OF_DATE="DATETIME"),
        "PIT_CUSTOMER": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="BINARY(16)",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME"),
        "PIT_CUSTOMER_TS": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_TS_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_TS_PK="BINARY(16)",
            SAT_CUSTOMER_LOGIN_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_LG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_TS_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_TS_PK="BINARY(16)",
            SAT_CUSTOMER_LOGIN_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_HG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="BINARY(16)",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME")
    }
@fixture
def pit_bigquery(context):
    """
    Define the structures and metadata to perform PIT load

    Three satellites (details, login, profile) feed one PIT table,
    using BigQuery column types in the seed config.
    """
    context.vault_structure_type = "pit"

    def hashed(cols):
        # Standard PK + HASHDIFF hashing config for a staging layer.
        return {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": list(cols)}
        }

    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": hashed(
            ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]),
        "STG_CUSTOMER_LOGIN": hashed(
            ["DEVICE_USED", "LAST_LOGIN_DATE"]),
        "STG_CUSTOMER_PROFILE": hashed(
            ["DASHBOARD_COLOUR", "DISPLAY_NAME"])
    }

    # Every stage derives EFFECTIVE_FROM from LOAD_DATE.
    context.derived_columns = {
        stage: {"EFFECTIVE_FROM": "LOAD_DATE"}
        for stage in ("STG_CUSTOMER_DETAILS",
                      "STG_CUSTOMER_LOGIN",
                      "STG_CUSTOMER_PROFILE")
    }

    def sat(stage, payload):
        # Satellite keyed on CUSTOMER_PK with a HASHDIFF change check.
        return {
            "source_model": stage,
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": list(payload),
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        }

    sat_names = ["SAT_CUSTOMER_DETAILS", "SAT_CUSTOMER_LOGIN",
                 "SAT_CUSTOMER_PROFILE"]
    stage_names = ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN",
                   "STG_CUSTOMER_PROFILE"]

    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": list(stage_names),
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": sat(
            "STG_CUSTOMER_DETAILS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"]),
        "SAT_CUSTOMER_LOGIN": sat(
            "STG_CUSTOMER_LOGIN",
            ["LAST_LOGIN_DATE", "DEVICE_USED"]),
        "SAT_CUSTOMER_PROFILE": sat(
            "STG_CUSTOMER_PROFILE",
            ["DASHBOARD_COLOUR", "DISPLAY_NAME"]),
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                name: {"pk": {"PK": "CUSTOMER_PK"},
                       "ldts": {"LDTS": "LOAD_DATE"}}
                for name in sat_names
            },
            "stage_tables": {stage: "LOAD_DATE" for stage in stage_names},
            "src_ldts": "LOAD_DATE"
        }
    }

    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                              "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                              "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                            "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_PROFILE": ["CUSTOMER_ID", "DASHBOARD_COLOUR",
                              "DISPLAY_NAME", "LOAD_DATE", "SOURCE"]
    }

    def types(**columns):
        # dbt seed column-type override block (BigQuery types).
        return {"+column_types": dict(columns)}

    context.seed_config = {
        "RAW_STAGE_DETAILS": types(
            CUSTOMER_ID="STRING", CUSTOMER_NAME="STRING",
            CUSTOMER_ADDRESS="STRING", CUSTOMER_DOB="DATE",
            LOAD_DATE="DATETIME", SOURCE="STRING"),
        "RAW_STAGE_LOGIN": types(
            CUSTOMER_ID="STRING", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="STRING", LOAD_DATE="DATETIME", SOURCE="STRING"),
        "RAW_STAGE_PROFILE": types(
            CUSTOMER_ID="STRING", DASHBOARD_COLOUR="STRING",
            DISPLAY_NAME="STRING", LOAD_DATE="DATETIME", SOURCE="STRING"),
        "HUB_CUSTOMER": types(
            CUSTOMER_PK="STRING", CUSTOMER_ID="STRING",
            LOAD_DATE="DATETIME", SOURCE="STRING"),
        "SAT_CUSTOMER_DETAILS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            CUSTOMER_NAME="STRING", CUSTOMER_ADDRESS="STRING",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATETIME",
            LOAD_DATE="DATETIME", SOURCE="STRING"),
        "SAT_CUSTOMER_LOGIN": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            DEVICE_USED="STRING", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATETIME", LOAD_DATE="DATETIME",
            SOURCE="STRING"),
        "SAT_CUSTOMER_PROFILE": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            DASHBOARD_COLOUR="STRING", DISPLAY_NAME="STRING",
            EFFECTIVE_FROM="DATETIME", LOAD_DATE="DATETIME",
            SOURCE="STRING"),
        "AS_OF_DATE": types(AS_OF_DATE="DATETIME"),
        "PIT_CUSTOMER": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_PK="STRING",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="STRING",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME",
            SAT_CUSTOMER_PROFILE_PK="STRING",
            SAT_CUSTOMER_PROFILE_LDTS="DATETIME")
    }
@fixture
def pit_one_sat_bigquery(context):
    """
    Define the structures and metadata to perform PIT load

    Single details satellite per PIT table, in both LOAD_DATE and
    LOAD_DATETIME (_TS) variants, using BigQuery column types.
    """
    context.vault_structure_type = "pit"

    def hashed():
        # Standard PK + HASHDIFF hashing config for a staging layer.
        return {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                                     "CUSTOMER_NAME"]}
        }

    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": hashed(),
        "STG_CUSTOMER_DETAILS_TS": hashed()
    }

    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_DETAILS_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"}
    }

    def hub(stage, ldts):
        # Hub metadata loaded from a single staging model.
        return {
            "source_model": [stage],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def sat(stage, ldts):
        # Details satellite keyed on CUSTOMER_PK with a HASHDIFF check.
        return {
            "source_model": stage,
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS",
                            "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def pit(hub_name, sat_name, stage, ldts):
        # PIT over one hub tracking a single satellite.
        return {
            "source_model": hub_name,
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                sat_name: {"pk": {"PK": "CUSTOMER_PK"},
                           "ldts": {"LDTS": ldts}}
            },
            "stage_tables": {stage: ldts},
            "src_ldts": ldts
        }

    context.vault_structure_columns = {
        "HUB_CUSTOMER": hub("STG_CUSTOMER_DETAILS", "LOAD_DATE"),
        "HUB_CUSTOMER_TS": hub("STG_CUSTOMER_DETAILS_TS", "LOAD_DATETIME"),
        "SAT_CUSTOMER_DETAILS": sat("STG_CUSTOMER_DETAILS", "LOAD_DATE"),
        "SAT_CUSTOMER_DETAILS_TS": sat("STG_CUSTOMER_DETAILS_TS",
                                       "LOAD_DATETIME"),
        "PIT_CUSTOMER": pit("HUB_CUSTOMER", "SAT_CUSTOMER_DETAILS",
                            "STG_CUSTOMER_DETAILS", "LOAD_DATE"),
        "PIT_CUSTOMER_TS": pit("HUB_CUSTOMER_TS", "SAT_CUSTOMER_DETAILS_TS",
                               "STG_CUSTOMER_DETAILS_TS", "LOAD_DATETIME"),
        "PIT_CUSTOMER_LG": pit("HUB_CUSTOMER_TS", "SAT_CUSTOMER_DETAILS_TS",
                               "STG_CUSTOMER_DETAILS_TS", "LOAD_DATETIME"),
        "PIT_CUSTOMER_HG": pit("HUB_CUSTOMER", "SAT_CUSTOMER_DETAILS",
                               "STG_CUSTOMER_DETAILS", "LOAD_DATE")
    }

    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                              "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                              "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                                 "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                                 "LOAD_DATETIME", "SOURCE"]
    }

    def types(**columns):
        # dbt seed column-type override block (BigQuery types).
        return {"+column_types": dict(columns)}

    context.seed_config = {
        "RAW_STAGE_DETAILS": types(
            CUSTOMER_ID="STRING", CUSTOMER_NAME="STRING",
            CUSTOMER_ADDRESS="STRING", CUSTOMER_DOB="DATE",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "RAW_STAGE_DETAILS_TS": types(
            CUSTOMER_ID="STRING", CUSTOMER_NAME="STRING",
            CUSTOMER_ADDRESS="STRING", CUSTOMER_DOB="DATE",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "HUB_CUSTOMER": types(
            CUSTOMER_PK="STRING", CUSTOMER_ID="STRING",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "HUB_CUSTOMER_TS": types(
            CUSTOMER_PK="STRING", CUSTOMER_ID="STRING",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "SAT_CUSTOMER_DETAILS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            CUSTOMER_NAME="STRING", CUSTOMER_ADDRESS="STRING",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATE",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "SAT_CUSTOMER_DETAILS_TS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            CUSTOMER_NAME="STRING", CUSTOMER_ADDRESS="STRING",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATETIME",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "AS_OF_DATE": types(AS_OF_DATE="DATETIME"),
        "PIT_CUSTOMER": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_PK="STRING",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME"),
        "PIT_CUSTOMER_TS": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_LG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_HG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_PK="STRING",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME")
    }
@fixture
def pit_two_sats_bigquery(context):
    """
    Define the structures and metadata to perform PIT load

    Two satellites (details + login) feed each PIT table, in both
    LOAD_DATE and LOAD_DATETIME (_TS) variants, using BigQuery types.
    """
    context.vault_structure_type = "pit"

    # Hashdiff input columns per satellite subject area.
    details_cols = ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
    login_cols = ["DEVICE_USED", "LAST_LOGIN_DATE"]

    def hashed(cols):
        # Standard PK + HASHDIFF hashing config for a staging layer.
        return {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": list(cols)}
        }

    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": hashed(details_cols),
        "STG_CUSTOMER_DETAILS_TS": hashed(details_cols),
        "STG_CUSTOMER_LOGIN": hashed(login_cols),
        "STG_CUSTOMER_LOGIN_TS": hashed(login_cols)
    }

    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_DETAILS_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"},
        "STG_CUSTOMER_LOGIN": {"EFFECTIVE_FROM": "LOAD_DATE"},
        "STG_CUSTOMER_LOGIN_TS": {"EFFECTIVE_FROM": "LOAD_DATETIME"}
    }

    def hub(stage, ldts):
        # Hub metadata loaded from a single staging model.
        return {
            "source_model": [stage],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def sat(stage, payload, ldts):
        # Satellite keyed on CUSTOMER_PK with a HASHDIFF change check.
        return {
            "source_model": stage,
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": list(payload),
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": ldts,
            "src_source": "SOURCE"
        }

    def pit(hub_name, sats, stages, ldts):
        # PIT over the given hub, tracking each satellite's PK/LDTS pair.
        return {
            "source_model": hub_name,
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                name: {"pk": {"PK": "CUSTOMER_PK"},
                       "ldts": {"LDTS": ldts}}
                for name in sats
            },
            "stage_tables": {stage: ldts for stage in stages},
            "src_ldts": ldts
        }

    date_sats = ["SAT_CUSTOMER_DETAILS", "SAT_CUSTOMER_LOGIN"]
    ts_sats = ["SAT_CUSTOMER_DETAILS_TS", "SAT_CUSTOMER_LOGIN_TS"]
    date_stages = ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN"]
    ts_stages = ["STG_CUSTOMER_DETAILS_TS", "STG_CUSTOMER_LOGIN_TS"]

    context.vault_structure_columns = {
        "HUB_CUSTOMER": hub("STG_CUSTOMER_DETAILS", "LOAD_DATE"),
        "HUB_CUSTOMER_TS": hub("STG_CUSTOMER_DETAILS_TS", "LOAD_DATETIME"),
        "SAT_CUSTOMER_DETAILS": sat(
            "STG_CUSTOMER_DETAILS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "LOAD_DATE"),
        "SAT_CUSTOMER_DETAILS_TS": sat(
            "STG_CUSTOMER_DETAILS_TS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "LOAD_DATETIME"),
        "SAT_CUSTOMER_LOGIN": sat(
            "STG_CUSTOMER_LOGIN",
            ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "LOAD_DATE"),
        "SAT_CUSTOMER_LOGIN_TS": sat(
            "STG_CUSTOMER_LOGIN_TS",
            ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "LOAD_DATETIME"),
        "PIT_CUSTOMER": pit("HUB_CUSTOMER", date_sats, date_stages,
                            "LOAD_DATE"),
        "PIT_CUSTOMER_TS": pit("HUB_CUSTOMER_TS", ts_sats, ts_stages,
                               "LOAD_DATETIME"),
        "PIT_CUSTOMER_LG": pit("HUB_CUSTOMER_TS", ts_sats, ts_stages,
                               "LOAD_DATETIME"),
        "PIT_CUSTOMER_HG": pit("HUB_CUSTOMER", date_sats, date_stages,
                               "LOAD_DATE")
    }

    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                              "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                              "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                                 "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                                 "LOAD_DATETIME", "SOURCE"],
        "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                            "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE",
                               "DEVICE_USED", "LOAD_DATETIME", "SOURCE"]
    }

    def types(**columns):
        # dbt seed column-type override block (BigQuery types).
        return {"+column_types": dict(columns)}

    context.seed_config = {
        "RAW_STAGE_DETAILS": types(
            CUSTOMER_ID="STRING", CUSTOMER_NAME="STRING",
            CUSTOMER_ADDRESS="STRING", CUSTOMER_DOB="DATE",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "RAW_STAGE_DETAILS_TS": types(
            CUSTOMER_ID="STRING", CUSTOMER_NAME="STRING",
            CUSTOMER_ADDRESS="STRING", CUSTOMER_DOB="DATE",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "RAW_STAGE_LOGIN": types(
            CUSTOMER_ID="STRING", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="STRING", LOAD_DATE="DATE", SOURCE="STRING"),
        "RAW_STAGE_LOGIN_TS": types(
            CUSTOMER_ID="STRING", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="STRING", LOAD_DATETIME="DATETIME",
            SOURCE="STRING"),
        "HUB_CUSTOMER": types(
            CUSTOMER_PK="STRING", CUSTOMER_ID="STRING",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "HUB_CUSTOMER_TS": types(
            CUSTOMER_PK="STRING", CUSTOMER_ID="STRING",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "SAT_CUSTOMER_DETAILS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            CUSTOMER_NAME="STRING", CUSTOMER_ADDRESS="STRING",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATE",
            LOAD_DATE="DATE", SOURCE="STRING"),
        "SAT_CUSTOMER_DETAILS_TS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            CUSTOMER_NAME="STRING", CUSTOMER_ADDRESS="STRING",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATETIME",
            LOAD_DATETIME="DATETIME", SOURCE="STRING"),
        "SAT_CUSTOMER_LOGIN": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            DEVICE_USED="STRING", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATE", LOAD_DATE="DATE", SOURCE="STRING"),
        "SAT_CUSTOMER_LOGIN_TS": types(
            CUSTOMER_PK="STRING", HASHDIFF="STRING",
            DEVICE_USED="STRING", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATETIME", LOAD_DATETIME="DATETIME",
            SOURCE="STRING"),
        "AS_OF_DATE": types(AS_OF_DATE="DATETIME"),
        "PIT_CUSTOMER": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_PK="STRING",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="STRING",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME"),
        "PIT_CUSTOMER_TS": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_TS_PK="STRING",
            SAT_CUSTOMER_LOGIN_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_LG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_PK="STRING",
            SAT_CUSTOMER_DETAILS_TS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_TS_PK="STRING",
            SAT_CUSTOMER_LOGIN_TS_LDTS="DATETIME"),
        "PIT_CUSTOMER_HG": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="STRING",
            SAT_CUSTOMER_DETAILS_PK="STRING",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="STRING",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME")
    }
@fixture
def pit_sqlserver(context):
    """
    Define the structures and metadata to perform PIT load

    Three satellites (details, login, profile) feed one PIT table,
    using SQL Server column types in the seed config.
    """
    context.vault_structure_type = "pit"

    def hashed(cols):
        # Standard PK + HASHDIFF hashing config for a staging layer.
        return {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": list(cols)}
        }

    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": hashed(
            ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]),
        "STG_CUSTOMER_LOGIN": hashed(
            ["DEVICE_USED", "LAST_LOGIN_DATE"]),
        "STG_CUSTOMER_PROFILE": hashed(
            ["DASHBOARD_COLOUR", "DISPLAY_NAME"])
    }

    # Every stage derives EFFECTIVE_FROM from LOAD_DATE.
    context.derived_columns = {
        stage: {"EFFECTIVE_FROM": "LOAD_DATE"}
        for stage in ("STG_CUSTOMER_DETAILS",
                      "STG_CUSTOMER_LOGIN",
                      "STG_CUSTOMER_PROFILE")
    }

    def sat(stage, payload):
        # Satellite keyed on CUSTOMER_PK with a HASHDIFF change check.
        return {
            "source_model": stage,
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": list(payload),
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        }

    sat_names = ["SAT_CUSTOMER_DETAILS", "SAT_CUSTOMER_LOGIN",
                 "SAT_CUSTOMER_PROFILE"]
    stage_names = ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN",
                   "STG_CUSTOMER_PROFILE"]

    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": list(stage_names),
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": sat(
            "STG_CUSTOMER_DETAILS",
            ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"]),
        "SAT_CUSTOMER_LOGIN": sat(
            "STG_CUSTOMER_LOGIN",
            ["LAST_LOGIN_DATE", "DEVICE_USED"]),
        "SAT_CUSTOMER_PROFILE": sat(
            "STG_CUSTOMER_PROFILE",
            ["DASHBOARD_COLOUR", "DISPLAY_NAME"]),
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites": {
                name: {"pk": {"PK": "CUSTOMER_PK"},
                       "ldts": {"LDTS": "LOAD_DATE"}}
                for name in sat_names
            },
            "stage_tables": {stage: "LOAD_DATE" for stage in stage_names},
            "src_ldts": "LOAD_DATE"
        }
    }

    context.stage_columns = {
        "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME",
                              "CUSTOMER_ADDRESS", "CUSTOMER_DOB",
                              "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED",
                            "LOAD_DATE", "SOURCE"],
        "RAW_STAGE_PROFILE": ["CUSTOMER_ID", "DASHBOARD_COLOUR",
                              "DISPLAY_NAME", "LOAD_DATE", "SOURCE"]
    }

    def types(**columns):
        # dbt seed column-type override block (SQL Server types).
        return {"+column_types": dict(columns)}

    context.seed_config = {
        "RAW_STAGE_DETAILS": types(
            CUSTOMER_ID="VARCHAR(5)", CUSTOMER_NAME="VARCHAR(10)",
            CUSTOMER_ADDRESS="VARCHAR(30)", CUSTOMER_DOB="DATE",
            LOAD_DATE="DATETIME", SOURCE="VARCHAR(10)"),
        "RAW_STAGE_LOGIN": types(
            CUSTOMER_ID="VARCHAR(5)", LAST_LOGIN_DATE="DATETIME",
            DEVICE_USED="VARCHAR(10)", LOAD_DATE="DATETIME",
            SOURCE="VARCHAR(10)"),
        "RAW_STAGE_PROFILE": types(
            CUSTOMER_ID="VARCHAR(5)", DASHBOARD_COLOUR="VARCHAR(10)",
            DISPLAY_NAME="VARCHAR(10)", LOAD_DATE="DATETIME",
            SOURCE="VARCHAR(10)"),
        "HUB_CUSTOMER": types(
            CUSTOMER_PK="BINARY(16)", CUSTOMER_ID="VARCHAR(5)",
            LOAD_DATE="DATETIME", SOURCE="VARCHAR(10)"),
        "SAT_CUSTOMER_DETAILS": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            CUSTOMER_NAME="VARCHAR(10)", CUSTOMER_ADDRESS="VARCHAR(30)",
            CUSTOMER_DOB="DATE", EFFECTIVE_FROM="DATETIME",
            LOAD_DATE="DATETIME", SOURCE="VARCHAR(10)"),
        "SAT_CUSTOMER_LOGIN": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            DEVICE_USED="VARCHAR(10)", LAST_LOGIN_DATE="DATETIME",
            EFFECTIVE_FROM="DATETIME", LOAD_DATE="DATETIME",
            SOURCE="VARCHAR(10)"),
        "SAT_CUSTOMER_PROFILE": types(
            CUSTOMER_PK="BINARY(16)", HASHDIFF="BINARY(16)",
            DASHBOARD_COLOUR="VARCHAR(10)", DISPLAY_NAME="VARCHAR(10)",
            EFFECTIVE_FROM="DATETIME", LOAD_DATE="DATETIME",
            SOURCE="VARCHAR(10)"),
        "AS_OF_DATE": types(AS_OF_DATE="DATETIME"),
        "PIT_CUSTOMER": types(
            AS_OF_DATE="DATETIME", CUSTOMER_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_PK="BINARY(16)",
            SAT_CUSTOMER_DETAILS_LDTS="DATETIME",
            SAT_CUSTOMER_LOGIN_PK="BINARY(16)",
            SAT_CUSTOMER_LOGIN_LDTS="DATETIME",
            SAT_CUSTOMER_PROFILE_PK="BINARY(16)",
            SAT_CUSTOMER_PROFILE_LDTS="DATETIME")
    }
@fixture
def pit_one_sat_sqlserver(context):
    """
    Define the structures and metadata to perform PIT load

    Single-satellite PIT scenario for SQL Server, in two grains:
    the default DATE-grained models and DATETIME-grained ``_TS`` variants.
    All attributes are consumed by the shared step implementations via the
    behave-style ``context`` object.
    """
    context.vault_structure_type = "pit"
    # Hash key / hashdiff definitions applied in staging.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        }
    }
    # Derived columns: EFFECTIVE_FROM mirrors the load timestamp of each grain.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "EFFECTIVE_FROM": "LOAD_DATETIME"
        }
    }
    # Metadata for the hub, satellite and PIT macros under test.
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "HUB_CUSTOMER_TS": {
            "source_model": ["STG_CUSTOMER_DETAILS_TS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "source_model": "STG_CUSTOMER_DETAILS_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        },
        "PIT_CUSTOMER_TS": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        # _LG / _HG mirror the _TS / default configs under different target
        # names — presumably to test differing as-of-date grains; confirm
        # against the feature files that consume this fixture.
        "PIT_CUSTOMER_LG": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_HG": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        }
    }
    # Column order of the seeded raw staging CSVs.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"],
        "RAW_STAGE_DETAILS_TS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATETIME",
             "SOURCE"]
    }
    # dbt seed column-type overrides for every table in the scenario.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_TS": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_LG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_HG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        }
    }
@fixture
def pit_two_sats_sqlserver(context):
    """
    Define the structures and metadata to perform PIT load

    Two-satellite PIT scenario for SQL Server (customer details + login
    activity), again in DATE-grained and DATETIME-grained ``_TS`` variants.
    All attributes are consumed by the shared step implementations via the
    behave-style ``context`` object.
    """
    context.vault_structure_type = "pit"
    # Hash key / hashdiff definitions applied in staging.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_LOGIN": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        },
        "STG_CUSTOMER_LOGIN_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        }
    }
    # Derived columns: EFFECTIVE_FROM mirrors the load timestamp of each grain.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "EFFECTIVE_FROM": "LOAD_DATETIME"
        },
        "STG_CUSTOMER_LOGIN": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_LOGIN_TS": {
            "EFFECTIVE_FROM": "LOAD_DATETIME"
        }
    }
    # Metadata for the hub, both satellites, and the PIT macros under test.
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "HUB_CUSTOMER_TS": {
            "source_model": ["STG_CUSTOMER_DETAILS_TS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "source_model": "STG_CUSTOMER_DETAILS_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN": {
            "source_model": "STG_CUSTOMER_LOGIN",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN_TS": {
            "source_model": "STG_CUSTOMER_LOGIN_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_LOGIN": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                    "STG_CUSTOMER_LOGIN": "LOAD_DATE"
                },
            "src_ldts": "LOAD_DATE"
        },
        "PIT_CUSTOMER_TS": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    },
                    "SAT_CUSTOMER_LOGIN_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                    "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        # _LG / _HG mirror the _TS / default configs under different target
        # names — presumably to test differing as-of-date grains; confirm
        # against the feature files that consume this fixture.
        "PIT_CUSTOMER_LG": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    },
                    "SAT_CUSTOMER_LOGIN_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                    "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_HG": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_LOGIN": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                    "STG_CUSTOMER_LOGIN": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        }
    }
    # Column order of the seeded raw staging CSVs.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"],
        "RAW_STAGE_DETAILS_TS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATETIME",
             "SOURCE"],
        "RAW_STAGE_LOGIN":
            ["CUSTOMER_ID",
             "LAST_LOGIN_DATE",
             "DEVICE_USED",
             "LOAD_DATE",
             "SOURCE"],
        "RAW_STAGE_LOGIN_TS":
            ["CUSTOMER_ID",
             "LAST_LOGIN_DATE",
             "DEVICE_USED",
             "LOAD_DATETIME",
             "SOURCE"]
    }
    # dbt seed column-type overrides for every table in the scenario.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_LOGIN": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR(50)",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "RAW_STAGE_LOGIN_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR(50)",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "HUB_CUSTOMER_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR(50)",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR(50)",
                "CUSTOMER_ADDRESS": "VARCHAR(50)",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_LOGIN": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "SAT_CUSTOMER_LOGIN_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR(50)",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR(50)"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_TS": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_LG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_HG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME"
            }
        }
    }
| 87,762 | 29,857 |
#!/usr/bin/env python
r"""
Panflute filter supporting \textquote and \foreigntextquote in LaTeX
Issues:
- Nested parens with pandoc-citeproc
Usage:
- Use Pandoc markdown bracketed Spans:
- [Ganz Gallien ist von den Römern besetzt]{.textquote cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
- [Toute la Gaule est occupée par les Romains]{.textquote lang="francais" punct="..." cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
- This filter will emit \{textquote/foreigntextquote}[<cite>][<punct>]{<text>} commands
"""
from jinja2tex import latex_env
import panflute as pf
# Jinja2 template (jinja2tex's LaTeX-safe <% %>/<< >> delimiters) expanding to
# \foreigntextquote{lang}[cite][punct]{text} when a language is given, or
# \textquote[cite][punct]{text} otherwise; empty optional args are omitted.
QUOTE = latex_env.from_string(r"""
<%- if lang %>\foreigntextquote{<< lang >>}<% else %>\textquote<% endif -%>
<% if cite %>[{<< cite >>}]<% endif -%>
<% if punct %>[<< punct >>]<% endif -%>
{<< text >>}""")
def prepare(doc):
    """No-op pre-pass; required by panflute's run_filter interface."""
    pass
def action(e, doc):
    """Convert a ``.textquote`` Span into a raw LaTeX textquote command.

    Only active for LaTeX output. The optional ``cite`` attribute is run
    through pandoc with ``--biblatex`` so citation syntax is honoured, and
    the Span's contents are converted from the panflute AST to LaTeX before
    being rendered into the QUOTE template.
    """
    if doc.format != 'latex':
        return None
    if not (isinstance(e, pf.Span) and 'textquote' in e.classes):
        return None

    cite = e.attributes.get('cite')
    if cite:
        cite = pf.convert_text(cite,
                               extra_args=['--biblatex'],
                               input_format='markdown',
                               output_format='latex')
    rendered_text = pf.convert_text(pf.Plain(e),
                                    extra_args=['--biblatex'],
                                    input_format='panflute',
                                    output_format='latex')
    tex = QUOTE.render({
        'lang': e.attributes.get('lang'),
        'cite': cite,
        'punct': e.attributes.get('punct'),
        'text': rendered_text,
    })
    return pf.RawInline(tex, format='latex')
def finalize(doc):
    """No-op post-pass; required by panflute's run_filter interface."""
    pass
def main(doc=None):
    """Entry point: apply the textquote filter over the whole document."""
    return pf.run_filter(action, prepare=prepare, finalize=finalize, doc=doc)


if __name__ == '__main__':
    main()
| 1,908 | 661 |
"""
Adds version number sequence id of a GFF file.
"""
import sys
ACC=sys.argv[1]
VER=sys.argv[2]
for line in sys.stdin:
line = line.strip()
elems = line.split()
if elems and elems[0] == ACC:
elems[0] = f'{elems[0]}.{VER}'
if line.startswith("#"):
print (line)
else:
print("\t".join(elems)) | 335 | 133 |
from .return_class import AbstractApiClass
from .refresh_schedule import RefreshSchedule
class Deployment(AbstractApiClass):
    """
    A model deployment

    Thin wrapper over the API's deployment resource: identity and status
    fields plus helper methods that delegate to the underlying API client.
    """

    def __init__(self, client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, regions=None, error=None, refreshSchedules=None):
        # refreshSchedules previously defaulted to a shared mutable {};
        # use None and substitute a fresh {} per call instead.
        super().__init__(client, deploymentId)
        self.deployment_id = deploymentId
        self.name = name
        self.status = status
        self.description = description
        self.deployed_at = deployedAt
        self.created_at = createdAt
        self.project_id = projectId
        self.model_id = modelId
        self.model_version = modelVersion
        self.feature_group_id = featureGroupId
        self.feature_group_version = featureGroupVersion
        self.calls_per_second = callsPerSecond
        self.auto_deploy = autoDeploy
        self.regions = regions
        self.error = error
        self.refresh_schedules = client._build_class(
            RefreshSchedule,
            refreshSchedules if refreshSchedules is not None else {})

    def __repr__(self):
        return f"Deployment(deployment_id={repr(self.deployment_id)}, name={repr(self.name)}, status={repr(self.status)}, description={repr(self.description)}, deployed_at={repr(self.deployed_at)}, created_at={repr(self.created_at)}, project_id={repr(self.project_id)}, model_id={repr(self.model_id)}, model_version={repr(self.model_version)}, feature_group_id={repr(self.feature_group_id)}, feature_group_version={repr(self.feature_group_version)}, calls_per_second={repr(self.calls_per_second)}, auto_deploy={repr(self.auto_deploy)}, regions={repr(self.regions)}, error={repr(self.error)}, refresh_schedules={repr(self.refresh_schedules)})"

    def to_dict(self):
        """Return all attributes as a plain dict (refresh schedules serialized)."""
        return {'deployment_id': self.deployment_id, 'name': self.name, 'status': self.status, 'description': self.description, 'deployed_at': self.deployed_at, 'created_at': self.created_at, 'project_id': self.project_id, 'model_id': self.model_id, 'model_version': self.model_version, 'feature_group_id': self.feature_group_id, 'feature_group_version': self.feature_group_version, 'calls_per_second': self.calls_per_second, 'auto_deploy': self.auto_deploy, 'regions': self.regions, 'error': self.error, 'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules)}

    def refresh(self):
        """Re-fetch this deployment from the API and update this object in place."""
        self.__dict__.update(self.describe().__dict__)
        return self

    def describe(self):
        """Fetch and return the latest state of this deployment from the API."""
        return self.client.describe_deployment(self.deployment_id)

    def update(self, description=None):
        """Update the deployment's description."""
        return self.client.update_deployment(self.deployment_id, description)

    def rename(self, name):
        """Rename the deployment."""
        return self.client.rename_deployment(self.deployment_id, name)

    def set_auto(self, enable=None):
        """Enable or disable automatic deployment of new model versions."""
        return self.client.set_auto_deployment(self.deployment_id, enable)

    def set_model_version(self, model_version):
        """Point the deployment at a specific model version."""
        return self.client.set_deployment_model_version(self.deployment_id, model_version)

    def set_feature_group_version(self, feature_group_version):
        """Point the deployment at a specific feature group version."""
        return self.client.set_deployment_feature_group_version(self.deployment_id, feature_group_version)

    def start(self):
        """Start the deployment."""
        return self.client.start_deployment(self.deployment_id)

    def stop(self):
        """Stop the deployment."""
        return self.client.stop_deployment(self.deployment_id)

    def delete(self):
        """Delete the deployment."""
        return self.client.delete_deployment(self.deployment_id)

    def create_batch_prediction(self, name=None, global_prediction_args=None, explanations=False, output_format=None, output_location=None, database_connector_id=None, database_output_config=None, refresh_schedule=None, csv_input_prefix=None, csv_prediction_prefix=None, csv_explanations_prefix=None):
        """Create a batch prediction job attached to this deployment."""
        return self.client.create_batch_prediction(self.deployment_id, name, global_prediction_args, explanations, output_format, output_location, database_connector_id, database_output_config, refresh_schedule, csv_input_prefix, csv_prediction_prefix, csv_explanations_prefix)

    def wait_for_deployment(self, wait_states=None, timeout=480):
        """Block until the deployment leaves *wait_states* or *timeout* seconds elapse.

        The default wait states are PENDING and DEPLOYING (previously a shared
        mutable default set).
        """
        if wait_states is None:
            wait_states = {'PENDING', 'DEPLOYING'}
        return self.client._poll(self, wait_states, timeout=timeout)

    def get_status(self):
        """Return the deployment's current status string."""
        return self.describe().status

    def create_refresh_policy(self, cron: str):
        """Create a cron-driven refresh policy for this deployment."""
        return self.client.create_refresh_policy(self.name, cron, 'DEPLOYMENT', deployment_ids=[self.id])

    def list_refresh_policies(self):
        """List the refresh policies attached to this deployment."""
        return self.client.list_refresh_policies(deployment_ids=[self.id])
| 4,626 | 1,427 |
import logging
import platform
import os
# Project root = parent of the directory containing this file.
ROOT_DIR = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
# Default locations for third-party dependency builds and build output.
DEFAULT_DEPS = os.path.join(ROOT_DIR, 'build', 'deps')
DEFAULT_OUT = os.path.join(ROOT_DIR, 'out')

logger = logging.getLogger(__name__)
def prepend_envvar(env, k, v, sep=os.pathsep):
    """Prepend *v* to ``env[k]`` using *sep*; create the key when absent/empty."""
    current = env.get(k)
    env[k] = sep.join((v, current)) if current else v
# def disable_werror(env, warns):
# for w in warns:
# prepend_envvar(env, 'CPPFLAGS', '-Wno-error=%s' % w, ' ')
def prepend_libdir(env, libdir):
    """Expose *libdir* to the platform's linker/loader search paths."""
    if platform.system() == 'Windows':
        for var in ('PATH', 'LIB', 'LIBPATH'):
            prepend_envvar(env, var, libdir)
        return
    # POSIX: runtime loader path, compile-time library path, and linker flag.
    for var in ('LD_LIBRARY_PATH', 'LIBRARY_PATH'):
        prepend_envvar(env, var, libdir)
    prepend_envvar(env, 'LDFLAGS', '-L%s' % libdir, ' ')
def prepend_include_dir(env, include):
    """Expose *include* to the C/C++ compiler header search paths."""
    if platform.system() == 'Windows':
        prepend_envvar(env, 'INCLUDE', include)
        return
    for var in ('C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH'):
        prepend_envvar(env, var, include)
def setup_env(root, env=os.environ):
    """Prepend *root*'s ``bin/``, ``include/`` and ``lib/`` dirs to *env*.

    Mutates *env* in place (the real process environment by default — that
    default is intentional here).
    """
    # Fix: use the module-level logger (the old code logged via the root
    # logger despite defining `logger`), and pass lazy %-args to logging.
    logger.info('Setting up environment variable for root directory [%s]', root)
    prepend_envvar(env, 'PATH', os.path.join(root, 'bin'))
    prepend_include_dir(env, os.path.join(root, 'include'))
    prepend_libdir(env, os.path.join(root, 'lib'))
    for k, v in env.items():
        logger.debug('%s: %s', k, v)
| 1,426 | 575 |
# -*- coding: utf-8 -*-
"""
table of content directive.
"""
import attr
from .base import Directive
@attr.s
class TableOfContent(Directive):
"""
``.. contents::`` directive.
parameter definition see here: http://docutils.sourceforge.net/docs/ref/rst/directives.html#table-of-contents
:param title: str, required.
:param depth: int, optional.
:param local: bool, optional.
:type backlinks: str
:param backlinks: optional. one of
:attr:`TableOfContent.BacklinksOptions`.
Example::
toc = TableOfContent(title="Table of Contents", depth=2)
toc.render()
Output::
.. contents:: Table of Contents
:depth: 2
"""
title = attr.ib(default=None) # type: str
depth = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
) # type: int
local = attr.ib(
default=False,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
) # type: bool
backlinks = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)),
)
meta_directive_keyword = "contents"
meta_not_none_fields = tuple()
class BacklinksOptions(object):
"""
``backlinks`` argument choices.
- ``TableOfContent.BacklinksOptions.entry``: ``"entry"``
- ``TableOfContent.BacklinksOptions.top``: ``"top"``
- ``TableOfContent.BacklinksOptions.none``: ``"none"``
"""
entry = "entry"
top = "top"
none = "none"
@backlinks.validator
def check_backlinks(self, attribute, value): # pragma: no cover
if value not in [None, "entry", "top", "none"]:
raise ValueError(
"TableOfContent.backlinks has to be one of 'entry', 'top', 'none'!"
)
@property
def arg(self):
if self.title is None:
return ""
else:
return self.title
| 2,010 | 619 |
from flask import *
import databaseModel
from functools import wraps
import time
bp = Blueprint("StaffPage", __name__, url_prefix='/StaffPage')
def login_required(func):
    """Decorator: require a logged-in session, else redirect to the index page.

    On success the username is stashed on ``g.UserName`` for the view.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        username = session.get('UserName')
        if username is None:
            return redirect(url_for("index"))
        g.UserName = username
        return func(*args, **kwargs)
    return wrapper
@bp.route("/OrderToStore",methods=["POST"])
@login_required
def OrderToStore():
try:
OrderNum = request.form["OrderNum"]
method = request.form["way"]
order = databaseModel.Orders.query.filter_by(OrderNum = OrderNum).first()
if(order != None):
if(method == "inStore"): #如果是入库操作
neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum, StaffNum=g.UserName) #像historyorder表里添加一行,表示该快递员经手过这个快递
databaseModel.db.session.add(neworder_)
orderstaff_ = databaseModel.OrderStaffs.query.filter_by(OrderNum=OrderNum).first() #修改orderstaff 表的staffnum,表示当前快递已更换配送者
orderstaff_.StaffNum = g.UserName
databaseModel.db.session.commit()
if(method == "sigh"): #如果是签收操作
order_ = databaseModel.Orders.query.filter_by(OrderNum=OrderNum).first()
order_.StagNum = 2 #更改为已签收
neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum, StaffNum=g.UserName) # 像historyorder表里添加一行,表示该快递员经手过这个快递
databaseModel.db.session.add(neworder_)
orderstaff_ = databaseModel.OrderStaffs.query.filter_by(OrderNum=OrderNum).first() # 修改orderstaff 表的staffnum,表示当前快递已更换配送者
orderstaff_.StaffNum = "0"
databaseModel.db.session.commit()
except Exception as e:
return {"code":"0"}
return {"code":"200"}
@bp.route("/HistoryOrders",methods=["GET"])
@login_required
def HistoryOrders():
historyorders = databaseModel.HistoryOrders.query.filter_by(StaffNum= g.UserName).all()
jsondata = {}
for i,c in enumerate(historyorders):
ordernum_ = c.OrderNum
order = databaseModel.Orders.query.filter_by(OrderNum=ordernum_).first()
recvaddr_ = order.RecvAddr
if(order.StagNum == 1):
orderstag_ = '已寄出'
if(order.StagNum == 2):
orderstag_ = "已签收"
data = {"OrderNum":ordernum_,"RecvAddr":recvaddr_,"OrderStag":orderstag_}
jsondata[i] = data
return jsondata
@bp.route("/StaffInfo",methods=["POST"])
@login_required
def StaffInfo():
try:
staffinfo = databaseModel.Staffs.query.filter_by(UserName=g.UserName).first()
jsondata = {}
StaffName =staffinfo.StaffName
StaffNum = staffinfo.UserName
StaffTele = staffinfo.StaffTele
StaffIdCard = staffinfo.StaffIdCard
print(StaffIdCard,StaffName)
jsondata["code"] = "1"
jsondata["StaffName"]= StaffName
jsondata["StaffNum"] = StaffNum
jsondata["StaffTele"] = StaffTele
jsondata["StaffIdCard"] = StaffIdCard
except Exception as e:
current_app.logger.debug(e)
return {"code":"0"}
return jsondata
| 3,256 | 1,110 |
"""
Given the head to a singly linked list, where each node also has a “random” pointer
that points to anywhere in the linked list, deep clone the list.
"""
from typing import Tuple
class Node:
    """Singly linked list node carrying an extra ``random`` pointer."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.next = None
        self.random = None


def deep_clone_ll(ll_head: Node) -> Tuple[Node, Node]:
    """Deep clone a list whose nodes also carry a random pointer.

    Strategy: interleave clone nodes after their originals, wire the clones'
    random pointers via the interleaving, then split the two lists apart.

    Time Complexity: O(n); auxiliary Space: O(1).

    Returns (original_head, cloned_head); (None, None) for an empty list.
    The original list is restored to its initial shape.
    """
    if ll_head is None:
        return None, None

    # 1) Interleave: orig1 -> clone1 -> orig2 -> clone2 -> ...
    runner = ll_head
    while runner:
        clone = Node(runner.data)
        clone.next = runner.next
        runner.next = clone
        runner = clone.next

    # 2) Wire clone randoms. Fix: tolerate None random pointers (the
    # original code crashed on runner.random.next when random was unset).
    runner = ll_head
    while runner:
        if runner.random is not None:
            runner.next.random = runner.random.next
        runner = runner.next.next

    # 3) Split the interleaved list back into original and clone chains.
    new_head = ll_head.next
    runner = ll_head
    while runner:
        clone = runner.next
        runner.next = clone.next
        clone.next = clone.next.next if clone.next else None
        runner = runner.next

    return ll_head, new_head
if __name__ == "__main__":
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.random = head.next.next
head.next.random = head
head.next.next.random = head.next.next.next.next
head.next.next.next.random = head.next.next
head.next.next.next.next.random = head.next
original, copied = deep_clone_ll(head)
while original:
print("orig node:", original.data, ", random:", original.random.data)
print("copied node:", copied.data, ", random:", copied.random.data)
original = original.next
copied = copied.next
| 1,849 | 581 |
import tkinter as tk
from tkinter import ttk
# Intended vertical layout offsets, each widget 20px below the previous.
# NOTE(review): these constants are not referenced by Base64.create_base64_tab
# below, which hard-codes its own y coordinates — confirm which layout is
# intended before removing either.
raw_data_label_y_location = 20
raw_data_entry_location = raw_data_label_y_location + 20
encode_button_location = raw_data_entry_location + 20
decode_button_location = encode_button_location + 20
base64_data_label_y_location = decode_button_location + 20
base64_data_entry_y_location = base64_data_label_y_location + 20
class Base64:
    """UI tab offering base64 encode/decode of raw text (no handlers wired yet)."""

    def create_base64_tab(self, tab_control):
        """Build and return the base64 tab frame with its widgets placed."""
        tab = ttk.Frame(tab_control)

        # Raw-data input section.
        tk.Label(tab, text="raw data").place(x=500, y=0)
        tk.Entry(tab).place(x=500, y=100)

        # Action buttons (no commands attached yet).
        tk.Button(tab, text="encode", width=5).place(x=500, y=200)
        tk.Button(tab, text="decode", width=5).place(x=500, y=300)

        # Base64 output section.
        tk.Label(tab, text="base64 data").place(x=500, y=400)
        tk.Entry(tab).place(x=500, y=500)

        return tab
| 1,095 | 433 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""edsr_slim.py"""
from src import common
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
class EDSR(nn.Cell):
    """Slimmable EDSR super-resolution network.

    The backbone is standard EDSR (head conv -> residual blocks -> conv ->
    upsampler -> tail conv), but ``construct`` slices every stored weight
    down to ``int(n_feats * width_mult)`` channels, so a single parameter
    set can run at several network widths.

    Args:
        args: config namespace providing n_colors, n_resblocks, n_feats,
            scale, rgb_range and res_scale.
    """
    def __init__(self, args):
        super(EDSR, self).__init__()
        self.n_colors = args.n_colors
        n_resblocks = args.n_resblocks
        self.n_feats = args.n_feats
        self.kernel_size = 3
        scale = args.scale[0]
        act = nn.ReLU()
        self.rgb_range = args.rgb_range
        # Mean shift applied before the network and undone after it
        # (per the sub_/add_ naming; see common.MeanShift).
        self.sub_mean = common.MeanShift(self.rgb_range)
        self.add_mean = common.MeanShift(self.rgb_range, sign=1)
        self.head = common.conv(args.n_colors, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
        m_body = [
            common.ResidualBlock(
                self.n_feats, self.kernel_size, act=act, res_scale=args.res_scale
            ) for _ in range(n_resblocks)
        ]
        self.body = nn.CellList(m_body)
        self.body_conv = common.conv(self.n_feats, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
        self.upsampler = common.Upsampler(scale, self.n_feats)
        self.tail_conv = common.conv(self.n_feats, args.n_colors, self.kernel_size, padding=self.kernel_size//2)

    def construct(self, x, width_mult=Tensor(1.0)):
        """construct

        Forward pass at a fraction ``width_mult`` of the full feature width:
        conv weights/biases are sliced to ``feature_width`` channels and the
        convs are re-issued with that output-channel count.
        """
        width_mult = width_mult.asnumpy().item()
        feature_width = int(self.n_feats * width_mult)
        # Conv op created with the sliced output-channel count.
        conv2d = ops.Conv2D(out_channel=feature_width, kernel_size=self.kernel_size, mode=1, pad_mode='pad',
                            pad=self.kernel_size // 2)
        biasadd = ops.BiasAdd()
        x = self.sub_mean(x)
        # Head: full n_colors in, sliced feature_width out.
        weight = self.head.weight[:feature_width, :self.n_colors, :, :]
        bias = self.head.bias[:feature_width]
        x = conv2d(x, weight)
        x = biasadd(x, bias)
        residual = x
        # Residual blocks receive width_mult so they can slice themselves.
        for block in self.body:
            residual = block(residual, width_mult)
        weight = self.body_conv.weight[:feature_width, :feature_width, :, :]
        bias = self.body_conv.bias[:feature_width]
        residual = conv2d(residual, weight)
        residual = biasadd(residual, bias)
        residual += x  # long skip connection around the body
        x = self.upsampler(residual, width_mult)
        # Tail: sliced feature_width in, full n_colors out.
        weight = self.tail_conv.weight[:self.n_colors, :feature_width, :, :]
        bias = self.tail_conv.bias[:self.n_colors]
        conv2d = ops.Conv2D(out_channel=self.n_colors, kernel_size=self.kernel_size,
                            mode=1, pad_mode='pad', pad=self.kernel_size//2)
        x = conv2d(x, weight)
        x = biasadd(x, bias)
        x = self.add_mean(x)
        return x
| 3,307 | 1,121 |
import os
import sys
import json
from pathlib import Path
import pandas as pd
import random
import tensorflow as tf
import io
import argparse
from PIL import Image
from collections import namedtuple
from object_detection.utils import dataset_util, label_map_util
import logging
# Root logger prints bare messages to stdout.
logging.basicConfig(stream=sys.stdout, format='',
                    level=logging.INFO, datefmt=None)
logger = logging.getLogger('NJDD-prepare-data')

# Initiate argument parser
parser = argparse.ArgumentParser(
    description="Sample TensorFlow json-to-TFRecord converter")
parser.add_argument("-json",
                    "--json_path",
                    help="Path to the input .json files.",
                    type=str)
parser.add_argument("-l",
                    "--labels_path",
                    help="Path to the labels (.pbtxt) file.", type=str)
parser.add_argument("-o",
                    "--output_dir",
                    help="Path of the output dir for storing TFRecord (.record) file.", type=str)
parser.add_argument("-i",
                    "--image_dir",
                    help="Path to the folder where the input image files are stored. "
                         "Defaults to the same directory as JSON_PATH.",
                    type=str, default=None)
parser.add_argument("-c",
                    "--csv_path",
                    help="Path of output .csv file. If none provided, then no file will be "
                         "written.",
                    type=str, default=None)
args = parser.parse_args()

if args.image_dir is None:
    # BUG FIX: argparse stores --json_path under args.json_path; the old code
    # read the nonexistent args.json_dir and raised AttributeError whenever
    # --image_dir was omitted.
    args.image_dir = args.json_path

label_map = label_map_util.load_labelmap(args.labels_path)
label_map_dict = label_map_util.get_label_map_dict(label_map)
def bbox_dict_to_df(bbox_dict):
    """
    Flatten a bbox dict into a DataFrame with one row per image.

    This function assumes that the objects list contains one element
    (v['objects'][0]).
    """
    log_index = 'bbox_dict_to_df>'
    rows = []
    for fname, entry in bbox_dict.items():
        size = entry['size']
        first_obj = entry['objects'][0]
        bbox = first_obj['bbox']
        rows.append((fname, size['height'], size['width'],
                     bbox[0], bbox[1], bbox[2], bbox[3], first_obj['name']))
    logger.info(f'{log_index} Collected {len(rows)} objects')
    return pd.DataFrame(rows, columns=['fname', 'height', 'width',
                                       'ym', 'xm', 'yM', 'xM', 'class'])
def split_dataset(df, perc=0.9):
    """Shuffle ``df`` and split it into train/validation partitions."""
    log_index = 'split_dataset>'
    shuffled = df.sample(frac=1).reset_index(drop=True)
    cutoff = int(perc * len(shuffled))
    df_train = shuffled.iloc[0:cutoff]
    df_val = shuffled.iloc[cutoff:]
    logger.info(f'{log_index} TRAINING EXAMPLES: {len(df_train)} - VALIDATION EXAMPLES: {len(df_val)}')
    return df_train, df_val
def class_text_to_int(row_label):
    """Map a class-name string to its integer id from the label map."""
    label_id = label_map_dict[row_label]
    return label_id
def split(df, group):
    """
    Group ``df`` by the given column and return one namedtuple per group.

    :param df: DataFrame with one row per annotated object.
    :param group: column name to group by (e.g. 'fname').
    :return: list of ``data(fname, object)`` tuples where ``object`` is the
             sub-DataFrame of rows sharing that key.
    """
    data = namedtuple('data', ['fname', 'object'])
    # Fix/simplification: the original zipped gb.groups.keys() with an
    # iteration over the same dict (i.e. the keys twice) and re-fetched each
    # frame via get_group().  Iterating the GroupBy yields the same
    # (key, sub-frame) pairs in the same sorted-key order.
    return [data(fname, frame) for fname, frame in df.groupby(group)]
def create_tf_example(group, path):
    """
    Build a tf.train.Example for one image and its grouped annotations.

    :param group: namedtuple with ``fname`` (image file name) and ``object``
        (DataFrame of bbox rows for that image: columns xm/xM/ym/yM/class).
    :param path: directory containing the image files.
    :return: populated ``tf.train.Example`` protocol buffer.

    NOTE(review): bbox values are written as raw floats; the TF Object
    Detection API expects coordinates normalized to [0, 1] -- confirm the
    input JSON already stores normalized values.
    """
    log_index = 'create_tf_example>'
    # Read the encoded JPEG bytes once; decode only to obtain the image size.
    with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.fname)), 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size
    logger.info(f'{log_index} Retrived image with size: {width, height} - (w,h)')
    filename = group.fname.encode('utf8')
    image_format = b'jpg'
    # One entry per annotated object in this image.
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    for _, row in group.object.iterrows():
        xmins.append(row['xm'])
        xmaxs.append(row['xM'])
        ymins.append(row['ym'])
        ymaxs.append(row['yM'])
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))
    logger.info(f'{log_index} Collected {len(xmins)} rows')
    # Assemble the feature dict in the layout expected by the TF OD API.
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example
def main():
    """Convert the input JSON annotations into train/val TFRecord files."""
    log_index = 'main>'
    logger.info(f'{log_index} Reading bbox at {args.json_path} ...')
    with open(args.json_path, 'r') as f:
        bbox_dict = json.load(f)
    df = bbox_dict_to_df(bbox_dict)
    df_train, df_val = split_dataset(df)
    image_root = Path(args.image_dir)
    for subset, subset_df in (('train', df_train), ('val', df_val)):
        logger.info(f'{log_index} Writing TFRecords for subset: {subset}')
        record_path = os.path.join(args.output_dir, f'dywidag_{subset}.records')
        writer = tf.io.TFRecordWriter(record_path)
        for group in split(subset_df, 'fname'):
            writer.write(create_tf_example(group, image_root).SerializeToString())
        writer.close()
        logger.info(f'{log_index} Successfully created the TFRecord file: {args.output_dir}')
    if args.csv_path is not None:
        df.to_csv(args.csv_path, index=None)
        logger.info(f'{log_index} Successfully created the CSV file: {args.csv_path}')


if __name__ == '__main__':
    main()
"""
Interface for interesting actions.
"""
from dataclasses import dataclass
import logging
from typing import Union, List, Optional, Iterable
from .call_hooks.win_api import constants as wc
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
@dataclass
class Action:
    """Base record for an interesting action observed during emulation."""
    # Address of the instruction that produced the action
    # (presumably an instruction pointer -- confirm with call sites).
    ip: int
class ActionList:
    """
    Represents a reverse linked list of actions that have occurred up
    to a specific ProcessorContext.
    """

    def __init__(self, *actions: Action):
        self.tail: Optional[ActionNode] = None
        for entry in actions:
            self.add(entry)

    def __repr__(self):
        inner = repr(self.tail) if self.tail else ''
        return f"ActionList({inner})"

    def __deepcopy__(self, memo):
        # Linked nodes are never mutated after creation, so a "deep" copy
        # only needs a fresh list head sharing the same node chain.
        clone = ActionList()
        clone.tail = self.tail
        return clone

    def __iter__(self):
        if self.tail:
            yield from self.tail

    def __reversed__(self):
        if self.tail:
            yield from reversed(self.tail)

    def __getitem__(self, index: int):
        return list(self)[index]

    def __len__(self):
        return sum(1 for _ in self)

    def __bool__(self):
        return self.tail is not None

    def __contains__(self, item):
        for recorded in self:
            if item == recorded:
                return True
        return False

    def add(self, action: Action):
        self.tail = ActionNode(action, prev=self.tail)
class ActionNode:
    """
    Represents a node of a reverse linked list of actions that have occurred up
    to a specific ProcessorContext.
    """

    def __init__(self, action: Action, prev: Optional["ActionNode"] = None):
        self.action = action
        self.prev = prev

    def __repr__(self):
        return f"{self.prev!r} -> {self.action}" if self.prev else f"{self.action}"

    def __iter__(self):
        """
        Iterates actions from the least recent action that has occurred to
        the most recent action that has occurred.
        """
        collected = []
        node = self
        while node is not None:
            collected.append(node.action)
            node = node.prev
        yield from reversed(collected)

    def __reversed__(self):
        """
        Iterates actions from the most recent action that has occurred to
        the least recent action that has occurred.
        """
        node = self
        while node is not None:
            yield node.action
            node = node.prev
@dataclass
class CommandExecuted(Action):
    """Records an executed command line."""
    # Full command line string.
    command: str
    # Window visibility flag; None when not specified.
    visibility: Optional[wc.Visibility] = None
@dataclass
class DirectoryCreated(Action):
    """A directory was created at ``path``."""
    path: str


@dataclass
class FileCreated(Action):
    """A file was created (handle, path and open mode recorded)."""
    handle: int
    path: str
    mode: str


@dataclass
class FileOpened(Action):
    """An existing file was opened."""
    handle: int
    path: str
    mode: str


@dataclass
class FileTruncated(Action):
    """A file was truncated."""
    handle: int
    path: str
    mode: str


@dataclass
class FileDeleted(Action):
    """A file was deleted."""
    handle: int
    path: str


@dataclass
class FileMoved(Action):
    """A file was moved/renamed from ``old_path`` to ``new_path``."""
    handle: int
    old_path: str
    new_path: str


@dataclass
class FileClosed(Action):
    """A file handle was closed."""
    handle: int


@dataclass
class FileWritten(Action):
    """Data bytes were written to a file handle."""
    handle: int
    data: bytes
@dataclass
class RegKeyOpened(Action):
    """A registry key was opened."""
    handle: int
    path: str
    root_key: str
    sub_key: str


@dataclass
class RegKeyDeleted(Action):
    """A registry key was deleted."""
    handle: int
    path: str


@dataclass
class RegKeyValueDeleted(Action):
    """A named value was deleted from a registry key."""
    handle: int
    path: str
    value_name: str


@dataclass
class RegKeyValueSet(Action):
    """A value was written to a registry key."""
    handle: int
    path: str
    # Registry data type name (e.g. as reported by the hooked API).
    data_type: str
    data: Union[bytes, str, List[str], int, None]
@dataclass
class ServiceCreated(Action):
    """A service was created with the recorded access/type/start settings."""
    handle: int
    name: str
    access: wc.ServiceAccess
    service_type: wc.ServiceType
    start_type: wc.ServiceStart
    display_name: str
    binary_path: str


@dataclass
class ServiceOpened(Action):
    """An existing service was opened."""
    handle: int
    name: str


@dataclass
class ServiceDeleted(Action):
    """A service was deleted."""
    handle: int


@dataclass
class ServiceDescriptionChanged(Action):
    """A service's description string was changed."""
    handle: int
    description: str
@dataclass
class ShellOperation(Action):
    """A shell operation was requested on a path."""
    # Operation verb string.
    operation: str
    path: str
    parameters: str
    directory: str
    # Window visibility flag; None when not specified.
    visibility: Optional[wc.Visibility] = None
| 3,962 | 1,191 |
from __future__ import absolute_import, division, print_function
import sys
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tensorflow import convert_to_tensor as to_T
# NOTE(review): creating a tf.Session and initializing globals at import time
# is a module-level side effect; importers cannot choose the graph or config.
sess = tf.Session()
tl.layers.initialize_global_variables(sess)
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
               bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
    """
    2-D convolution layer with optional bias.

    :param name: variable scope; variables are created (or reused) under it.
    :param bottom: input tensor, shape [batch, in_height, in_width, in_channels].
    :param kernel_size: square filter size.
    :param stride: spatial stride (same for height and width).
    :param output_dim: number of output channels.
    :param padding: 'SAME' or 'VALID'.
    :param bias_term: whether to add a learned bias.
    :param weights_initializer: defaults to Xavier initialization for conv2d.
    :param biases_initializer: defaults to zeros.
    :param reuse: pass True to reuse previously created variables.
    :return: the convolution output tensor.
    """
    # input has shape [batch, in_height, in_width, in_channels]
    input_dim = bottom.get_shape().as_list()[-1]

    # weights and biases variables
    with tf.variable_scope(name, reuse=reuse):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # filter has shape [filter_height, filter_width, in_channels, out_channels]
        weights = tf.get_variable("weights",
                                  [kernel_size, kernel_size, input_dim, output_dim],
                                  initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                                     initializer=biases_initializer)
        # Register the L2 weight penalty only on first creation (not on reuse).
        if not reuse:
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 tf.nn.l2_loss(weights))

        conv = tf.nn.conv2d(bottom, filter=weights,
                            strides=[1, stride, stride, 1], padding=padding)
        if bias_term:
            conv = tf.nn.bias_add(conv, biases)
    return conv
def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                    bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
    """
    2-D convolution followed by a ReLU non-linearity.

    Same contract as conv_layer().  Previously this function duplicated
    conv_layer's variable-creation code verbatim; it now delegates to
    conv_layer, mirroring how fc_relu_layer() wraps fc_layer().

    :return: ReLU-activated convolution output tensor.
    """
    conv = conv_layer(name, bottom, kernel_size, stride, output_dim, padding,
                      bias_term, weights_initializer, biases_initializer,
                      reuse=reuse)
    relu = tf.nn.relu(conv)
    return relu
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                 bias_term=True, weights_initializer=None, biases_initializer=None,
                 reuse=None):
    """
    Transposed convolution (upsampling by ``stride``) via TensorLayer.

    Bug fix: ``reuse`` was referenced in the body (variable_scope and the
    regularization guard) but missing from the signature, raising NameError
    and breaking deconv_relu_layer's ``reuse=reuse`` call.  It is now an
    explicit keyword parameter defaulting to None, matching the other
    layer helpers (backward compatible).
    """
    # input_shape is [batch, in_height, in_width, in_channels]
    input_shape = bottom.get_shape().as_list()
    batch_size, input_height, input_width, input_dim = input_shape
    output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]

    # weights and biases variables
    with tf.variable_scope(name, reuse=reuse):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # filter has shape [filter_height, filter_width, out_channels, in_channels]
        weights = tf.get_variable("weights",
                                  [kernel_size, kernel_size, output_dim, input_dim],
                                  initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                                     initializer=biases_initializer)
        # Register the L2 weight penalty only on first creation (not on reuse).
        if not reuse:
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 tf.nn.l2_loss(weights))

        # NOTE(review): the "weights"/"biases" variables above feed only the
        # regularization loss; DeConv2dLayer creates its own parameters from
        # W_init/b_init -- confirm this duplication is intended.
        net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
        deconv = tl.layers.DeConv2dLayer(net, act=tf.identity, shape=[kernel_size, kernel_size, output_dim, input_dim],
                                         output_shape=output_shape, strides=[1, stride, stride, 1],
                                         padding=padding, W_init=weights_initializer, b_init=biases_initializer,
                                         name=name+'deconv2d')
    return deconv.outputs
def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                      bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
    """Transposed convolution followed by a ReLU non-linearity."""
    deconv_out = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding,
                              bias_term, weights_initializer, biases_initializer, reuse=reuse)
    return tf.nn.relu(deconv_out)
def pooling_layer(name, bottom, kernel_size, stride):
    """Max pooling over square windows, implemented with TensorLayer."""
    #pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1],
    #                      strides=[1, stride, stride, 1], padding='SAME', name=name)
    net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
    pool = tl.layers.PoolLayer(net, ksize=[1, kernel_size, kernel_size, 1],
                               strides=[1, stride, stride, 1], padding='SAME', pool=tf.nn.max_pool, name=name+'pool')
    return pool.outputs
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
             biases_initializer=None, reuse=None):
    """
    Fully-connected layer: flattens ``bottom`` and applies x*W (+ b).

    :param bottom: tensor of shape [batch, ...]; all non-batch dims are flattened.
    :param output_dim: number of output units.
    :param reuse: pass True to reuse previously created variables.
    :return: tensor of shape [batch, output_dim].
    """
    # flatten bottom input
    # input has shape [batch, in_height, in_width, in_channels]
    shape = bottom.get_shape().as_list()
    input_dim = 1
    for d in shape[1:]:
        input_dim *= d
    # flat_bottom = tf.reshape(bottom, [-1, input_dim])
    net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
    flat_bottom = tl.layers.ReshapeLayer(net, [-1, input_dim], name=name+'reshape').outputs

    # weights and biases variables
    with tf.variable_scope(name, reuse=reuse):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.contrib.layers.xavier_initializer()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # weights has shape [input_dim, output_dim]
        weights = tf.get_variable("weights", [input_dim, output_dim],
                                  initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                                     initializer=biases_initializer)
        # Register the L2 weight penalty only on first creation (not on reuse).
        if not reuse:
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 tf.nn.l2_loss(weights))

        if bias_term:
            fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
        else:
            fc = tf.matmul(flat_bottom, weights)
    return fc
def fc_relu_layer(name, bottom, output_dim, bias_term=True,
                  weights_initializer=None, biases_initializer=None, reuse=None):
    """Fully-connected layer followed by a ReLU non-linearity."""
    linear_out = fc_layer(name, bottom, output_dim, bias_term,
                          weights_initializer, biases_initializer, reuse=reuse)
    return tf.nn.relu(linear_out)
# convnet built for shapes dataset
def shapes_convnet(input_batch, hidden_dim=64, output_dim=64,
                   scope='shapes_convnet', reuse=None):
    """
    Two-layer convnet: a 10x10/stride-10 VALID conv+ReLU (one activation per
    10x10 cell) followed by a 1x1 conv+ReLU projection to ``output_dim``.
    """
    with tf.variable_scope(scope, reuse=reuse):
        conv_1 = conv_relu_layer('conv_1', input_batch, kernel_size=10, stride=10,
                                 output_dim=hidden_dim, padding='VALID')
        conv_2 = conv_relu_layer('conv_2', conv_1, kernel_size=1, stride=1,
                                 output_dim=output_dim)
    return conv_2
# following convnet are safe even for empty data
def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):
    """
    1x1 convolution implemented as a matmul over flattened pixels.

    Used instead of tf.nn.conv2d in modules because the matmul formulation
    handles a zero-size batch without crashing (per the comments below).
    """
    # use this for 1x1 convolution in modules to avoid the crash.
    bottom_shape = tf.shape(bottom)
    input_dim = bottom.get_shape().as_list()[-1]

    # weights and biases variables
    with tf.variable_scope(name, reuse=reuse):
        # initialize the variables
        weights_initializer = tf.contrib.layers.xavier_initializer()
        biases_initializer = tf.constant_initializer(0.)
        weights = tf.get_variable('weights', [input_dim, output_dim],
                                  initializer=weights_initializer)
        biases = tf.get_variable('biases', output_dim,
                                 initializer=biases_initializer)

        # Flatten to [batch*h*w, input_dim], apply the linear map, then
        # restore the spatial shape with the new channel count.
        conv_flat = tf.matmul(tf.reshape(bottom, [-1, input_dim]), weights) + biases
    conv = tf.reshape(conv_flat, to_T([bottom_shape[0], bottom_shape[1], bottom_shape[2], output_dim]))
    return conv
# use this for arbitrary convolution in modules to avoid the crash.
def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                    bias_term=True, weights_initializer=None,
                    biases_initializer=None, reuse=None):
    """
    Same interface as conv_layer(), but the Conv2D gradient is remapped to
    'Conv2D_handle_empty_batch' (registered in this module), which runs on
    the CPU so empty batches do not crash the backward pass.
    """
    g = tf.get_default_graph()
    with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):
        return conv_layer(name, bottom, kernel_size, stride, output_dim,
                          padding, bias_term, weights_initializer,
                          biases_initializer, reuse=reuse)
@tf.RegisterGradient('Conv2D_handle_empty_batch')
def _Conv2DGrad(op, grad):
    """
    Conv2D gradient computed on the CPU (used via gradient_override_map in
    empty_safe_conv to survive empty batches).

    Bug fix (naming only): the two local variables were swapped --
    ``filter_grad`` actually held the gradient w.r.t. the input and vice
    versa.  The return order [d/d_input, d/d_filter] was already correct,
    so behavior is unchanged; the names and comments now match reality.
    """
    with tf.device('/cpu:0'):
        # Gradient w.r.t. the input tensor (op.inputs[0]).
        input_grad = tf.nn.conv2d_backprop_input(
            tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
            op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format'))
        # Gradient w.r.t. the filter tensor (op.inputs[1]).
        filter_grad = tf.nn.conv2d_backprop_filter(
            op.inputs[0], tf.shape(op.inputs[1]), grad, op.get_attr('strides'),
            op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format'))
        # Return order must match op.inputs: [d/d_input, d/d_filter].
        return [input_grad, filter_grad]
| 10,037 | 3,209 |
"""
SUMMARY
Reinforcement learning via q-learning on the provided data, using previous data if requested
"""
import argparse
import json
import random
import sys
import matplotlib.pyplot as plt
import numpy as np
import auxiliary as aux
from vehicle import Vehicle
# Sets seed for reproducibility
random.seed(0)

# Processes arguments
parser = argparse.ArgumentParser()
required_flags = parser.add_argument_group(title="Required")
required_flags.add_argument("--epochs", required=True, help="Number of epochs", type=int)
required_flags.add_argument("--explore-probability", required=True, help="Explore probability [0, 1]", type=float)
required_flags.add_argument("--learning-rate", required=True, help="Learning rate [0, 1]", type=float)
required_flags.add_argument("--discount-factor", required=True, help="Discount factor [0, 1]", type=float)
required_flags.add_argument("--data", required=True, help="JSON filepath to read Q, rewards matrices and other information", type=str)
required_flags.add_argument("--positive-demonstration", help="JSON filepath to read Q matrix updates after a number of positive demonstrations (already processed)", type=str)
required_flags.add_argument("--negative-demonstration", help="JSON filepath to read Q matrix updates after a number of negative demonstrations (already processed)", type=str)
required_flags.add_argument("--good-advice-decay", help="Training epochs good advice is remembered (50 by default)", type=int)
required_flags.add_argument("--bad-advice-decay", help="Training epochs bad advice is remembered (5 by default)", type=int)
required_flags.add_argument("--output", required=True, help="JSON filepath to output the results", type=str)
parser.add_argument("--show", help="Show output reward vs. epoch plot", action="store_true")
args = parser.parse_args()

p_exp = args.explore_probability
α = args.learning_rate
γ = args.discount_factor
assert (0 <= p_exp) and (p_exp <= 1), "Explore probability must be between 0 and 1"
assert (0 <= α) and (α <= 1), "Learning rate must be between 0 and 1"
assert (0 <= γ) and (γ <= 1), "Discount factor must be between 0 and 1"

# Advice retention defaults; may be overridden from the command line below.
good_advice_decay_epochs = 50
bad_advice_decay_epochs = 5

# Bug fix: the CLI overrides previously assigned to unused
# *_retention_epochs names, so --good-advice-decay/--bad-advice-decay had
# no effect, and the decay ratios were computed before the override.
# Apply the overrides to the names the training code reads, then derive
# the ratios.  (Also fixed the "defaulr" typos in the help strings.)
if args.good_advice_decay:
    assert args.good_advice_decay >= 0, "Good advice decay cannot be negative epochs"
    good_advice_decay_epochs = args.good_advice_decay
if args.bad_advice_decay:
    assert args.bad_advice_decay >= 0, "Bad advice decay cannot be negative epochs"
    bad_advice_decay_epochs = args.bad_advice_decay
good_decay_ratio = 1/good_advice_decay_epochs
bad_decay_ratio = 1/bad_advice_decay_epochs
#-----------------------------------------------------
# DATA PREPROCESSING
#-----------------------------------------------------
# Loads original data
with open(args.data, "r") as jf:
    original_data = json.load(jf)

# Environment description produced by an earlier preprocessing step.
R = original_data["rewards matrix"]  # per-cell reward, indexed R[x][y]
Q = original_data["Q matrix"]        # Q[x][y][orientation][speed][action]
nx = original_data["nx"]             # grid extent in x (presumably -- see Vehicle(nx, ny, ...))
ny = original_data["ny"]             # grid extent in y
possible_speeds = original_data["possible speeds"]
speed_max = possible_speeds - 1      # maximum speed index
valid_positions = original_data["valid positions"]
# Index lists over the declared orientations and actions.
orientations = [i for i in range(0, len(original_data["orientations"]))]
actions = [j for j in range(0, len(original_data["actions"]))]
num_actions = len(actions)
#-----------------------------------------------------
# NECESSARY VARIABLES
#-----------------------------------------------------
# Shaping hyper-parameters for the advice potential Φ (see the PIES-style
# update in train_Q, where Φ is stepped by β_used*δ_t_Φ).
β_good = 0.2
β_bad = 0.2
# Initial values ξ_0/δ_0/Φ_0 appear unused later in this script.
ξ_0 = 1
δ_0 = 0
Φ_0 = 0
# Expert reward assigned to advised state-actions (positive/negative intent).
R_expert_good = 1
R_expert_bad = -1

# Sets the Φ(s, a), R^{expert}
# Always 0
Φ = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions)))
R_expert = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions)))

#-----------------------------------------------------
# ADVICE PROCESSING
#-----------------------------------------------------
# Stores advice actions; values are [retention_epochs_left, advised_action]
# (the original comment claimed the values were booleans -- see the
# assignments in the demonstration-loading loops below).
# "good":{"x, y, o, v":[retention, action], ...}
# "bad": {"x, y, o, v":[retention, action], ...}
advice_locations = {"good":{}, "bad":{}}
# From Useful Policy Invariant Shaping from Arbitrary Advice (Behboudian et al.)
# Utilizes positive demonstration data
# Positive intent -> Intentionally good demonstrations (although perhaps the user is incompetent)
if args.positive_demonstration:
    # Retrieves demonstration data
    with open(args.positive_demonstration, "r") as jf:
        original_demonstration_data = json.load(jf)
    action_sets_taken = original_demonstration_data["actions taken"]
    # Simply take the data as is, modify the appropriate Q matrix value, adding +1 to the appropiate Q[s, a] location
    for an_action_path in action_sets_taken:
        # Goes step by step
        for a_step in an_action_path:
            # Each step is a (x, y, orientation, speed, action) record.
            step_x = a_step[0]
            step_y = a_step[1]
            step_o = a_step[2]
            step_v = a_step[3]
            step_a = a_step[4]
            # Remember the advised action with a full retention counter,
            # and mark the expert reward for that state-action.
            advice_locations["good"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [good_advice_decay_epochs, step_a]
            R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_good

# Utilizes negative demonstration data
# Negative intent -> Intentionally poor or misleading demonstrations
if args.negative_demonstration:
    # Retrieves demonstration data
    with open(args.negative_demonstration, "r") as jf:
        original_demonstration_data = json.load(jf)
    action_sets_taken = original_demonstration_data["actions taken"]
    for an_action_path in action_sets_taken:
        # Goes step by step
        for a_step in an_action_path:
            step_x = a_step[0]
            step_y = a_step[1]
            step_o = a_step[2]
            step_v = a_step[3]
            step_a = a_step[4]
            advice_locations["bad"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [bad_advice_decay_epochs, step_a]
            R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_bad
#-----------------------------------------------------
# NECESSARY FUNCTIONS
#-----------------------------------------------------
# Tests with the current Q matrix
# Each epoch tests a starting location with a random orientation but always zero speed
# Each reward in the array is: max(Reward - steps, 0)
# Up to 100 steps can be used
# Returns an array containing rewards
max_testing_iterations = 100
def test_Q():
    """
    Greedy roll-out from every valid starting position using the current Q.

    :return: list with one ``max(0, accumulated_reward)`` entry per valid
             starting position.
    """
    results = []
    # Reshuffles the valid starting locations
    random.shuffle(valid_positions)
    # Goes through every valid position
    for a_valid_position in valid_positions:
        xloc, yloc = a_valid_position
        starting_orientation = random.randint(0, 3)
        # Speed always starts at zero.
        tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R)
        reward_so_far = 0
        for an_iteration in range(0, max_testing_iterations):
            # Gets the current location
            v_x = tested_vehicle.xloc
            v_y = tested_vehicle.yloc
            v_orientation = tested_vehicle.orientation_index
            v_speed = tested_vehicle.speed
            # Adds the penalty/reward corresponding to this location
            reward_so_far += R[v_x][v_y]
            # If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) add the reward and then exit this iteration
            if R[v_x][v_y] != -1:
                break
            # Chooses the action index with the maximum reward in Q
            # If two actions have the same optimal Q-value, the first one will be chosen
            Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed]
            best_Q_value = max(Q_values_to_choose)
            action_index = Q_values_to_choose.index(best_Q_value)
            # Makes the vehicle attempt it
            tested_vehicle.execute_action(action_index, modify_self=True, get_copy_there=False, get_end_location=False)
        results.append(max(0, reward_so_far))
    return results
# Trains starting with the current Q matrix, which is updated at each step
# Each epoch tests a starting location with a random orientation but always zero speed
# Each reward in the array is: max(Reward - steps, 0)
# Up to 100 steps can be used
# Does not return anything
max_training_iterations = 100
def train_Q():
    """
    Run one training epoch of ε-greedy Q-learning over every valid starting
    position, blending in demonstration advice (shaping potential Φ) when the
    greedy action matches recorded good/bad advice.

    Mutates the module-level ``Q`` and ``Φ`` matrices and decays the
    retention counters in ``advice_locations``.
    """
    # Reshuffles the valid starting locations
    random.shuffle(valid_positions)
    # Stores the good and bad advice states reached this round
    good_advice_states_seen = {}
    bad_advice_states_seen = {}
    # Goes through every valid position
    for a_valid_position in valid_positions:
        xloc, yloc = a_valid_position
        starting_orientation = random.randint(0, 3)
        tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R)
        for an_iteration in range(0, max_training_iterations):
            # Gets the current location
            v_x = tested_vehicle.xloc
            v_y = tested_vehicle.yloc
            v_orientation = tested_vehicle.orientation_index
            v_speed = tested_vehicle.speed
            # If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) then exit this iteration
            if R[v_x][v_y] != -1:
                break
            # Gets a random probability
            what_to_do = random.random()
            # If below the explore probability, explore, choose an action at random
            if what_to_do <= p_exp:
                # NOTE(review): assumes exactly 5 actions (indices 0-4);
                # should arguably be randint(0, num_actions - 1).
                chosen_action_index = random.randint(0, 4)
                given_reward = R[v_x][v_y]
                expert_opinion_used = False
                α_used = α
            else:
                # Chooses the action index with the maximum reward in Q
                # If two actions have the same optimal Q-value, the first one will be chosen
                Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed]
                # Selects the best actions a priori
                a_priori_best_Q_value = max(Q_values_to_choose)
                a_priori_best_action = Q_values_to_choose.index(a_priori_best_Q_value)
                # Checks if this state was considered good or bad
                s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed)
                # Advice applies only when it targets this state's greedy
                # action and its retention counter is still positive.
                if (s_as_state in advice_locations["good"]) and (advice_locations["good"][s_as_state][1] == a_priori_best_action) and (advice_locations["good"][s_as_state][0] > 0):
                    if s_as_state not in good_advice_states_seen:
                        good_advice_states_seen[s_as_state] = True
                    expert_opinion_used = True
                    # Epochs this piece of advice has already been retained.
                    advice_followed_times = good_advice_decay_epochs - advice_locations["good"][s_as_state][0]
                    decay_ratio = good_decay_ratio
                    α_used = 0.05
                    β_used = β_good
                elif (s_as_state in advice_locations["bad"]) and (advice_locations["bad"][s_as_state][1] == a_priori_best_action) and (advice_locations["bad"][s_as_state][0] > 0):
                    if s_as_state not in bad_advice_states_seen:
                        bad_advice_states_seen[s_as_state] = True
                    expert_opinion_used = True
                    advice_followed_times = bad_advice_decay_epochs - advice_locations["bad"][s_as_state][0]
                    decay_ratio = bad_decay_ratio
                    α_used = 0.1
                    β_used = β_bad
                else:
                    # Action not provided as advice
                    best_Q_value = a_priori_best_Q_value
                    chosen_action_index = a_priori_best_action
                    given_reward = R[v_x][v_y]
                    expert_opinion_used = False
                    α_used = α
            if expert_opinion_used:
                # Q(s, a) - ξ_t*Φ_t(s, a)
                policies_to_choose = [0 for a in range(0, num_actions)]
                # Stores Φ_t(s, a), Φ_t(s', a') values before the update
                pu_Φ_t_sa = np.zeros((num_actions))
                pu_Φ_t_snan = np.zeros((num_actions))
                for an_action in range(0, num_actions):
                    # Gets the next location but does not move there yet if no expert was provided using a priori data
                    # NOTE(review): this simulates ``a_priori_best_action`` for
                    # every ``an_action`` -- probably intended to simulate
                    # ``an_action`` itself; confirm before changing.
                    [_0, possible_next_sa] = tested_vehicle.execute_action(a_priori_best_action,
                                                                          modify_self=False,
                                                                          get_copy_there=False,
                                                                          get_end_location=True)
                    sn_x = possible_next_sa[0][0]
                    sn_y = possible_next_sa[0][1]
                    sn_o = possible_next_sa[1]
                    sn_v = possible_next_sa[2]
                    Q_sn = Q[sn_x][sn_y][sn_o][sn_v]
                    sn_a = Q_sn.index(max(Q_sn))
                    # Φ_t(s, a)
                    Φ_t_sa = Φ[v_x][v_y][v_orientation][v_speed][an_action]
                    pu_Φ_t_sa[an_action] = Φ_t_sa
                    # Assumption to avoid BFS
                    # Φ_{t+1}(s', a') = Φ_t(s', a')
                    # Φ_t(s', a')
                    Φ_t_snan = Φ[sn_x][sn_y][sn_o][sn_v][sn_a]
                    pu_Φ_t_snan[an_action] = Φ_t_snan
                    # δ_t^Φ
                    δ_t_Φ = -R_expert[v_x][v_y][v_orientation][v_speed][an_action] + γ*Φ_t_snan - Φ_t_sa
                    # ξ_t
                    # Counts how many times this particular advice has been followed
                    ξ_t = 1 - advice_followed_times*decay_ratio
                    # generates the local policies to choose from
                    policies_to_choose[an_action] = Q_values_to_choose[an_action] - ξ_t*Φ_t_sa
                    # Generates Φ_{t+1}(s, a)
                    Φ[v_x][v_y][v_orientation][v_speed][an_action] = Φ_t_sa + β_used*δ_t_Φ
                # Chooses the optimal policy action
                chosen_action_index = policies_to_choose.index(max(policies_to_choose))
                given_reward = R[v_x][v_y] + γ*pu_Φ_t_snan[chosen_action_index] - pu_Φ_t_sa[chosen_action_index]
            # Makes the vehicle attempt the action
            [_1, location_end] = tested_vehicle.execute_action(chosen_action_index,
                                                              modify_self=True,
                                                              get_copy_there=False,
                                                              get_end_location=True)
            # Updates the Q matrix
            # Q[s, a] = Q[s, a] + α*(R[s] + γ*max(Q[s', a'], a') - Q[s, a])
            v_x_new = location_end[0][0]
            v_y_new = location_end[0][1]
            v_orientation_new = location_end[1]
            v_speed_new = location_end[2]
            Q_apostrophe_max = max(Q[v_x_new][v_y_new][v_orientation_new][v_speed_new])
            Q_sa = Q[v_x][v_y][v_orientation][v_speed][chosen_action_index]
            s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed)
            Q[v_x][v_y][v_orientation][v_speed][chosen_action_index] = Q_sa + α_used*(given_reward + γ*Q_apostrophe_max - Q_sa)
    # Marks certain states as seen this round
    for a_good_seen_state in good_advice_states_seen:
        # Good advice retention decays by one epoch
        advice_locations["good"][a_good_seen_state][0] -= 1
    for a_bad_seen_state in bad_advice_states_seen:
        # Bad advice retention also decays by one epoch
        # (the original comment said "rises", but the counter is decremented)
        advice_locations["bad"][a_bad_seen_state][0] -= 1
#-----------------------------------------------------
# Q-LEARNING
#-----------------------------------------------------
# [[epoch index, RMS reward], ...]
epoch_rewards = []
for an_epoch in range(0, args.epochs):
    # Tests (evaluates the current Q greedily before each training pass)
    tested_rewards = test_Q()
    # Calculates and appends the RMS reward to results
    epoch_rewards.append([an_epoch, aux.RMS(tested_rewards)])
    # Trains (unless it is the last epoch, so the final test matches the final Q)
    if an_epoch != (args.epochs - 1):
        train_Q()
#-----------------------------------------------------
# OUTPUTS RESULTS
#-----------------------------------------------------
with open(args.output, "w") as jf:
    jf.write(json.dumps({"Q matrix":Q, "epoch rewards":epoch_rewards}, indent=4))

#-----------------------------------------------------
# SHOWS PLOT WITH RESULTS IF REQUESTED
#-----------------------------------------------------
if not args.show:
    sys.exit()

plt.figure()
epochs_used = [entry[0] for entry in epoch_rewards]
rewards_obtained = [entry[1] for entry in epoch_rewards]
plt.plot(epochs_used, rewards_obtained, "k-")
plt.xlabel("Epoch")
plt.ylabel("Reward")
plt.title("Reward vs. Epoch")
plt.show()
| 16,641 | 5,423 |
from itertools import permutations

# Find the millionth lexicographic permutation of the digits 0-9
# (permutations() emits in lexicographic order because the input is sorted).
result = None
for index, perm in enumerate(permutations('0123456789')):
    if index == 10**6 - 1:
        result = ''.join(perm)
        # Bug fix: Python 2 print statement -> Python 3 print() call.
        print(result)
        break
| 162 | 64 |
# ---------------------------------------------------------
"""
This file is a part of the "SARK110 Antenna Vector Impedance Analyzer" software
MIT License
@author Copyright (c) 2020 Melchor Varela - EA4FRB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ---------------------------------------------------------
import os
import struct
import time

# Select the HID backend per platform: pywinusb on Windows, hidapi on POSIX.
if os.name == 'nt':
    import pywinusb.hid as hid
    import threading
elif os.name == 'posix':
    import hid
else:
    raise ImportError("Error: no implementation for your platform ('{}') available".format(os.name))

# USB vendor/product identifiers of the SARK-110 analyzer.
SARK110_VENDOR_ID = 0x0483
SARK110_PRODUCT_ID = 0x5750
# Wait budget for HID data, in milliseconds (per the name).
WAIT_HID_DATA_MS = 1000
class Sark110:
    """Driver for the SARK-110 antenna analyzer over USB HID."""
    # Class-level defaults; overwritten per instance.
    _handler = 0        # backend HID device object; 0 while not opened
    _is_connect = 0     # 1 after a successful connect()
    _max_freq = 0       # device frequency limits -- populated elsewhere, TODO confirm
    _min_freq = 0
    _dev_name = ""
    _fw_version = ""
    _fw_protocol = -1

    @property
    def fw_version(self) -> str:
        """Firmware version string."""
        return self._fw_version

    @property
    def fw_protocol(self) -> int:
        """Firmware protocol number (-1 until known)."""
        return self._fw_protocol

    @property
    def dev_name(self) -> str:
        """Device name string."""
        return self._dev_name

    @property
    def max_freq(self) -> int:
        """Maximum supported frequency."""
        return self._max_freq

    @property
    def min_freq(self) -> int:
        """Minimum supported frequency."""
        return self._min_freq

    @property
    def is_connected(self) -> bool:
        """Truthy once connect() has completed successfully."""
        return self._is_connect
    def __init__(self):
        # Start in the "not opened / not connected" state.
        self._handler = 0
        self._is_connect = 0
def open(self) -> int:
"""
Opens the device
:return: <0 err; >0 ok
"""
# Windows: pywinusb
if os.name == 'nt':
target_vendor_id = SARK110_VENDOR_ID
target_product_id = SARK110_PRODUCT_ID
hid_filter = hid.HidDeviceFilter(vendor_id=target_vendor_id, product_id=target_product_id)
try:
self._handler = hid_filter.get_devices()[0]
if not self._handler:
return -1
else:
self._handler.open()
self._handler.set_raw_data_handler(self._rx_handler)
return 1
except:
return -2
# Linux: hidapi
else:
self._handler = hid.device()
try:
self._handler.open(SARK110_VENDOR_ID, SARK110_PRODUCT_ID)
self._handler.set_nonblocking(0)
return 1
except IOError as ex:
return -1
def connect(self) -> int:
"""
Connect to the device and get its characteristics
:return: <0 err; >0 ok
"""
if not self._handler:
return -1
if self._cmd_version() < 0:
return -2
self._is_connect = 1;
return 1
def close(self):
"""
Closes the device
:return:
"""
if self._handler:
self._handler.close()
self._handler = 0
self._is_connect = 0
def measure(self, freq: int, rs: float, xs: float, cal=True, samples=1) -> int:
"""
Takes one measurement sample at the specified frequency
:param freq: frequency in hertz; 0 to turn-off the generator
:param cal: True to get OSL calibrated data; False to get uncalibrated data
:param samples: number of samples for averaging
:param rs real part of the impedance
:param xs imag part of the impedance
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 2
b = self._int2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
snd[4] = b[2]
snd[5] = b[3]
if cal:
snd[6] = 1
else:
snd[6] = 0
snd[7] = samples
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
b = bytearray([0, 0, 0, 0])
b[0] = rcv[1]
b[1] = rcv[2]
b[2] = rcv[3]
b[3] = rcv[4]
rs[0] = struct.unpack('f', b)
b[0] = rcv[5]
b[1] = rcv[6]
b[2] = rcv[7]
b[3] = rcv[8]
xs[0] = struct.unpack('f', b)
return 1
def buzzer(self, freq=0, duration=0) -> int:
"""
Sounds the sark110 buzzer.
:param device: handler
:param freq: frequency in hertz
:param duration: duration in ms
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 20
b = self._short2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
b = self._short2bytes(duration)
snd[4] = b[0]
snd[5] = b[1]
rcv = self._send_rcv(snd)
if duration == 0:
time.sleep(.2)
else:
time.sleep(duration / 1000)
if rcv[0] == 79:
return 1
return -2
def reset(self) -> int:
"""
Resets the device
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 50
rcv = self._send_rcv(snd)
if rcv == 79:
return 1
return -2
def measure_ext(self, freq: int, step: int, rs: float, xs: float, cal=True, samples=1) -> int:
"""
Takes four measurement samples starting at the specified frequency and incremented at the specified step
Uses half float, so a bit less precise
:param device: handler
:param freq: frequency in hertz; 0 to turn-off the generator
:param step: step in hertz
:param cal: True to get OSL calibrated data; False to get uncalibrated data
:param samples: number of samples for averaging
:param rs real part of the impedance (four vals)
:param xs imag part of the impedance (four vals)
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 12
b = self._int2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
snd[4] = b[2]
snd[5] = b[3]
b = self._int2bytes(step)
snd[8] = b[0]
snd[9] = b[1]
snd[10] = b[2]
snd[11] = b[3]
if cal:
snd[6] = 1
else:
snd[6] = 0
snd[7] = samples
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
rs[0] = self._half2float(rcv[1], rcv[2])
xs[0] = self._half2float(rcv[3], rcv[4])
rs[1] = self._half2float(rcv[5], rcv[6])
xs[1] = self._half2float(rcv[7], rcv[8])
rs[2] = self._half2float(rcv[9], rcv[10])
xs[2] = self._half2float(rcv[11], rcv[12])
rs[3] = self._half2float(rcv[13], rcv[14])
xs[3] = self._half2float(rcv[15], rcv[16])
return 1
# ---------------------------------------------------------
# Get version command: used to check the connection and dev params
def _cmd_version(self):
if not self._handler:
return -1
self._fw_protocol = 0
self._fw_version = ""
snd = [0x0] * 19
snd[1] = 1
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
self._fw_protocol = (rcv[2] << 8) & 0xFF00
self._fw_protocol += rcv[1] & 0xFF
ver = [0x0] * 15
ver[:] = rcv[3:]
# Identifies the device
if (self._fw_protocol & 0xff00) == 0x0100:
self._max_freq = 200000000
self._min_freq = 100000
self._dev_name = "sark110 (100k to 200M)"
elif (self._fw_protocol & 0xff00) == 0x0200:
self._max_freq = 230000000
self._min_freq = 10000
self._dev_name = "sark110 (10k to 230M)"
elif (self._fw_protocol & 0xff00) == 0x0300:
self._max_freq = 230000000
self._min_freq = 10000
self._dev_name = "sark110 mk1"
elif (self._fw_protocol & 0xff00) == 0x0a00:
self._max_freq = 1000000000
self._min_freq = 100000
self._dev_name = "sark110 ulm"
else:
self._max_freq = 230000000
self._min_freq = 100000
self._dev_name = "sark110"
# Converts version to str
for i in range(15):
if ver[i] == 0:
break
elif ver[i] == 46:
self._fw_version += "."
else:
self._fw_version += "%c" % (ver[i])
return 1
# ---------------------------------------------------------
# half float decompress
def _half2float(self, byte1, byte2):
hfs = (byte2 << 8) & 0xFF00
hfs += byte1 & 0xFF
temp = self.__half2float(hfs)
res_pack = struct.pack('I', temp)
return struct.unpack('f', res_pack)[0]
    def __half2float(self, float16):
        """Convert IEEE-754 binary16 bits to the equivalent binary32 bit
        pattern, returned as an int."""
        s = int((float16 >> 15) & 0x00000001)  # sign
        e = int((float16 >> 10) & 0x0000001f)  # exponent
        f = int(float16 & 0x000003ff)  # fraction
        if e == 0:
            if f == 0:
                # Signed zero.
                return int(s << 31)
            else:
                # Subnormal half: normalize the mantissa, adjusting the exponent.
                while not (f & 0x00000400):
                    f = f << 1
                    e -= 1
                e += 1
                f &= ~0x00000400
                # print(s,e,f)
        elif e == 31:
            if f == 0:
                # +/- infinity.
                return int((s << 31) | 0x7f800000)
            else:
                # NaN: carry the payload bits across.
                return int((s << 31) | 0x7f800000 | (f << 13))
        # Re-bias the exponent (15 -> 127) and widen the mantissa (10 -> 23 bits).
        e = e + (127 - 15)
        f = f << 13
        return int((s << 31) | (e << 23) | f)
# ---------------------------------------------------------
def _short2bytes(self, n):
"""
short to buffer array
:param n:
:return:
"""
b = bytearray([0, 0])
b[0] = n & 0xFF
n >>= 8
b[1] = n & 0xFF
return b
def _int2bytes(self, n):
"""
int to buffer array
:param n:
:return:
"""
b = bytearray([0, 0, 0, 0])
b[0] = n & 0xFF
n >>= 8
b[1] = n & 0xFF
n >>= 8
b[2] = n & 0xFF
n >>= 8
b[3] = n & 0xFF
return b
# ---------------------------------------------------------
def _send_rcv(self, snd):
# Windows: pywinusb
if os.name == 'nt':
try:
report = self._handler.find_output_reports()[0]
self.event.clear()
report.set_raw_data(snd)
report.send()
self.event.wait()
return _g_rcv[1:18]
except:
return [0] * 18
# Linux: hidapi
else:
try:
self._handler.write(snd)
return self._handler.read(18, WAIT_HID_DATA_MS)
except:
return [0] * 18
    def _rx_handler(self, data):
        """
        pywinusb callback invoked when an input report arrives (Windows only).

        Copies the raw report into the `_g_rcv` buffer (declared global here)
        and wakes the thread blocked in _send_rcv().

        :param data: raw input report (list of ints)
        :return:
        """
        global _g_rcv
        _g_rcv = data.copy()
        self.event.set()
    # ---------------------------------------------------------
    # Receive buffer filled by _rx_handler().
    # NOTE(review): _rx_handler declares `_g_rcv` with `global`, so it rebinds
    # a module-level name — confirm the intended scope of this initializer.
    _g_rcv = [0xff] * 19
    # Rendezvous event between _send_rcv() and the pywinusb callback;
    # only needed (and only created) on Windows.
    if os.name == 'nt':
        event = threading.Event()
| 12,260 | 4,349 |
#!/usr/bin/python
import sqlite3
# NOTE(review): machine-specific absolute path — consider a CLI argument or
# environment variable instead of a hard-coded developer path.
path = '/Users/Utkarsh/PycharmProjects/SCILLA/Experiments/circuits.db'
def select_all_tasks(conn):
    """
    Print every row of the tasks table.
    :param conn: the Connection object
    :return:
    """
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM tasks")
    for row in cursor.fetchall():
        print(row)
def create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by the db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error as err:
        # Report the failure and fall through to returning None.
        print(err)
        return None
if __name__ == '__main__':
    # Connect, dump the tasks table, then clean up.
    conn = create_connection(path)
    if conn is None:
        # create_connection() already printed the error.  The original passed
        # the None straight to select_all_tasks(), which crashed with
        # AttributeError instead of reporting the connection failure.
        raise SystemExit(1)
    print("Opened database successfully")
    try:
        select_all_tasks(conn)
    finally:
        conn.close()
# import sqlite3
# from sqlite3 import Error
#
#
# def create_connection(db_file):
# """ create a database connection to the SQLite database
# specified by the db_file
# :param db_file: database file
# :return: Connection object or None
# """
# conn = None
# try:
# conn = sqlite3.connect(db_file)
# except Error as e:
# print(e)
#
# return conn
#
#
# def select_all_tasks(conn):
# """
# Query all rows in the tasks table
# :param conn: the Connection object
# :return:
# """
# cur = conn.cursor()
# cur.execute("SELECT * FROM tasks")
#
# rows = cur.fetchall()
#
# for row in rows:
# print(row)
#
#
# def select_task_by_priority(conn, priority):
# """
# Query tasks by priority
# :param conn: the Connection object
# :param priority:
# :return:
# """
# cur = conn.cursor()
# cur.execute("SELECT * FROM tasks WHERE priority=?", (priority,))
#
# rows = cur.fetchall()
#
# for row in rows:
# print(row)
#
#
# def main():
# database = '/Users/Utkarsh/PycharmProjects/SCILLA/Experiments/circuits.db'
#
# # create a database connection
# conn = create_connection(database)
# with conn:
# print("1. Query task by priority:")
# select_task_by_priority(conn, 1)
#
# print("2. Query all tasks")
# select_all_tasks(conn)
#
#
# if __name__ == '__main__':
# main() | 2,295 | 742 |
#
# @lc app=leetcode id=377 lang=python3
#
# [377] Combination Sum IV
#
# @lc code=start
from collections import defaultdict
class Solution:
    def combinationSum4(self, nums: list[int], target: int) -> int:
        """Count ordered sequences drawn from nums that sum to target.

        Classic DP over totals: ways[t] = sum(ways[t - n] for n in nums),
        with ways[0] = 1 (empty sequence). defaultdict returns 0 for
        negative / unreached totals.
        """
        ways = defaultdict(int, {0: 1})
        for total in range(1, target + 1):
            ways[total] = sum(ways[total - n] for n in nums)
        return ways[target]
# @lc code=end
| 383 | 154 |
import argparse
import numpy as np
from paddle import fluid
def main():
    """Load a saved Paddle (fluid) inference model and run it once on a
    randomly generated sample, printing the predicted labels."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str,
                        help="Path of __model__ and __params__")
    parser.add_argument("--use_cuda", action="store_true")
    args = parser.parse_args()
    print("Args:", args)
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Load inference model
    inference_program, feed_target_names, fetch_targets = load_model(args.model_path, exe)
    print("Feed target names:", feed_target_names)
    print("Fetch targets:", fetch_targets)
    # A temp sample
    # NOTE(review): presumably a few-shot episode — B batches, N classes,
    # K support shots, Q queries per class — TODO confirm against the model.
    B, N, K, Q = 4, 2, 5, 5
    max_length = 512
    totalQ = np.array([N * Q], dtype=np.int32)
    # Random token ids and sequence lengths standing in for real data.
    support = np.random.randint(0, high=1000, size=[B, N, K, max_length])
    support_len = np.random.randint(10, high=max_length, size=[B, N, K])
    query = np.random.randint(0, high=1000, size=[B, N * Q, max_length])
    query_len = np.random.randint(10, high=max_length, size=[B, N * Q])
    # Run inference model; feed order must match the saved program's targets.
    pred_label, = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: totalQ,
                              feed_target_names[1]: support,
                              feed_target_names[2]: support_len,
                              feed_target_names[3]: query,
                              feed_target_names[4]: query_len
                          },
                          fetch_list=fetch_targets)
    print("The predict label is:", pred_label)  # [B, totalQ]
def load_model(model_path, exe):
    """Load an inference model saved by fluid.io.save_inference_model.

    :param model_path: directory containing __model__ and __params__
    :param exe: fluid.Executor used to load the parameters
    :return: (inference_program, feed_target_names, fetch_targets)
    """
    [inference_program, feed_target_names, fetch_targets] = (
        fluid.io.load_inference_model(dirname=model_path, executor=exe,
                                      params_filename="__params__"))
    return inference_program, feed_target_names, fetch_targets


if __name__ == "__main__":
    main()
# Generated by Django 3.2.5 on 2021-08-23 04:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.5): adds db_index + unique to
    InfoSourcePPDB.info_source and db_index to ParticipantCount.count."""

    dependencies = [
        ('dashboard', '0031_participantgraduation_updated_at'),
    ]

    operations = [
        migrations.AlterField(
            model_name='infosourceppdb',
            name='info_source',
            field=models.CharField(db_index=True, max_length=100, unique=True, verbose_name='Sumber Info Primaseru'),
        ),
        migrations.AlterField(
            model_name='participantcount',
            name='count',
            field=models.CharField(db_index=True, max_length=10),
        ),
    ]
| 659 | 215 |
import paho.mqtt.client as paho
import time
import Queue as queue
import json
import real_time_manipulator_math_utils
import pprint
pp = pprint.PrettyPrinter(indent=4)
# import rbdl
# import manip motion
def on_connect(client, userdata, flags, rc):
    """paho callback: (re)subscribe to the sensor topic once connected.
    `topic` and `qos` are module-level globals defined below."""
    client.subscribe(topic, qos)
def on_message(client, userdata, message):
    """paho callback: decode the payload and hand it to the main loop
    via the module-level queue `q`."""
    msg = message.payload.decode("utf-8")
    q.put(msg)
    # print("Received: ", msg)
# MQTT connection parameters
broker = "test.mosquitto.org"
topic = "fyp/sensors"
qos = 0

# BUG FIX: the queue must exist before the network loop starts; previously it
# was created after loop_start(), so an early message could hit an undefined
# `q` inside on_message().
q = queue.Queue()

client = paho.Client("client_001")
client.on_connect = on_connect
client.on_message = on_message
client.connect(broker)
client.loop_start()

# Processing parameters (a dead `JOINTS = 1`, immediately overwritten, was removed).
JOINTS = 4      # number of manipulator joints
SPLINE = 1      # batch window in seconds
WINDOWSIZE = 5  # moving-average window length

math_utils_obj = real_time_manipulator_math_utils.manipulator_math_utils(JOINTS)

# Per-batch accumulators: one sub-list per joint (the original built these
# with side-effecting list comprehensions).
timestamps = []
angles = [[] for _ in range(JOINTS)]
torques = [[] for _ in range(JOINTS)]
padded_angles = []
first_val = True
laukik_tatti = True  # skips the very first message; TODO: rename
# BUG FIX: `loop_flag` was referenced below but never defined anywhere, so the
# script crashed with NameError before processing a single message.  It is
# never modified, so the loop effectively runs until the process is stopped.
loop_flag = 1
while loop_flag == 1:
    message = q.get()
    msg = json.loads(message)
    if laukik_tatti:
        # Discard the very first (possibly stale) message.
        laukik_tatti = False
        continue
    if first_val:
        init_timestamp = msg['timestamp']
        first_val = False
    else:
        # Collect samples until SPLINE seconds have elapsed, then process the batch.
        if (msg['timestamp'] - init_timestamp > SPLINE):
            # print(timestamps)
            init_timestamp = msg['timestamp']
            # Padding for a smooth transition in the moving average.
            for j in range(JOINTS):
                if padded_angles == []:
                    angles[j] = [angles[j][0]] * (WINDOWSIZE - 1) + angles[j]
                else:
                    angles[j] = padded_angles[j] + angles[j]
            print("raw angles")
            pp.pprint(angles)
            # Keep the tail of each joint to pad the next batch.
            padded_angles = [angles[j][-(WINDOWSIZE - 1):] for j in range(JOINTS)]
            angles = math_utils_obj.real_time_moving_average(angles)
            print("angles after moving avg")
            pp.pprint(angles)
            # torques = get torque from rbdl (timestamp, angles)
            # Convert angles to motor steps.
            transformation = [[1, 0]] * JOINTS
            angles = math_utils_obj.angles_to_steps(angles, transformation)
            print("angles to steps")
            pp.pprint(angles)
            # Fit spline coefficients for this batch.
            angle_coeffs = math_utils_obj.calculate_coefficients_angles(timestamps, angles)
            print("angles coefficients")
            pp.pprint(angle_coeffs)
            # set motion on manipulator
            # Reset the accumulators for the next batch.
            timestamps = []
            angles = [[] for _ in range(JOINTS)]
            torques = [[] for _ in range(JOINTS)]
    timestamps.append(msg['timestamp'] - init_timestamp)
    angles[0].append(msg['shoulder']['pitch'])
    angles[1].append(msg['shoulder']['roll'])
    angles[2].append(msg['shoulder']['yaw'])
    angles[3].append(msg['elbow']['pitch'])
client.disconnect()
client.loop_stop()
import os
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render, get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic.base import TemplateView
from .models import Article, Comment
class ArticleView(TemplateView):
    """Top-level page hosting the article reader UI.

    Request-independent URLs for the client-side JS live in `extra_context`;
    request-dependent values are injected per request in get().
    """
    template_name = 'article/index.html'
    extra_context = {
        'context_for_js': {
            'article_html_base_uri': reverse_lazy('article:htmls'),
            'comments_uri': reverse_lazy('article:comments'),
            'bibs_uri': reverse_lazy('article:bibs'),
            'names_uri': reverse_lazy('article:names'),
            'search_uri': reverse_lazy('search:index')
        }
    }

    def get(self, request, name_or_filename, *args, **kwargs):
        """Render the page, adding per-request values to the JS context."""
        response = super().get(request, *args, **kwargs)
        # These context data cannot define in class field.
        # Base-URI trick: reverse() needs some value for the URL kwarg, so a
        # placeholder is reversed and then stripped to obtain the prefix.
        response.context_data['context_for_js']['article_base_uri'] = \
            reverse('article:index', kwargs=dict(name_or_filename="temp")).replace('temp', '')
        response.context_data['context_for_js']['is_authenticated'] = request.user.is_authenticated
        response.context_data['context_for_js']['target'] = request.GET.get('target', 'article')
        response.context_data['target'] = request.GET.get('target', 'article')
        return response
class ArticleIndexView(View):
    """Return the list of all article names as JSON."""

    def get(self, request):
        """GET -> {"index": [{"name": ...}, ...]}"""
        names = [{'name': article.name} for article in Article.objects.all()]
        return JsonResponse({'index': names})
class ArticleHtmlView(View):
    """Render the HTML template of the article named by ?article_name=."""

    def get(self, request, *args, **kwargs):
        """GET with article_name -> rendered template; otherwise 404."""
        if 'article_name' not in request.GET:
            raise Http404()
        article = get_object_or_404(Article, name=request.GET.get('article_name'))
        return render(request, article.template_url)
class BibView(View):
    """Serve the BibTeX source of an article as JSON."""

    def get(self, request):
        """GET with article_name -> {"bib_text": ...}; otherwise 404.

        BUG FIX: when article_name was missing the original fell through and
        implicitly returned None, which Django reports as a 500 ("view didn't
        return an HttpResponse").  Raise Http404 like ArticleHtmlView does.
        """
        if 'article_name' not in request.GET:
            raise Http404()
        article_name = request.GET.get("article_name")
        bib_file_path = os.path.join(settings.MML_FMBIBS_DIR, f'{article_name}.bib')
        if os.path.exists(bib_file_path):
            with open(bib_file_path, "r") as f:
                bib_text = f.read()
        else:
            # Missing bib files are reported in-band rather than as an error.
            bib_text = f"{bib_file_path} not found"
        return JsonResponse({"bib_text": bib_text})
class ProofView(View):
    """Serve a proof XML fragment belonging to an article."""

    def get(self, request, article_name, proof_name):
        """GET -> XML content of proofs/<article_name>/<proof_name>."""
        path = os.path.join(settings.MML_HTML_DIR, 'proofs',
                            article_name, proof_name)
        # Use a context manager so the handle is closed deterministically;
        # the original open(...).read() leaked it until garbage collection.
        with open(path) as f:
            content = f.read()
        return HttpResponse(content, content_type='application/xml')
class RefView(View):
    """Serve a reference XML fragment belonging to an article."""

    def get(self, request, article_name, ref_name):
        """GET -> XML content of refs/<article_name>/<ref_name>."""
        path = os.path.join(settings.MML_HTML_DIR,
                            'refs', article_name, ref_name)
        # Use a context manager so the handle is closed deterministically;
        # the original open(...).read() leaked it until garbage collection.
        with open(path) as f:
            content = f.read()
        return HttpResponse(content, content_type='application/xml')
class CommentView(View):
    """List comments (GET) and create-or-update a comment (POST)."""

    def get(self, request, *args, **kwargs):
        """Return comments serialized as JSON, optionally filtered by the
        article_name, block and block_order query parameters."""
        query = Comment.objects
        if 'article_name' in request.GET:
            query = query.filter(
                article=Article.objects.get(
                    name=request.GET.get("article_name"))
            )
        if 'block' in request.GET:
            query = query.filter(
                block=request.GET.get('block')
            )
        if 'block_order' in request.GET:
            query = query.filter(
                block_order=int(request.GET.get("block_order"))
            )
        return HttpResponse(
            serializers.serialize('json', query.all()), content_type='application/json'
        )

    @method_decorator(login_required)
    def post(self, request):
        """Upsert the comment at (article, block, block_order), then persist
        the article back to its miz file and commit it as the current user."""
        article_name = request.POST.get('article_name', None)
        block = request.POST.get('block', None)
        block_order = request.POST.get("block_order", None)
        text = request.POST.get('comment', None)
        article = Article.objects.get(name=article_name)
        # Upsert: reuse an existing comment for this location when present.
        if Comment.objects.filter(article=article, block=block, block_order=block_order).exists():
            comment = Comment.objects.get(
                article=article, block=block, block_order=block_order)
        else:
            comment = Comment(article=article, block=block,
                              block_order=block_order, text='')
        comment.text = text
        comment.save()
        # Mirror the change to the on-disk miz file and commit it.
        article.save_db2mizfile()
        article.commit_mizfile(request.user.username)
        return HttpResponse(status=201)
| 4,679 | 1,332 |
"""
The MIT License (MIT)
Copyright (c) 2013 Niko Skrypnik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from kivy.graphics import Mesh as KivyMesh
from kivy3 import Vector3
from kivy3.core.object3d import Object3D
# Interleaved per-vertex layout: position (3f), normal (3f), texture coord (2f).
DEFAULT_VERTEX_FORMAT = [
    (b'v_pos', 3, 'float'),
    (b'v_normal', 3, 'float'),
    (b'v_tc0', 2, 'float')
]
DEFAULT_MESH_MODE = 'lines'


class Lines(Object3D):
    """Renderable line set: pairs a line geometry with a material and builds
    the underlying Kivy Mesh."""

    def __init__(self, geometry, material, **kw):
        # NOTE(review): kw is forwarded to Object3D *before* vertex_format /
        # mesh_mode are popped, so Object3D also receives those kwargs —
        # confirm Object3D tolerates them.
        super(Lines, self).__init__(**kw)
        self.geometry = geometry
        self.material = material
        self.mtl = self.material  # shortcut for material property
        self.vertex_format = kw.pop('vertex_format', DEFAULT_VERTEX_FORMAT)
        self.mesh_mode = kw.pop('mesh_mode', DEFAULT_MESH_MODE)
        self.create_mesh()

    def create_mesh(self):
        """ Create real mesh object from the geometry and material """
        vertices = []
        indices = []
        idx = 0
        # Each line contributes its two endpoints ('a' and 'b') as vertices.
        for line in self.geometry.lines:
            for i, k in enumerate(['a', 'b']):
                v_idx = getattr(line, k)
                vertex = self.geometry.vertices[v_idx]
                vertices.extend(vertex)
                # Normals and texture coordinates are optional; fall back to zeros.
                try:
                    normal = line.vertex_normals[i]
                except IndexError:
                    normal = Vector3([0, 0, 0])
                vertices.extend(normal)
                try:
                    tex_coords = self.geometry.face_vertex_uvs[0][idx]
                    vertices.extend(tex_coords)
                except IndexError:
                    vertices.extend([0, 0])
                indices.append(idx)
                idx += 1
        # Kivy meshes use 16-bit indices, hence the hard limit.
        if idx >= 65535 - 1:
            msg = 'Mesh must not contain more than 65535 indices, {} given'
            raise ValueError(msg.format(idx + 1))
        kw = dict(
            vertices=vertices,
            indices=indices,
            fmt=self.vertex_format,
            mode=self.mesh_mode
        )
        if self.material.map:
            kw['texture'] = self.material.map
        self._mesh = KivyMesh(**kw)

    def custom_instructions(self):
        # Yield the material first so its state is bound before the mesh draws.
        yield self.material
        yield self._mesh
| 3,131 | 984 |
#!/usr/bin/python
#################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
import argparse, time, files, os, actions, tasks, unity
# substitute_variables
def substitute_variables( args, *variables ):
    """Expand [name] placeholders in every string argument with the value of
    the correspondingly named parsed argument, in place."""
    ns = vars( args )
    for key in ns:
        if not isinstance( ns[key], str ):
            continue
        for var in variables:
            ns[key] = ns[key].replace( '[' + var + ']', ns[var] )
# class TextureQuality
class TextureQuality:
    """Texture quality presets selectable via --quality."""
    HD = 'hd'
    SD = 'sd'
    Available = [HD, SD]
# class TargetPlatform
class TargetPlatform:
    """Build target platforms selectable via --platform."""
    Win = 'win'
    Mac = 'mac'
    iOS = 'ios'
    Android = 'android'
    Available = [Win, Mac, iOS, Android]
# class TextureCompression
class TextureCompression:
    """Hardware texture compression schemes selectable via --compression."""
    Disabled = 'disabled'
    Pvr = 'pvr'
    Dxt = 'dxt'
    Etc = 'etc'
# class TextureFormat
class TextureFormat:
    """Exported image formats and the build action converting to each."""
    Raw = 'raw'
    Png = 'png'
    Tga = 'tga'

    @staticmethod
    def convert_to(format):
        """Return the conversion action for *format* (None when unknown)."""
        if format == TextureFormat.Raw: return actions.convert_to_raw
        if format == TextureFormat.Png: return actions.png_quant
        if format == TextureFormat.Tga: return actions.compress
# class ExportError
class ExportError(Exception):
    """Raised when exporting or importing project data fails."""
    def __init__(self, msg):
        super(ExportError, self).__init__(msg)
# Imports the project
def import_project(args, source, output):
    """Import a Unity project's scenes, prefabs, materials and assets from
    *source* into *output*, then save the resulting asset database."""
    # Parse project assets
    assets = unity.project.parse_assets(args)
    # Import scenes (skippable via --skip-scenes)
    if args.skip_scenes == 0:
        unity.project.import_scenes(assets, source, output)
    # Import prefabs
    unity.project.import_prefabs(assets, source, output)
    # Import materials
    unity.project.import_materials(assets, source, output)
    # Import assets
    unity.project.import_assets(assets, source, output)
    # Save the assets
    assets.save(output)
# Builds the data to a specified folder
def build(args, source, output):
    """Convert all outdated source assets into *output* and write the
    assets.json manifest.

    :param args: parsed command-line namespace (texFormat, workers, ...)
    :param source: input resource folder
    :param output: destination folder
    """
    # Map file patterns to the conversion action for the selected format.
    rules = {
        '*.tga': TextureFormat.convert_to(args.texFormat)
        , '*.png': TextureFormat.convert_to(args.texFormat)
        , '*.fbx': actions.convert_fbx
    }
    queue = tasks.create(args.workers)
    outdated = files.find_outdated(source)
    files.build(queue, outdated, output, rules)
    queue.start()
    # Write the manifest file.  The with-block already closes the handle;
    # the original's explicit fh.close() inside it was redundant and removed.
    with open(os.path.join(output, 'assets.json'), 'wt') as fh:
        fh.write(files.generate_manifest(outdated))
# Entry point
if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser( description = 'Dreemchest make data tool', formatter_class = argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument( "-a", "--action", type = str, default = 'build', help = "Build action.", choices = ["clean", "build", "install", "import"] )
    parser.add_argument( "-s", "--source", type = str, required = True, help = "Input resource path." )
    parser.add_argument( "-o", "--output", type = str, required = True, help = "Output path." )
    parser.add_argument( "-tc", "--compression", type = str, default = TextureCompression.Disabled, help = "Hardware texture compression." )
    parser.add_argument( "-tf", "--texFormat", type = str, default = TextureFormat.Raw, help = "Exported image format." )
    parser.add_argument( "-p", "--platform", type = str, default = TargetPlatform.Win, help = "Target platform.", choices = TargetPlatform.Available )
    parser.add_argument( "-v", "--version", type = str, default = '1.0', help = "Resource version" )
    parser.add_argument( "-w", "--workers", type = int, default = 8, help = "The number of concurrent workers." )
    parser.add_argument( "-q", "--quality", type = str, default = TextureQuality.HD, help = "Texture quality.", choices = TextureQuality.Available )
    parser.add_argument( "-c", "--cache", type = str, default = '[source]/[platform]/cache', help = "Cache file name." )
    # NOTE(review): argparse `type=bool` treats ANY non-empty string
    # (including "False") as True; kept as-is for CLI compatibility.
    parser.add_argument( "--strip-unused", type = bool, default = False, help = "The unused assets won't be imported." )
    parser.add_argument( "--use-uuids", type = int, default = 1, help = "The UUIDs will be used instead of file names." )
    parser.add_argument( "--skip-scenes", type = int, default = 0, help = "Scenes wont be imported." )
    args = parser.parse_args()
    # Expand [version]/[compression]/... placeholders in the string arguments.
    substitute_variables( args, 'version', 'compression', 'platform', 'quality', 'source' )
    # Check the input arguments
    if not os.path.exists(args.source):
        raise AssertionError('the input folder does not exist')
    # Create the output folder
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    print('--- Building [{0}] data package to [{1}] with [{2}] texture compression ---'.format(args.platform, args.output, args.compression) )
    start = time.time()
    try:
        if args.action == 'build':
            build(args, args.source, args.output)
        elif args.action == 'import':
            import_project(args, args.source, args.output)
    except ExportError as e:
        # BUG FIX: Exception has no `.message` attribute in Python 3, so the
        # original `print(e.message)` raised AttributeError and masked the
        # real error; print the exception itself instead.
        print(e)
    print('--- {0} seconds ---'.format(int(time.time() - start)))
from core.tools.tool import Tool
from core.editor import Editor
class Pencil(Tool):
    """Freehand pencil tool: toggles board cells under the brush, tracked
    against a snapshot of the board taken when the stroke starts."""

    def __init__(self, button, board):
        super(Pencil, self).__init__(button, board)
        self._changed = set()  # (x, y) cells toggled during this stroke
        # Snapshot of the board before the stroke; move() draws on this copy.
        Editor()["pre-pencil"] = board.copy()
        # `button` doubles as the fill value cells are compared against.
        self._fill = button

    def _need_change(self, value):
        # Only cells that do not already hold the fill value are toggled.
        return value != self._fill

    def move(self, x, y):
        """Apply the brush (square of side 2*radius-1) centred at (x, y),
        restricted to the current selection, toggling preview-board cells."""
        editor = Editor()
        radius = editor["radius"]
        selection = editor["selection"]
        pre_board = editor["pre-pencil"]
        for i in range(-radius + 1, radius):
            if y + i not in selection[1]:
                continue
            for j in range(-radius + 1, radius):
                if x + j not in selection[0]:
                    continue
                # Toggle each eligible cell at most once per stroke.
                if self._need_change(pre_board[y + i, x + j]) and not (x + j, y + i) in self._changed:
                    self._changed.add((x + j, y + i))
                    pre_board[y + i, x + j] = not pre_board[y + i, x + j]

    def execute(self):
        """Commit the stroke: drop the preview and toggle the real board."""
        Editor()["pre-pencil"] = None
        for point in self._changed:
            self._board[point] = not self._board[point]

    def revoke(self):
        # Toggling the same cells a second time restores the previous state.
        self.execute()

    def need_save(self):
        # The stroke only needs saving if it actually changed something.
        return len(self._changed) != 0
| 1,262 | 388 |
# Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import responses
from gtmcore.workflows.gitlab import GitLabManager, ProjectPermissions, GitLabException
@pytest.fixture()
def gitlab_mngr_fixture():
    """A pytest fixture that yields a GitLabManager instance wired to the
    test endpoints with a fake access token."""
    yield GitLabManager("repo.gigantum.io", "usersrv.gigantum.io", "fakeaccesstoken")
@pytest.fixture()
def property_mocks_fixture():
    """A pytest fixture that registers `responses` mocks for the user-service
    key endpoint and the testuser/test-labbook project lookup (yields None)."""
    responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                  json={'key': 'afaketoken'}, status=200)
    responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
                  json=[{
                      "id": 26,
                      "description": "",
                  }],
                  status=200)
    yield
class TestGitLabManager(object):
@responses.activate
def test_user_token(self, gitlab_mngr_fixture):
"""test the user_token property"""
# Setup responses mock for this test
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
assert gitlab_mngr_fixture._gitlab_token is None
# Get token
token = gitlab_mngr_fixture.user_token
assert token == 'afaketoken'
assert gitlab_mngr_fixture._gitlab_token == 'afaketoken'
# Assert token is returned and set on second call and does not make a request
responses.add(responses.GET, 'https://usersrv.gigantum.io/key', status=400)
assert token == gitlab_mngr_fixture.user_token
@responses.activate
def test_user_token_error(self, gitlab_mngr_fixture):
"""test the user_token property"""
# Setup responses mock for this test
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'message': 'it failed'}, status=400)
# Make sure error is raised when getting the key fails and returns !=200
with pytest.raises(GitLabException):
_ = gitlab_mngr_fixture.user_token
def test_repository_id(self):
"""test the repository_id property"""
assert GitLabManager.get_repository_id("tester", "test-lb-1") == "tester%2Ftest-lb-1"
@responses.activate
def test_exists_true(self, property_mocks_fixture, gitlab_mngr_fixture):
"""test the exists method for a repo that should exist"""
assert gitlab_mngr_fixture.repository_exists("testuser", "test-labbook") is True
@responses.activate
def test_exists_false(self, gitlab_mngr_fixture):
"""test the exists method for a repo that should not exist"""
responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
json={'key': 'afaketoken'}, status=200)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fderp',
json=[{
"message": "404 Project Not Found"
}],
status=404)
assert gitlab_mngr_fixture.repository_exists("testuser", "derp") is False
@responses.activate
def test_create(self, gitlab_mngr_fixture, property_mocks_fixture):
"""test the create method"""
# Setup responses mock for this test
responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
json={
"id": 27,
"description": "",
},
status=201)
responses.add(responses.POST, 'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
json={
"success": True
},
status=201)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"message": "404 Project Not Found"
}],
status=404)
responses.add(responses.GET, 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
json=[{
"id": 27,
"description": "",
}],
status=200)
gitlab_mngr_fixture.create_labbook("testuser", "new-labbook", visibility="private")
assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True
@responses.activate
def test_create_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
    """create_labbook() raises when the repo exists or GitLab errors out."""
    # property_mocks_fixture reports test-labbook as already existing.
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.create_labbook("testuser", "test-labbook",
                                           visibility="private")
    # A 400 from the project-creation endpoint must also surface as ValueError.
    responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
                  json={"id": 27, "description": ""}, status=400)
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.create_labbook("testuser", "test-labbook",
                                           visibility="private")
@responses.activate
def test_get_collaborators(self, gitlab_mngr_fixture, property_mocks_fixture):
    """get_collaborators() returns (id, username, permission) tuples."""
    members_url = ('https://repo.gigantum.io/api/v4/projects/'
                   'testuser%2Ftest-labbook/members')
    responses.add(responses.GET, members_url,
                  json=[{"id": 29, "name": "Jane Doe", "username": "janed",
                         "access_level": ProjectPermissions.OWNER.value,
                         "expires_at": None},
                        {"id": 30, "name": "John Doeski", "username": "jd",
                         "access_level": ProjectPermissions.READ_ONLY.value,
                         "expires_at": None}],
                  status=200)
    # The second call to the same URL gets a 400 so the error path is covered.
    responses.add(responses.GET, members_url, status=400)

    collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
    assert len(collaborators) == 2
    assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
    assert collaborators[1] == (30, 'jd', ProjectPermissions.READ_ONLY)
    # Verify it fails on a GitLab error (second mock is used on second call).
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
@responses.activate
def test_add_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
    """add_collaborator() grants access and shows up in the member list."""
    # Resolve the username to a GitLab user id.
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/users?username=person100',
                  json=[{"id": 100, "name": "New Person",
                         "username": "person100", "state": "active"}],
                  status=200)
    # Membership POST succeeds.
    responses.add(responses.POST,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                  json={"id": 100, "name": "New Person",
                        "username": "person100", "state": "active"},
                  status=201)
    # The member list afterwards holds the owner and the new member.
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                  json=[{"id": 29, "name": "Jane Doe", "username": "janed",
                         "access_level": 40, "expires_at": None},
                        {"id": 100, "name": "New Person", "username": "person100",
                         "access_level": 30, "expires_at": None}],
                  status=200)

    gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100",
                                         ProjectPermissions.READ_WRITE)
    collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
    assert len(collaborators) == 2
    assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
    assert collaborators[1] == (100, 'person100', ProjectPermissions.READ_WRITE)
@responses.activate
def test_add_collaborator_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
    """add_collaborator() raises ValueError on lookup or grant failures."""
    user_payload = [{"id": 100, "name": "New Person",
                     "username": "person100", "state": "active"}]
    # First user lookup fails outright (HTTP 400) ...
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/users?username=person100',
                  json=user_payload, status=400)
    # ... the second lookup goes through, but the membership POST then fails.
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/users?username=person100',
                  json=user_payload, status=201)
    responses.add(responses.POST,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                  json={"id": 100, "name": "New Person",
                        "username": "person100", "state": "active"},
                  status=400)

    with pytest.raises(ValueError):
        _ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook",
                                                 "person100", ProjectPermissions.OWNER)
    with pytest.raises(ValueError):
        _ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook",
                                                 "person100", ProjectPermissions.READ_ONLY)
@responses.activate
def test_delete_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
    """delete_collaborator() removes the member from the project."""
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/users?username=person100',
                  json=[{"id": 100, "name": "New Person",
                         "username": "person100", "state": "active"}],
                  status=200)
    responses.add(responses.DELETE,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
                  status=204)
    # Afterwards only the owner remains in the member list.
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                  json=[{"id": 29, "name": "Jane Doe", "username": "janed",
                         "access_level": 40, "expires_at": None}],
                  status=200)

    gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
    collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
    assert len(collaborators) == 1
    assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
@responses.activate
def test_delete_collaborator_error(self, gitlab_mngr_fixture, property_mocks_fixture):
    """delete_collaborator() must raise when GitLab rejects the deletion.

    The previous version of this test registered mocks but asserted nothing
    (its body was commented out with a "What is this test even for?" note).
    """
    # User-id lookup succeeds ...
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/users?username=person100',
                  json=[{"id": 100, "name": "New Person",
                         "username": "person100", "state": "active"}],
                  status=200)
    # ... but the member DELETE fails server-side.
    responses.add(responses.DELETE,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
                  status=400)
    # Other manager methods surface API failures as ValueError; expect the
    # same contract here.
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
@responses.activate
def test_error_on_missing_repo(self, gitlab_mngr_fixture):
    """Every collaborator operation raises ValueError for a missing repo."""
    responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                  json={'key': 'afaketoken'}, status=200)
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
                  json=[{"message": "404 Project Not Found"}],
                  status=404)

    with pytest.raises(ValueError):
        gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "test",
                                             ProjectPermissions.READ_ONLY)
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 100)
@responses.activate
def test_configure_git_credentials(self, gitlab_mngr_fixture):
    """Credentials can be set from the user-service token and cleared again."""
    host = "test.gigantum.io"
    username = "testuser"
    responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                  json={'key': 'afaketoken'}, status=200)

    # Nothing configured yet.
    assert gitlab_mngr_fixture._check_if_git_credentials_configured(host, username) is None
    # Configure and verify the token round-trips.
    gitlab_mngr_fixture.configure_git_credentials(host, username)
    assert gitlab_mngr_fixture._check_if_git_credentials_configured(host, username) == "afaketoken"
    # Clearing removes the stored credentials again.
    gitlab_mngr_fixture.clear_git_credentials(host)
    assert gitlab_mngr_fixture._check_if_git_credentials_configured(host, username) is None
@responses.activate
def test_delete(self, gitlab_mngr_fixture, property_mocks_fixture):
    """remove_repository() deletes the repo; deleting twice raises."""
    project_url = 'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook'
    # Existence checks are consumed in order: present twice, then gone twice.
    for _ in range(2):
        responses.add(responses.GET, project_url,
                      json=[{"id": 27, "description": ""}], status=200)
    responses.add(responses.DELETE, project_url,
                  json={"message": "202 Accepted"}, status=202)
    for _ in range(2):
        responses.add(responses.GET, project_url,
                      json=[{"message": "404 Project Not Found"}], status=404)
    responses.add(responses.DELETE,
                  'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
                  json={}, status=204)

    assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True
    gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")
    assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is False
    # Removing a repository that is already gone must fail loudly.
    with pytest.raises(ValueError):
        gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")
| 19,855 | 5,603 |
import logging
from pynder import errors
from random import randint
from PyQt5 import QtCore
class LikesBotThread(QtCore.QThread):
    """
    Background QThread that auto-swipes on nearby users via a pynder session.

    ``data_downloaded`` is a ``pyqtSignal(object)``; connect a slot to it to
    receive results::

        instance = LikesBotThread(session, likes_handler)
        instance.data_downloaded.connect(yourMethod)
        instance.start()

    NOTE(review): the previous docstring described a ``MatchesThread`` that
    emits the matches object, but nothing in this class emits
    ``data_downloaded`` -- confirm against the rest of the file.
    """
    data_downloaded = QtCore.pyqtSignal(object)
    def __init__(self, session, likes_handler, decision_handler=None):
        # session: authenticated pynder session.
        # likes_handler: object providing like_user()/dislike_user().
        # decision_handler: optional filter exposing analyze(user, friends).
        QtCore.QThread.__init__(self)
        self.session = session
        self.friends = session.get_fb_friends()
        self.likes_handler = likes_handler
        self.decision_handler = decision_handler
        self.abort = False  # set by stop(); checked on every loop iteration
        self.logger = logging.getLogger(__name__)
    def stop(self):
        # Request a cooperative shutdown; run() exits at the next check.
        self.abort = True
    def run(self):
        # Worker loop: swipe while likes remain, otherwise sleep and retry.
        while not self.abort:
            if self.session.likes_remaining != 0:
                nearby_users = self.session.nearby_users()
                try:
                    user = next(nearby_users)  # Iterate through generator object.
                    if self.decision_handler:
                        # Dislike anyone the decision handler rejects.
                        if not self.decision_handler.analyze(user, self.friends):
                            self.likes_handler.dislike_user(user, 'Bot')
                            continue
                    self.likes_handler.like_user(user, 'Bot')
                    self.logger.info(u'Liking ' + user.name + '.')
                except StopIteration:
                    try:
                        # No more users to go through. Reset the distance filter to fetch the users again.
                        self.session.profile.distance_filter = self.session.profile.distance_filter
                    except errors.RequestError:
                        self.logger.error('Request timed out when trying to update distance filter in profile.')
                    except errors.RecsError:
                        self.logger.info('There are probably no more nearby users to fetch. '
                                         'Increasing distance filter by 1 mile...')
                        self.session.profile.distance_filter += 1
                self.sleep(randint(3, 5))  # Give it a break, 3 to 5 seconds between every swipe.
            else:
                try:
                    like_in_seconds = self.session.can_like_in
                    like_in_hours = like_in_seconds / 60 / 60
                    self.logger.info('Out of likes. Can like in: ' + str(like_in_seconds) + ' seconds (' +
                                     str(like_in_hours) + ' hours).')
                except errors.RequestError:
                    self.logger.info('Out of likes. Retrying in an hour...')
                # NOTE(review): the log message says "an hour" but this
                # sleeps for 6 hours -- confirm which one is intended.
                self.sleep(3600 * 6)  # Out of likes, pausing for X hours.
| 3,248 | 847 |
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops.operations as operator
import os
from lr_generator import get_lr
from CrossEntropy import CrossEntropy
import argparse
from inception_A import inception_A
from inception_B import inception_B
import numpy as np
from inception_C import inception_C
from network import Stem
from reduction_A import reduction_A
from reduction_B import reduction_B
from reduction_C import reduction_C
import mindspore.dataset as ds
from mindspore import context
from mindspore import Tensor
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.nn.optim.momentum import Momentum
import os
import urllib.request
from urllib.parse import urlparse
import gzip
import argparse
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train import Model
from mindspore.common.initializer import TruncatedNormal
import mindspore.dataset.transforms.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.transforms.vision import Inter
from mindspore.nn.metrics import Accuracy
from mindspore.common import dtype as mstype
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model, ParallelMode
from config import config
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.communication.management import init
import mindspore.nn as nn
import mindspore.common.initializer as weight_init
from dataloader import create_dataset
def unzipfile(gzip_path):
    """Decompress a ``.gz`` file next to itself.

    Args:
        gzip_path: Path of the gzip archive; the output file is the same
            path with the ``.gz`` suffix removed.
    """
    # Context managers guarantee both handles are closed even on error
    # (the original never closed the output file handle).
    with gzip.GzipFile(gzip_path) as gz_file, \
            open(gzip_path.replace('.gz', ''), 'wb') as out_file:
        out_file.write(gz_file.read())
def download_dataset():
    """Download and unpack MNIST into ``./MNIST_Data/{train,test}``.

    Files whose unpacked version already exists are skipped. Fetched
    archives are unzipped and then deleted.
    """
    print("******Downloading the MNIST dataset******")
    train_path = "./MNIST_Data/train/"
    test_path = "./MNIST_Data/test/"
    # Bug fix: the original only created the directories when BOTH were
    # missing, so a partially-present tree made the downloads fail.
    os.makedirs(train_path, exist_ok=True)
    os.makedirs(test_path, exist_ok=True)
    train_url = {"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
                 "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"}
    test_url = {"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
                "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"}
    _fetch_and_unpack(train_url, train_path)
    _fetch_and_unpack(test_url, test_path)
def _fetch_and_unpack(urls, dest_dir):
    """Retrieve each gzipped URL into dest_dir, unpack it, drop the archive."""
    for url in urls:
        url_parse = urlparse(url)
        # The file name is the last component of the URL path.
        file_name = os.path.join(dest_dir, url_parse.path.split('/')[-1])
        if not os.path.exists(file_name.replace('.gz', '')):
            urllib.request.urlretrieve(url, file_name)
            unzipfile(file_name)
            os.remove(file_name)
# def create_dataset(data_path, batch_size=32, repeat_size=1,
# num_parallel_workers=1):
# """ create dataset for train or test
# Args:
# data_path: Data path
# batch_size: The number of data records in each group
# repeat_size: The number of replicated data records
# num_parallel_workers: The number of parallel workers
# """
# # define dataset
# mnist_ds = ds.MnistDataset(data_path)
# # define operation parameters
# resize_height, resize_width = 299, 299
# rescale = 1.0 / 255.0
# shift = 0.0
# rescale_nml = 1 / 0.3081
# shift_nml = -1 * 0.1307 / 0.3081
# # define map operations
# resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Resize images to (32, 32)
# rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images
# rescale_op = CV.Rescale(rescale, shift) # rescale images
# hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.
# type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network
# # apply map operations on images
# mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
# # apply DatasetOps
# buffer_size = 10000
# mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
# mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
# mnist_ds = mnist_ds.repeat(repeat_size)
# return mnist_ds
# Command-line interface for the training script (parsed at import time).
parser = argparse.ArgumentParser(description='Image classification')
# NOTE(review): type=bool in argparse is a known trap -- any non-empty
# string (including "False") parses as True.
parser.add_argument('--run_distribute', type=bool, default=True, help='Run distribute')
parser.add_argument('--device_num', type=int, default=8, help='Device num.')
parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--data_url', default=None, help='Location of data.')
parser.add_argument('--train_url', default=None, help='Location of training outputs.')
opt = parser.parse_args()
# NOTE(review): 'dict' shadows the builtin and, like 'i', appears unused in
# the rest of this file -- candidates for removal.
dict = {}
i = 0
class InceptionV4(nn.Cell):
    """Inception-v4 backbone assembled from Stem/Inception/Reduction cells."""
    def __init__(self):
        super().__init__()
        self.Stem = Stem(3)
        # NOTE(review): one instance of each inception cell is created and
        # applied several times in construct(), so the repetitions share
        # weights -- confirm this is intended (the paper uses distinct
        # blocks per repetition).
        self.inception_A = inception_A(384)
        self.reduction_A = reduction_A(384)
        self.inception_B = inception_B(1024)
        self.reduction_B = reduction_B(1024)
        self.inception_C = inception_C(1536)
        self.avgpool = nn.AvgPool2d(8)
        #### reshape to 2-D
        self.dropout = nn.Dropout(0.8)
        self.linear = nn.Dense(1536, 1000)
    def construct(self, x):
        # Forward pass: stem, 4x A, reduce, 7x B, reduce, 3x C, then head.
        x = self.Stem(x)
        x = self.inception_A(x)
        x = self.inception_A(x)
        x = self.inception_A(x)
        x = self.inception_A(x)
        x = self.reduction_A(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.inception_B(x)
        x = self.reduction_B(x)
        x = self.inception_C(x)
        x = self.inception_C(x)
        x = self.inception_C(x)
        x = self.avgpool(x)
        x = self.dropout(x)
        x = nn.Flatten()(x)
        x = self.linear(x)
        return x
    def generate_inception_module(self, input_channels, output_channels, block_num, block):
        # NOTE(review): apparently unused helper. It nests SequentialCells
        # recursively and reassigns input_channels after the cells are
        # already built (no effect) -- verify before relying on it.
        if block == 1:
            layers = nn.SequentialCell([inception_A(input_channels)])
            for i in range(block_num):
                layers = nn.SequentialCell(inception_A(input_channels), layers)
                input_channels = output_channels
        if block == 2:
            layers = nn.SequentialCell([inception_B(input_channels)])
            for i in range(block_num):
                layers = nn.SequentialCell(inception_B(input_channels), layers)
                input_channels = output_channels
        if block == 3:
            layers = nn.SequentialCell([inception_C(input_channels)])
            for i in range(block_num):
                layers = nn.SequentialCell(inception_C(input_channels), layers)
                input_channels = output_channels
        return layers
def train(opt):
    """Download MNIST and train the network on CPU in graph mode.

    Args:
        opt: Parsed command-line options. Currently unused here (the
            distributed-training branch is disabled) but kept for interface
            compatibility.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=False)
    mnist_path = "./MNIST_Data"
    download_dataset()
    dataset = create_dataset(os.path.join(mnist_path, "train"), 32, 1)
    # Bug fix: InceptionV4.__init__ takes no arguments; the original call
    # InceptionV4(4, 7, 3) raised TypeError at runtime. Also removed the
    # unused 'loss', 'stepsize' and intermediate 'lr' locals.
    net = InceptionV4()
    optt = nn.Momentum(net.trainable_params(), 0.01, momentum=0.9)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # Save the network model and parameters for subsequent fine-tuning.
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # Group layers into an object with training and evaluation features.
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
    model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
    model.train(config.epoch_size, dataset,
                callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)
#########################################
def weight_variable():
    """Create the default TruncatedNormal weight initializer (sigma=0.02)."""
    initializer = TruncatedNormal(0.02)
    return initializer
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Build a bias-free, 'valid'-padded Conv2d with TruncatedNormal weights."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        weight_init=weight_variable(),
        has_bias=False,
        pad_mode="valid",
    )
def fc_with_initialize(input_channels, out_channels):
    """Build a Dense layer with TruncatedNormal weight and bias initializers."""
    return nn.Dense(input_channels, out_channels,
                    weight_variable(), weight_variable())
class LeNet5(nn.Cell):
    """Lenet network structure."""
    # define the operator required
    def __init__(self):
        super(LeNet5, self).__init__()
        # Two conv+pool stages followed by a three-layer dense head
        # (10 output classes, matching MNIST).
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
    # use the preceding operators to construct networks
    def construct(self, x):
        # conv -> relu -> pool, twice; then flatten and the dense head.
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x
def ans():
    """Train InceptionV4 on the dataset under ./dataset using config values."""
    context.set_context(mode=context.GRAPH_MODE)
    net = InceptionV4()
    print("start")
    ds = create_dataset('./dataset', True, config.epoch_size, config.batch_size)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # Save the network model and parameters for subsequent fine-tuning.
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    net_loss = CrossEntropy(smooth_factor=config.label_smooth_factor,
                            num_classes=config.class_num)
    # Cosine-decay learning-rate schedule. (The original also built an
    # initial lr/Momentum pair and a FixedLossScaleManager that were never
    # used -- removed as dead code.)
    lr = Tensor(get_lr(global_step=0, lr_init=config.lr_init, lr_end=0.0,
                       lr_max=config.lr_max, warmup_epochs=config.warmup_epochs,
                       total_epochs=config.epoch_size,
                       steps_per_epoch=config.batch_size,
                       lr_decay_mode='cosine'))
    optt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                    config.momentum, config.weight_decay, config.loss_scale)
    model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
    model.train(config.epoch_size, ds,
                callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)
# Script entry point: kick off training when run directly.
if __name__ == "__main__":
    ans()
import datetime
from flask_bcrypt import *
from flask import current_app
from flask_sqlalchemy import SQLAlchemy
from flask_security import UserMixin, RoleMixin
# Shared SQLAlchemy handle; initialised against the app elsewhere.
db = SQLAlchemy()
# Association table for the many-to-many User <-> Role relationship used by
# Flask-Security.
roles_users = db.Table('roles_users',
                       db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
                       )
class Role(db.Model, RoleMixin):
    """Flask-Security role (e.g. 'admin'); linked to users via roles_users."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, UserMixin):
    """Application user account (Flask-Security UserMixin)."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    # NOTE(review): stored as passed in by the caller; hashing (flask_bcrypt
    # is imported at module level) presumably happens elsewhere -- confirm.
    password = db.Column(db.String(255))
    registered_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    active = db.Column(db.Boolean, nullable=False, default=False)
    confirmed_at = db.Column(db.DateTime, nullable=True)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    notes = db.Column(db.Text, nullable=True)
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    company_id = db.Column(db.Integer(), db.ForeignKey('company.id'))
    company = db.relationship('Company')
    def __unicode__(self):
        return("<User id={} email={}>".format(self.id, self.email))
    def __repr__(self):
        return("<User ID: {}, email: {}>".format(self.id, self.email))
    def __init__(self, email, password, roles, active=False, admin=False, confirmed_at=None):
        """Create a user; admins always receive the 'admin' role."""
        self.email = email
        self.password = password
        if admin:
            self.roles = [Role.query.filter_by(name='admin').first()]
        elif roles:
            # Bug fix: the caller-supplied roles were previously ignored for
            # non-admin users.
            self.roles = roles
        self.registered_on = datetime.datetime.now()
        self.admin = admin
        self.active = active
        self.confirmed_at = confirmed_at
    def get_id(self):
        return self.id
class ServiceAgreement(db.Model):
    """A named service agreement belonging to a company."""
    id = db.Column(db.Integer, primary_key=True)
    company_id = db.Column('company_id', db.Integer(), db.ForeignKey('company.id'), nullable=False)
    company = db.relationship('Company')
    name = db.Column(db.String(50), nullable=False)
    started_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    text = db.Column(db.Text, nullable=True, default='')
    def __unicode__(self):
        # Bug fix: the original format string ("<started = %>") had no
        # placeholder, so it always returned the literal template.
        return "<ServiceAgreement started={}>".format(self.started_on)
class Company(db.Model):
    """A customer company; owns users, projects and service agreements."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    email = db.Column(db.String(255))
    phone = db.Column(db.String(20))
    bio = db.Column(db.String(255))
    # NOTE(review): the backref name "user" is singular yet refers back to
    # the company -- confirm nothing relies on it before renaming.
    users = db.relationship("User", backref="user", lazy="dynamic")
    def __repr__(self):
        return("{}".format(self.name))
    def __unicode__(self):
        # Bug fix: the original format string had no placeholder and always
        # returned the literal "<name = %>".
        return "<Company name={}>".format(self.name)
class UserRequest(db.Model):
    """A support/feature request filed by a user."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column('user_id', db.Integer(), db.ForeignKey('user.id'), nullable=False)
    user = db.relationship('User')
    requested_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    title = db.Column(db.String(255), nullable=False)
    request = db.Column(db.Text, nullable=False)
    # Free-form integer status code; semantics defined by the callers.
    status = db.Column(db.Integer)
    def __unicode__(self):
        # Bug fix: '%' is not a str.format placeholder; the original always
        # returned the literal template.
        return "<UserRequest user={} request={}>".format(self.user.email, self.request)
class Iteration(db.Model):
    """Skeleton model: links an iteration to its project (no other fields yet)."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): the attribute is named 'project' but holds the foreign-key
    # id column ('project_id') -- consider renaming for clarity.
    project = db.Column('project_id', db.Integer(), db.ForeignKey('project.id'))
class Project(db.Model):
    """A project owned by a company; (company_id, name) pairs are unique."""
    id = db.Column(db.Integer, primary_key=True)
    company_id = db.Column('company_id', db.Integer(), db.ForeignKey('company.id'), nullable=False)
    company = db.relationship('Company')
    name = db.Column(db.String(255))
    status = db.Column(db.Integer, nullable=False, default=0)
    # Bug fix: __table_args__ must be a tuple OF constraints --
    # tuple(constraint) tried to iterate the constraint object itself.
    # The constrained column is 'company_id'; no column named 'company' exists.
    __table_args__ = (db.UniqueConstraint('company_id', 'name',
                                          name='_company_projectname_uc'),)
    def save_changes(self, form, new=False):
        """Copy the form's name onto this project and commit (add it first if new)."""
        self.name = form.name.data
        if new:
            # Bug fix: the original referenced an undefined name 'project'
            # here, raising NameError for every new project.
            db.session.add(self)
        db.session.commit()
    def get_id(self):
        return self.id
    def __unicode__(self):
        return self.name
| 4,238 | 1,434 |
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
from setuptools.command.develop import develop as _develop
import os
def _post_install(libname, libpath):
    """Register this plugin in the JumpScale (js9) state config and
    regenerate the jumpscale loader.

    Args:
        libname: Distribution name used as the plugin key.
        libpath: Absolute path of the installed package directory.
    """
    # Imported lazily: js9 only exists once JumpScale itself is installed.
    from js9 import j
    # add this plugin to the config
    c = j.core.state.configGet('plugins', defval={})
    c[libname] = libpath
    j.core.state.configSet('plugins', c)
    print("****:%s:%s" % (libname, libpath))
    j.tools.jsloader.generate()
    # not needed to do
    # j.tools.jsloader.copyPyLibs()
class install(_install):
    """setuptools install command extended with JumpScale plugin registration."""
    def run(self):
        _install.run(self)
        dist_name = self.config_vars['dist_name']
        plugin_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), dist_name)
        # Defer registration so setuptools runs it as a post-install step.
        self.execute(_post_install, (dist_name, plugin_path),
                     msg="Running post install task")
class develop(_develop):
    """setuptools develop command extended with JumpScale plugin registration."""
    def run(self):
        _develop.run(self)
        dist_name = self.config_vars['dist_name']
        plugin_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), dist_name)
        # Defer registration so setuptools runs it as a post-install step.
        self.execute(_post_install, (dist_name, plugin_path),
                     msg="Running post install task")
# Convert the Markdown README to reST for PyPI display; fall back to an
# empty description when pypandoc -- or the pandoc binary it wraps, which
# surfaces as OSError -- is unavailable. (The original also assigned
# long_description = "" twice; the duplicate is removed.)
try:
    from pypandoc import convert
    long_description = convert("README.md", 'rst')
except (ImportError, OSError):
    long_description = ""
# Package metadata. The custom cmdclass hooks register the plugin with
# JumpScale after a normal or editable install.
setup(
    name='JumpScale9AYS',
    version='9.2.0',
    description='Automation framework for cloud workloads ays lib',
    long_description=long_description,
    url='https://github.com/Jumpscale/ays9',
    author='GreenItGlobe',
    author_email='info@gig.tech',
    license='Apache',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'JumpScale9>=9.2.0',
        'JumpScale9Lib>=9.2.0',
        'jsonschema>=2.6.0',
        'python-jose==1.3.2',
        'sanic>=0.5.4',
        'aiohttp>=2.2.5'
    ],
    cmdclass={
        'install': install,
        'develop': develop,
        # NOTE(review): 'developement' is misspelled -- presumably kept as a
        # compatibility alias; confirm before removing.
        'developement': develop
    },
    scripts=['cmds/ays'],
)
| 2,034 | 673 |
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from google.protobuf import json_format as jsonpb
ROOT = os.path.dirname(os.path.dirname(__file__))
# Make the generated protobuf bindings (checked out under .recipe_deps)
# importable before pulling in the Build message.
sys.path.append(os.path.join(ROOT, '.recipe_deps', '_pb'))
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
# Filter: JSONPB-encoded Build on stdin -> binary-serialized proto on stdout.
# NOTE(review): SerializeToString() returns bytes; under Python 3 this write
# would need sys.stdout.buffer -- presumably runs under Python 2 via vpython.
sys.stdout.write(jsonpb.Parse(sys.stdin.read(), Build()).SerializeToString())
| 514 | 176 |
import argparse
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from os import mkdir
from os.path import isdir
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
# Browser-like request headers so the target site serves the scraper like a
# regular Chrome client.
HEADERS = {
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
    'DNT': '1',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-US,en;q=0.9'
}
class MangaInfo():
    """Plain data holder describing a manga and which chapters to download."""

    def __init__(self):
        # Source URL, title and destination directory (all empty until set).
        self.manga_url = self.manga_name = self.save_path = ''
        # Parallel lists: chapter_name_list[i] matches chapter_url_list[i].
        self.chapter_name_list = []
        self.chapter_url_list = []
        # Indices (into the lists above) of the chapters selected for download.
        self.list_of_download_chapter = []
class DownloadEngine():
def __init__(self):
self.stop_signal = 0
self.error403_signal = 0
def set_manga(self, manga):
self.current_manga = manga
self.image_formats = ['.jpg', '.jpeg', '.png', '.gif', '.tiff', '.bmp']
def stop_download(self, sig, frame):
self.stop_signal = 1
def run(self):
signal.signal(signal.SIGINT, self.stop_download)
self.crawl_chapter_data_list()
def crawl_chapter_data_list(self):
chapter_list = []
# Get each chapter info
for index in self.current_manga.list_of_download_chapter:
chapter_detail = {}
chapter_detail['chapter_url'] = self.current_manga.chapter_url_list[index]
chapter_detail['chapter_name'] = self.current_manga.chapter_name_list[index]
if ':' in chapter_detail['chapter_name']:
chapter_detail['chapter_name'] = chapter_detail['chapter_name'].split(':')[
0]
chapter_list.append(chapter_detail)
# Remove downloaded chapters | if not create directory
chapter_list = [i_chapter for i_chapter in chapter_list if not isdir(
self.current_manga.save_path + '/' + i_chapter['chapter_name'])]
chapter_list = list(reversed(chapter_list))
if chapter_list:
# Create directory and start to download
index = 0
print('Start download ..... Press Ctrl+C to stop.')
for chapter_data in chapter_list:
if self.stop_signal:
break
chapter_dir_path = self.current_manga.save_path + \
'/' + chapter_data['chapter_name']
mkdir(chapter_dir_path.replace('\"', '').replace(
'\'', '').replace('?', '').replace('!', ''))
chapter_data['chapter_dir_path'] = chapter_dir_path
self.get_chapter_contents(chapter_data)
index += 1
print('Download Done')
sys.exit(0)
def get_image_urls(self, soup):
contents = []
for content_url in soup.find('div', class_='reading-detail box_doc').find_all('img'):
if content_url not in contents:
if any(img_fm in content_url['src'] for img_fm in self.image_formats):
img_url = content_url['src']
elif content_url.has_attr('data-original'):
img_url = content_url['data-original']
elif content_url.has_attr('data-cdn') and any(img_fm in content_url['data-cdn'] for img_fm in self.image_formats):
img_url = content_url['data-cdn']
else:
img_url = content_url['src']
contents.append(self.format_img_url(img_url))
return contents
def format_img_url(self, url):
return url.replace('//', 'http://')
def get_image_paths(self, chapter_dir_path, contents):
img_path_list = []
image_index = 1
for img_url in contents:
img_name = img_url.split('/')[-1]
if any(img_fm in img_name[-4:] for img_fm in self.image_formats):
img_path_name = chapter_dir_path + '/image_' + img_name
else:
img_path_name = chapter_dir_path + \
'/image_' + '{0:0=3d}'.format(image_index) + '.jpg'
img_path_list.append(img_path_name)
image_index += 1
return img_path_list
    def get_chapter_contents(self, chapter_data):
        """Download every image of one chapter.

        ``chapter_data`` is the dict built by crawl_chapter_data_list with
        'chapter_url', 'chapter_name' and 'chapter_dir_path' keys.  A
        failure while fetching or parsing the chapter page is reported and
        the chapter is skipped; per-image failures are handled inside
        download_image, which only sets ``error403_signal``.
        """
        try:
            # Fetch and parse the chapter page.
            request = requests.get(
                chapter_data['chapter_url'], headers=HEADERS, timeout=10)
            soup = BeautifulSoup(request.text, 'html.parser')
            # Image URLs on the page, in reading order.
            contents = self.get_image_urls(soup)
            # Local target path for each image URL.
            img_path_list = self.get_image_paths(
                chapter_data['chapter_dir_path'], contents)
            # Pair (local_path, url) for each worker.
            image_data_list = list(
                map(lambda x, y: (x, y), img_path_list, contents))
            # Progress message for the user.
            chapter_name = 'Downloading ' + \
                chapter_data['chapter_name'] + ' .....'
            print(chapter_name)
            # Download the images concurrently, one per worker task.
            with ThreadPoolExecutor(max_workers=20) as executor:
                executor.map(self.download_image, image_data_list)
            # download_image sets error403_signal when the server answers
            # HTTP 403; report once per chapter and reset for the next one.
            if self.error403_signal:
                print(chapter_data['chapter_name'] +
                      ': Can not download some images. Please check again!')
                self.error403_signal = 0
        except Exception:
            print('Error get chapter info. Please try again later.')
        print('Finish ' + chapter_data['chapter_name'])
    def download_image(self, image_data_list):
        """Download a single image to disk (thread-pool worker).

        ``image_data_list``: a ``(local_path, image_url)`` tuple.  Network
        errors are retried roughly once per second for up to ``timeout``
        (10) seconds; an HTTP 403 response sets ``error403_signal`` so the
        chapter loop can report it, and the image is skipped.  Does nothing
        once ``stop_signal`` is set.
        """
        if not self.stop_signal:
            img_path_name, img_url = image_data_list
            # Retry window for this one image: give up after `timeout` secs.
            start = time.time()
            timeout = 10
            while True:
                try:
                    img_data = requests.get(
                        img_url, headers=HEADERS, timeout=10)
                    if img_data.status_code == 403:
                        # Server refused: flag it, do not write a file.
                        self.error403_signal = 1
                    else:
                        with open(img_path_name, 'wb') as handler:
                            handler.write(img_data.content)
                    break
                except Exception:
                    # Network error: retry until the window is exhausted.
                    if time.time() - start > timeout:
                        print('Error download image: ' + img_path_name)
                        break
                    print('Retry download image: ' + img_url)
                    time.sleep(1)
                    continue
class Bridge():
    """Glue between the command line and the download engine.

    Validates the manga URL, crawls the chapter list and hands a populated
    MangaInfo over to DownloadEngine.
    """

    def __init__(self):
        # Per-instance manga state.  This used to be a class attribute, so
        # every Bridge instance shared a single MangaInfo object.
        self.current_manga = MangaInfo()

    def start_download(self, manga_url, from_chapter_input, to_chapter_input):
        """Entry point: record the request and start the download."""
        self.manga_url = manga_url
        self.from_chapter_input = from_chapter_input
        self.to_chapter_input = to_chapter_input
        self.download_chapter()

    def download_chapter(self):
        """Validate the request, prepare the save directory, run the engine."""
        if not (self.check_valid_url() and self.get_chapter_input()):
            return
        # Strip characters that are unsafe in directory names.
        manga_save_path = self.current_manga.manga_name
        manga_save_path = manga_save_path.replace(
            '\"', '').replace('\'', '').replace('?', '').replace('!', '')
        if not isdir(manga_save_path):
            mkdir(manga_save_path)
        self.current_manga.save_path = manga_save_path
        engine = DownloadEngine()
        engine.set_manga(self.current_manga)
        engine.run()

    def check_valid_url(self):
        """Return True when manga_url points at a supported manga page."""
        current_manga_url = self.manga_url
        result = False
        # Some mirrors require a Referer matching the requested site.
        domain = urlparse(current_manga_url)
        referer_header = '{uri.scheme}://{uri.netloc}/'.format(uri=domain)
        HEADERS['Referer'] = referer_header
        if not any(substr in current_manga_url for substr in ['nhattruyen', 'nettruyen']):
            print('Invalid manga url. Please try again.')
            return result
        try:
            request = requests.get(
                current_manga_url, headers=HEADERS, timeout=5)
            soup = BeautifulSoup(request.text, 'html.parser')
            # The chapter list container is the marker of a manga homepage.
            if not soup.find('div', id='nt_listchapter'):
                print('Invalid manga url. Please try again.')
            else:
                self.current_manga.manga_url = str(current_manga_url)
                self.crawl_manga_home_page()
                result = True
            return result
        except Exception:
            print('Error getting manga page. Please try again.')
            return False

    def crawl_manga_home_page(self):
        """Fill current_manga with the title and chapter lists of the page."""
        try:
            request = requests.get(
                self.current_manga.manga_url, headers=HEADERS, timeout=10)
            soup = BeautifulSoup(request.text, 'html.parser')
            self.current_manga.manga_name = soup.find(
                'h1', class_='title-detail').text
            self.current_manga.chapter_name_list = [
                i.find('a').text for i in soup.find_all('div', class_='chapter')]
            chapter_url_list = []
            for chapter in soup.find('div', id='nt_listchapter').find('ul').find_all('a'):
                chapter_url_list.append(chapter['href'])
            self.current_manga.chapter_url_list = chapter_url_list
        except Exception:
            print('Error getting manga page. Please try again.')

    def get_chapter_index(self, chapter_input):
        """Map a user chapter input to an index into chapter_name_list.

        'start_chapter' / 'end_chapter' are aliases for the first / last
        chapter.  Otherwise the input is matched against the chapter
        number parsed from names like 'Chapter 12: Title'.  Returns None
        when nothing matches.
        """
        if chapter_input == 'start_chapter':
            return 0
        if chapter_input == 'end_chapter':
            return len(self.current_manga.chapter_name_list) - 1
        index = None
        for chapter in self.current_manga.chapter_name_list:
            tokens = chapter.split()
            if len(tokens) < 2:
                # Unexpected chapter-name format; the original raised
                # IndexError here and aborted the whole lookup.
                continue
            chapter_name = tokens[1]
            if ':' in chapter_name:
                # Drop the trailing colon of "Chapter 12:".
                chapter_name = chapter_name[:-1]
            if chapter_input == chapter_name:
                index = self.current_manga.chapter_name_list.index(chapter)
        return index

    def get_chapter_input(self):
        """Resolve the requested chapter range.

        On success fills current_manga.list_of_download_chapter with the
        inclusive index range (swapped if given in reverse order) and
        returns True; otherwise reports the problem and returns False.
        """
        from_chapter_index = self.get_chapter_index(self.from_chapter_input)
        to_chapter_index = self.get_chapter_index(self.to_chapter_input)
        if from_chapter_index is None or to_chapter_index is None:
            print('Invalid manga chapter input. Please try again.')
            return False
        if from_chapter_index > to_chapter_index:
            from_chapter_index, to_chapter_index = to_chapter_index, from_chapter_index
        self.current_manga.list_of_download_chapter = list(
            range(from_chapter_index, to_chapter_index + 1))
        return True
if __name__ == '__main__':
    # CLI: exactly one of --all / --fromto / --chapter selects what to get.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('manga_url', type=str,
                            help='url to the manga homepage')
    arg_parser.add_argument('-a', '--all', action='store_true',
                            help='download/update all chapter')
    arg_parser.add_argument('-f', '--fromto', nargs=2,
                            metavar=('from_chapter', 'to_chapter'),
                            help='download from one chapter to another chapter')
    arg_parser.add_argument('-c', '--chapter', nargs=1, metavar=('chapter'),
                            help='download one chapter')
    args = arg_parser.parse_args()
    bridge = Bridge()
    if not (args.all or args.fromto or args.chapter):
        # parser.error() prints the message and exits the process.
        arg_parser.error('No action requested, add --all or --fromto or --chapter')
    if args.all:
        first, last = 'start_chapter', 'end_chapter'
    elif args.fromto:
        first, last = args.fromto[0], args.fromto[1]
    else:
        first, last = args.chapter[0], args.chapter[0]
    bridge.start_download(args.manga_url, first, last)
| 12,246 | 3,747 |
from flask import jsonify
# In redash's __init__.py:
# setup_authentication(app)
# login_manager.init_app(app)
from flask_login import login_required
from redash.handlers.api import api
from redash.handlers.base import routes
from redash.monitor import get_status
from redash.permissions import require_super_admin
# Top-level entry for all handlers; exposing a /ping endpoint for
# diagnostics is a best practice.  Note that `routes` is not a Flask
# built-in but the project's master blueprint.
@routes.route('/ping', methods=['GET'])
def ping():
    """Liveness probe: always answers with a constant string."""
    return 'PONG.'
# System status: redis memory usage, uptime, log counts, task queue state,
# object counts, etc. (requires super-admin permission).
@routes.route('/status.json')
@login_required
@require_super_admin
def status_api():
    """Return the system status snapshot as JSON (super-admin only)."""
    return jsonify(get_status())
# Project-wide HTTP registration entry point, used by create_app in
# redash/__init__.py.
def init_app(app):
    """Register all HTTP handlers on the Flask ``app``.

    This is the single route-registration entry point exposed to
    create_app() in redash/__init__.py.
    """
    # Importing the handler modules runs their route decorators, which
    # attaches their endpoints to the blueprints as a side effect.
    from redash.handlers import embed, queries, static, authentication, admin, setup, organization
    # Register the master blueprint (the individual handlers hang off it).
    app.register_blueprint(routes)
    # Register the flask-restful API resources.
    api.init_app(app)
# from flask_restful import Api
# `api` here is an instance of the API wrapper.
# There are two ways to register a resource's URL:
# First:
#######################
# >> @resource.route('/local_resource')
# >> def resource():
# >>     pass
# Second:
#######################
# >> resource.add_url_rule("/local_resource", view_func=LocalResource.as_view(name="get_local_resource"))
| 1,237 | 556 |
"""Main executable."""
import argparse
import asyncio
from tenff.game import GameSettings, run_game
from tenff.terminal import TerminalInputHandler
from tenff.util import CORPORA_PATH, get_corpus_path, parse_corpus
# Default game duration, in seconds.
DEFAULT_TIME = 60

# Description shown at the top of the --help output.
PROLOG = (
    "A certain typing contest site spin-off in CLI, without all the "
    "advertisements, tracking and 10 megabytes of AJAX crap."
)
class CustomHelpFormatter(argparse.HelpFormatter):
    """Help formatter that merges short and long option aliases into a
    single concise invocation line.
    """

    def _format_action_invocation(self, action: argparse.Action) -> str:
        """Render how an action is invoked in the help text.

        argparse's default repeats the metavar after every alias:

            -c CORPUS, --corpus CORPUS

        This variant prints the metavar only once:

            -c, --corpus CORPUS
        """
        is_positional = not action.option_strings
        takes_no_value = action.nargs == 0
        if is_positional or takes_no_value:
            # Nothing to merge; use the stock formatting.
            return super()._format_action_invocation(action)
        metavar = self._get_default_metavar_for_optional(action)
        formatted_args = self._format_args(action, metavar)
        options = ", ".join(action.option_strings)
        return options + " " + formatted_args
def parse_args() -> argparse.Namespace:
    """Build the command-line parser and parse ``sys.argv``."""
    arg_parser = argparse.ArgumentParser(
        prog="10ff",
        description=PROLOG,
        formatter_class=CustomHelpFormatter,
    )
    arg_parser.add_argument(
        "-t", "--time",
        type=int,
        default=DEFAULT_TIME,
        help="how long to play the game for (in seconds)",
    )
    arg_parser.add_argument(
        "-c", "--corpus",
        type=str,
        default="english",
        help="path to the word list to play the game with",
    )
    arg_parser.add_argument(
        "-w", "--width",
        type=int,
        default=80,
        help="width of the terminal to play in",
    )
    arg_parser.add_argument(
        "-l", "--list",
        action="store_true",
        help="lists the built-in corpora",
    )
    arg_parser.add_argument(
        "-r", "--rigorous-spaces",
        action="store_true",
        help="treat double space as an error",
    )
    return arg_parser.parse_args()
def main() -> None:
    """Program entry point: set up the event loop, parse the CLI arguments
    and run the game.
    """
    loop = asyncio.new_event_loop()
    args = parse_args()

    # --list short-circuits the game: print the bundled corpora and quit.
    if args.list:
        for corpus_file in sorted(CORPORA_PATH.iterdir()):
            if corpus_file.suffix == ".txt":
                print(corpus_file.stem)
        return

    input_handler = TerminalInputHandler(loop)
    with input_handler.enable_raw_terminal():
        corpus = parse_corpus(get_corpus_path(args.corpus))
        settings = GameSettings(
            corpus=corpus,
            max_time=args.time,
            rigorous_spaces=args.rigorous_spaces,
        )
        loop.run_until_complete(run_game(loop, input_handler, settings))
    loop.close()
if __name__ == "__main__":
main()
| 3,017 | 903 |
"""76. Minimum Window Substring
https://leetcode.com/problems/minimum-window-substring/
Given a string S and a string T, find the minimum window in S which will
contain all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
If there is no such window in S that covers all characters in T, return the
empty string "".
If there is such window, you are guaranteed that there will always be only
one unique minimum window in S.
"""
class Solution:
    """Sliding-window solution to LeetCode 76 (Minimum Window Substring)."""

    def min_window(self, s: str, t: str) -> str:
        """Return the minimum window of ``s`` that contains every character
        of ``t`` (with multiplicity), or "" when no such window exists.

        Runs in O(len(s) + len(t)).  The original implementation rescanned
        the whole character-count table after every pointer move
        (``is_valid``), making it O(len(s) * len(t)); here a single
        ``missing`` counter tracks how many required characters the current
        window still lacks.
        """
        from collections import Counter

        if not t or not s:
            # Matches the original: empty inputs yield the empty string.
            return ""
        need = Counter(t)            # chars still required (may go negative)
        missing = len(t)             # required chars not yet in the window
        best_start, best_end = 0, 0  # best window so far, half-open [start, end)
        start = 0                    # left edge of the current window
        for end, ch in enumerate(s, start=1):
            # Extend the window by s[end - 1].
            if need[ch] > 0:
                missing -= 1
            need[ch] -= 1
            if missing == 0:
                # Shrink from the left while the window stays valid:
                # a negative count means that char is over-represented.
                while need[s[start]] < 0:
                    need[s[start]] += 1
                    start += 1
                if best_end == 0 or end - start < best_end - best_start:
                    best_start, best_end = start, end
        return s[best_start:best_end]
| 2,248 | 636 |
from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('inventory:index_assets').
app_name = 'inventory'

# CRUD-style routes grouped per model.  NOTE: detail routes take the
# primary key without a trailing slash (e.g. 'assets/read/<int:pk>').
urlpatterns = [
    # Asset types
    path('assettypes/',
         views.AssetTypesIndexView.as_view(), name='index_assettypes'),
    path('assettypes/create/',
         views.AssetTypeCreateView.as_view(), name='create_assettype'),
    path('assettypes/update/<int:pk>',
         views.AssetTypeUpdateView.as_view(), name='update_assettype'),
    path('assettypes/read/<int:pk>',
         views.AssetTypeReadView.as_view(), name='read_assettype'),
    path('assettypes/delete/<int:pk>',
         views.AssetTypeDeleteView.as_view(), name='delete_assettype'),
    # Featured assets (read-only listing + detail)
    path('featured/',
         views.AssetsFeaturedIndexView.as_view(), name='index_featured_assets'),
    path('featured/read/<int:pk>',
         views.AssetFeaturedReadView.as_view(), name='read_featured_asset'),
    # Assets
    path('assets/',
         views.AssetsIndexView.as_view(), name='index_assets'),
    path('assets/create/',
         views.AssetCreateView.as_view(), name='create_asset'),
    path('assets/update/<int:pk>',
         views.AssetUpdateView.as_view(), name='update_asset'),
    path('assets/read/<int:pk>',
         views.AssetReadView.as_view(), name='read_asset'),
    path('assets/delete/<int:pk>',
         views.AssetDeleteView.as_view(), name='delete_asset'),
    # Reservations (CRUD plus basket helper endpoints below)
    path('reservations/',
         views.ReservationsIndexView.as_view(), name='index_reservations'),
    path('reservations/create/',
         views.ReservationCreateView.as_view(), name='create_reservation'),
    path('reservations/update/<int:pk>',
         views.ReservationUpdateView.as_view(), name='update_reservation'),
    path('reservations/edit/<int:pk>',
         views.ReservationEditView.as_view(), name='edit_reservation'),
    path('reservations/read/<int:pk>',
         views.ReservationReadView.as_view(), name='read_reservation'),
    path('reservations/delete/<int:pk>',
         views.ReservationDeleteView.as_view(), name='delete_reservation'),
    # Basket helpers: the pk route is a view class, the rest are
    # function-based views.
    path('reservations/addbasket/<int:pk>',
         views.ReservationAddBasket.as_view(), name='add_basket_reservation'),
    path('reservations/addbasket/',
         views.add_basket, name='add_basket'),
    path('reservations/clearreservedassets/',
         views.clear_reserved_assets, name='clear_reserved_assets'),
    path('reservations/deletereservedasset/',
         views.delete_reserved_asset, name='delete_reserved_asset'),
    # Reserved assets
    path('reservedassets/',
         views.ReservedAssetsIndexView.as_view(), name='index_reservedassets'),
    path('reservedassets/create/',
         views.ReservedAssetCreateView.as_view(), name='create_reservedasset'),
    path('reservedassets/update/<int:pk>',
         views.ReservedAssetUpdateView.as_view(), name='update_reservedasset'),
    path('reservedassets/read/<int:pk>',
         views.ReservedAssetReadView.as_view(), name='read_reservedasset'),
    path('reservedassets/delete/<int:pk>',
         views.ReservedAssetDeleteView.as_view(), name='delete_reservedasset'),
    # Loaned assets
    path('loanedassets/',
         views.LoanedAssetsIndexView.as_view(), name='index_loanedassets'),
    path('loanedassets/create/',
         views.LoanedAssetCreateView.as_view(), name='create_loanedasset'),
    path('loanedassets/update/<int:pk>',
         views.LoanedAssetUpdateView.as_view(), name='update_loanedasset'),
    path('loanedassets/read/<int:pk>',
         views.LoanedAssetReadView.as_view(), name='read_loanedasset'),
    path('loanedassets/delete/<int:pk>',
         views.LoanedAssetDeleteView.as_view(), name='delete_loanedasset'),
    # Returned assets
    path('returnedassets/',
         views.ReturnedAssetsIndexView.as_view(), name='index_returnedassets'),
    path('returnedassets/create/',
         views.ReturnedAssetCreateView.as_view(), name='create_returnedasset'),
    path('returnedassets/update/<int:pk>',
         views.ReturnedAssetUpdateView.as_view(), name='update_returnedasset'),
    path('returnedassets/read/<int:pk>',
         views.ReturnedAssetReadView.as_view(), name='read_returnedasset'),
    path('returnedassets/delete/<int:pk>',
         views.ReturnedAssetDeleteView.as_view(), name='delete_returnedasset'),
]
| 4,138 | 1,325 |