text string | size int64 | token_count int64 |
|---|---|---|
import numpy as np
from smcpy.smc.smc_sampler import SMCSampler  # imported but unused here; kept for file compatibility
from smcpy.mcmc.mcmc_sampler import MCMCSampler
import sys
sys.path.append('../')
from spring_mass_model import SpringMassModel

# Initialize the spring-mass forward model on a fixed measurement time grid.
state0 = [0., 0.]  # initial conditions -- presumably [displacement, velocity]; TODO confirm order
measure_t_grid = np.arange(0., 5., 0.2)  # measurement times [s]
model = SpringMassModel(state0, measure_t_grid)

# Load noisy displacement observations from disk.
noise_stddev = 0.5
displacement_data = np.genfromtxt('../noisy_data.txt')

# Uniform priors over spring stiffness K and gravity g, with starting guesses.
initial_guess = {'K': 1.0, 'g': 1.0}
param_priors = {'K': ['Uniform', 0.0, 10.0],
                'g': ['Uniform', 0.0, 10.0]}

# MCMC sampling with the observation variance held fixed (fix_var=True).
num_samples = 100000
num_samples_burnin = 5000
mcmc = MCMCSampler(displacement_data, model, param_priors)
mcmc.generate_pymc_model(q0=initial_guess, std_dev0=noise_stddev, fix_var=True)
mcmc.sample(num_samples, num_samples_burnin)

# Posterior means of the two parameters.
# print() with a single argument is valid in both Python 2 and 3
# (the original bare `print x` statements were Python-2-only).
Kmean = np.mean(mcmc.MCMC.trace('K')[:])
gmean = np.mean(mcmc.MCMC.trace('g')[:])
print('\nK mean = %s' % Kmean)
print('g mean = %s\n' % gmean)

# Posterior diagnostics plots.
mcmc.plot_pairwise(keys=['K', 'g'])
mcmc.plot_pdf(keys=['K', 'g'])
| 1,098 | 468 |
"""
This is for the autoencoder, not for the RL-agent, to learn.
It just serves as one replay buffer.
"""
import os
import random
import logging
import numpy as np
from dqn.utils import save_npy, load_npy
# At first fill the replay buffer, then sample to learn.
import numpy as np
class SimpleDataSet(object):
    """A replay memory consisting of circular buffers for observed images,
    actions, and terminal flags, used to train an autoencoder.

    Frames are stored once and states (phis) are assembled on demand by
    taking `phi_length` consecutive frames with wrap-around indexing.
    """
    def __init__(self, config, rng, data_format="NHWC"):
        """Construct a DataSet.

        Arguments:
            config -- provides ae_screen_width/ae_screen_height (image size),
                ae_memory_size (number of time steps to store) and
                history_length (frames concatenated into one state)
            rng -- initialized numpy random number generator, used to
                choose random minibatches
            data_format -- "NHWC" (default) or channels-first layout
        """
        # TODO: Specify capacity in number of state transitions, not
        self.width = config.ae_screen_width
        self.height = config.ae_screen_height
        self.max_steps = config.ae_memory_size
        self.phi_length = config.history_length
        self.rng = rng
        self.data_format = data_format
        # The frame memory is stored in float format (not uint8).
        self.imgs = np.zeros((self.max_steps, self.height, self.width), dtype='float32')
        self.actions = np.zeros(self.max_steps, dtype='int32')
        self.terminal = np.zeros(self.max_steps, dtype='bool')
        # Circular-buffer bookkeeping: `bottom` is the oldest entry,
        # `top` is the next write position, `size` the current fill level.
        self.bottom = 0
        self.top = 0
        self.size = 0
    def add_sample(self, img, action, terminal):
        """Add a time step record at `top`, evicting the oldest entry
        (advancing `bottom`) once the buffer is full.

        Arguments:
            img -- observed image
            action -- action chosen by the agent
            terminal -- boolean indicating whether the episode ended
                after this time step
        """
        self.imgs[self.top] = img
        self.actions[self.top] = action
        self.terminal[self.top] = terminal
        if self.size == self.max_steps:
            self.bottom = (self.bottom + 1) % self.max_steps
        else:
            self.size += 1
        self.top = (self.top + 1) % self.max_steps
    def __len__(self):
        """Return an approximate count of stored state transitions."""
        # TODO: Properly account for indices which can't be used, as in
        # random_batch's check.
        return max(0, self.size - self.phi_length)
    def last_phi(self):
        """Return the most recent phi (sequence of image frames) as an
        (height, width, phi_length) array.

        NOTE(review): uses raw indices ending at `top` with wrap-around;
        assumes at least phi_length frames have been added — confirm callers
        guarantee this.
        """
        indexes = np.arange(self.top - self.phi_length, self.top)
        phi = np.transpose(self.imgs.take(indexes, axis=0, mode='wrap'), [1, 2, 0])
        return phi
    def last_action(self):
        # Most recently stored action. The modulus uses `size` (not
        # max_steps); this is equivalent while entries are contiguous from
        # index 0, but raises ZeroDivisionError on an empty buffer —
        # callers must add a sample first.
        index = (self.top - 1 + self.size) % self.size
        return self.actions[index]
    def random_batch(self, batch_size):
        """Return corresponding imgs and actions for batch_size randomly
        chosen state transitions.

        Returns (s_t, s_t_plus_1, actions) where s_t holds phi_length frames
        and s_t_plus_1 is the single following frame (autoencoder target).
        """
        # Allocate the response: phi_length "before" frames plus one "after".
        imgs = np.zeros((batch_size,
                         self.height,
                         self.width,
                         self.phi_length + 1),
                        dtype='float32')
        actions = np.zeros(batch_size, dtype='int32')
        count = 0
        while count < batch_size:
            # Randomly choose a time step from the replay memory.
            # index = self.rng.randint(self.bottom,
            #                          self.bottom + self.size - self.phi_length)
            # NOTE(review): indices are drawn from [0, size - phi_length)
            # without offsetting by `bottom`; once the buffer has wrapped,
            # a window crossing `top` can mix oldest and newest frames —
            # confirm this is acceptable for the autoencoder use case.
            index = self.rng.randint(0, self.size - self.phi_length)
            # Both the before and after states contain phi_length
            # frames, overlapping except for the first and last.
            all_indices = np.arange(index, index + self.phi_length + 1)
            end_index = index + self.phi_length - 1
            # Reject windows whose "before" frames span an episode boundary.
            if np.any(self.terminal.take(all_indices[:-1], mode='wrap')):
                continue
            # Add the state transition to the response.
            imgs[count] = np.transpose(self.imgs.take(all_indices, axis=0, mode='wrap'), [1, 2, 0])
            actions[count] = self.actions.take(end_index, mode='wrap')
            count += 1
        if self.data_format == "NHWC":
            s_t = imgs[..., :self.phi_length]
            s_t_plus_1 = imgs[..., -1]  # single frame, shape (N, H, W)
        else:
            # Channels-first: move the frame axis in front of H and W.
            imgs = np.transpose(imgs, [0, 3, 1, 2])
            s_t = imgs[:, :self.phi_length, ...]
            s_t_plus_1 = imgs[:, -1, ...]
        return s_t, s_t_plus_1, actions
| 4,463 | 1,337 |
import os
# Launch the adversarial attack against the wordLSTM target on the MR test set.
command = ' '.join([
    'python attack_classification.py',
    '--dataset_path data/mr/test.txt',
    '--target_model wordLSTM --batch_size 128',
    '--target_model_path model_nondist.pt',
    '--word_embeddings_path data/glove.6B.200d.txt',
    '--counter_fitting_embeddings_path data/counter-fitted-vectors.txt',
    '--counter_fitting_cos_sim_path data/cos_sim_counter_fitting.npy',
    '--USE_cache_path tf_cache',
])
# Disabled alternative: BERT/LSTM target variant.
#command = 'python attack_classification.py --dataset_path data/mr ' \
#          '--target_model lstm ' \
#          '--target_model model_nondist.pt' \
#          '--max_seq_length 256 --batch_size 32 ' \
#          '--counter_fitting_embeddings_path data/counter-fitted-vectors.txt ' \
#          '--counter_fitting_cos_sim_path data/cos_sim_counter_fitting.npy ' \
#          '--USE_cache_path tf_cache'
os.system(command)
| 932 | 335 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test ideal gas flow calculations
"""
import unittest
from utils.utils import deg_to_rad
from ideal_gas_flow import (isentropic, normal_shock, rayleigh,
prandtl_meyer, oblique_shock)
class TestIdealGasFlow(unittest.TestCase):
    """Spot-checks of the ideal-gas-flow relations at M = 2, gamma = 1.4,
    against tabulated compressible-flow values.
    """
    def test_isentropic(self):
        """Test Isentropic Flow Relations (area, temperature, pressure,
        density ratios and the inverse area-ratio lookup)."""
        M = 2.0
        gamma = 1.4
        self.assertAlmostEqual(isentropic.A_Astar(M, gamma), 1.687, places=2)
        self.assertAlmostEqual(isentropic.T0_T(M, gamma), 1.80, places=2)
        self.assertAlmostEqual(isentropic.p0_p(M, gamma), 7.824, places=2)
        self.assertAlmostEqual(isentropic.r0_r(M, gamma), 4.347, places=2)
        # Inverse calculation: [1] selects the supersonic branch -- TODO confirm
        self.assertAlmostEqual(isentropic.mach_from_area_ratio(1.687, gamma)[1], 2.0, places=2)
    def test_normal_shock(self):
        """Test Normal Shock Relations (post-shock Mach and jump ratios)."""
        M = 2.0
        gamma = 1.4
        self.assertAlmostEqual(normal_shock.mach(M, gamma), 0.5774, places=2)
        self.assertAlmostEqual(normal_shock.T2_T1(M, gamma), 1.687, places=2)
        self.assertAlmostEqual(normal_shock.p2_p1(M, gamma), 4.50, places=2)
        self.assertAlmostEqual(normal_shock.rho2_rho1(M, gamma), 2.667, places=2)
    def test_rayleigh(self):
        """Test Rayleigh Flow Relations (ratios to sonic reference state)."""
        M = 2.0
        gamma = 1.4
        self.assertAlmostEqual(rayleigh.T0_T0star(M, gamma), 0.7934, places=2)
        self.assertAlmostEqual(rayleigh.T_Tstar(M, gamma), 0.5289, places=2)
        self.assertAlmostEqual(rayleigh.p_pstar(M, gamma), 0.3636, places=2)
        self.assertAlmostEqual(rayleigh.rho_rhostar(M, gamma), 0.6875, places=2)
        # Inverse calculations
        self.assertAlmostEqual(rayleigh.mach(0.7934, gamma), 2, places=2)
    def test_prandtl_meyer(self):
        """Test Prandtl Meyer Relations (nu in radians and its inverse)."""
        M = 2.0
        gamma = 1.4
        self.assertAlmostEqual(prandtl_meyer.nu(M, gamma), 0.4604, places=2)
        # Inverse calculations
        self.assertAlmostEqual(prandtl_meyer.mach_from_nu(1.1481, gamma), 4, places=2)
        self.assertAlmostEqual(prandtl_meyer.mach_from_nu(0.4604, gamma), M, places=2)
    def test_oblique_shock(self):
        """Test Oblique Shock Relations (beta/theta mapping and post-shock
        conditions; reference angles taken from charts for M = 2)."""
        M = 2.0
        gamma = 1.4
        # From charts, for M = 2
        beta = deg_to_rad(44.0)
        theta = deg_to_rad(14.0)
        # Test beta <-> theta map
        self.assertAlmostEqual(oblique_shock.beta(M, theta, gamma), beta, places=2)
        self.assertAlmostEqual(oblique_shock.theta(M, beta, gamma), theta, places=2)
        # Test conditions behind the shock
        self.assertAlmostEqual(oblique_shock.mach(M, beta, theta, gamma), 1.482, places=1)
        self.assertAlmostEqual(oblique_shock.T2_T1(M, beta, gamma), 1.249, places=2)
        self.assertAlmostEqual(oblique_shock.p2_p1(M, beta, gamma), 2.088, places=2)
        self.assertAlmostEqual(oblique_shock.rho2_rho1(M, beta, gamma), 1.673, places=2)
        self.assertAlmostEqual(oblique_shock.u2_u1(M, beta, gamma), 0.8304, places=2)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| 3,242 | 1,320 |
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Tuple
import xarray as xr
from xcube.core.gridmapping import GridMapping
from xcube.core.normalize import encode_cube
from xcube.core.store import DataStorePool
from xcube.core.store import get_data_store_instance
from xcube.core.store import new_data_writer
from xcube.util.progress import observe_dask_progress
from ..config import OutputConfig
class CubeWriter:
    """Writes a data cube either through a configured data store or directly
    via a named data writer, as described by an :class:`OutputConfig`.
    """

    def __init__(self,
                 output_config: OutputConfig,
                 store_pool: DataStorePool = None):
        """
        :param output_config: target store/writer and write parameters
        :param store_pool: optional pool for sharing store instances
        """
        self._output_config = output_config
        self._store_pool = store_pool

    def write_cube(self,
                   cube: xr.Dataset,
                   gm: GridMapping) -> Tuple[str, xr.Dataset]:
        """Encode *cube* with grid mapping *gm* and write it.

        :return: tuple of the data identifier actually used and the
            (possibly attribute-amended) dataset that was written
        """
        output_config = self._output_config
        dataset = encode_cube(cube, grid_mapping=gm)
        with observe_dask_progress('writing cube', 100):
            write_params = output_config.write_params or {}
            store_params = output_config.store_params or {}
            if output_config.store_id:
                store_instance = get_data_store_instance(
                    output_config.store_id,
                    store_params=store_params,
                    store_pool=self._store_pool
                )
                writer = store_instance.store
                # Pass the writer id through to the store. An explicit
                # 'writer_id' already present in write_params wins.
                # (The former update(writer_id=..., **write_params) raised
                # TypeError on a duplicate 'writer_id' keyword and the
                # self-spread of write_params was a no-op.)
                write_params.setdefault('writer_id', output_config.writer_id)
            else:
                writer = new_data_writer(output_config.writer_id)
                # Use store params as defaults; explicit write params win.
                # (The former update(**store_params, **write_params) raised
                # TypeError when the two dicts shared a key.)
                write_params = {**store_params, **write_params}
            if not dataset.attrs.get('title'):
                # Set fallback title, so we can distinguish
                # datasets from stores in xcube-viewer
                dataset = dataset.assign_attrs(title=output_config.data_id)
            data_id = writer.write_data(
                dataset,
                data_id=output_config.data_id,
                replace=output_config.replace or False,
                **write_params
            )
        return data_id, dataset
| 3,221 | 925 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 19:57:23 2018
@author: dawnstear
"""
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import pandas as pd
from matplotlib.pyplot import scatter, figure, subplot, savefig
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import time
#import umap
import numpy as np
print('Starting to download 49,300 cell dataset')
#data49k = pd.read_csv('/Users/stearb/desktop/vae-scRNA-master/data/n_49300/GSE63472_P14Retina_merged_digital_expression.txt', sep='\t')
data49k = pd.read_csv('/scr1/users/stearb/scdata/n_493000/GSE63472_P14Retina_merged_digital_expression.txt',sep='\t')
#data49k = pd.read_csv('/Users/dawnstear/desktop/Mid_Atlantic_Poster/sc_data/n_49300/GSE63472_P14Retina_merged_digital_expression.txt',sep='\t')
#X = data49k_clipped.values
print('Done loading')
n_neighbors=30
n_components = 2
'''
########### tSNE ########################
start = time.time()
tsne = TSNE(learning_rate=100)
tsne_array = tsne.fit_transform(X)
time_elapsed = time.time() - start
plot2D(tsne_array,y,'tSNE','t-SNE: 27,500 cells with 15 cell subtypes',time_elapsed)
########################## PCA ########################
t = time.time()
pca = PCA(n_components=2, svd_solver='auto')
pca_array = pca.fit_transform(X)
time_elapsed = time.time() - t
print('pca time = %s ' % time_elapsed)
#plot2D(pca_array,y,'PCA','PCA: 27,500 cells with 15 subtypes', time_elapsed)
###################### LDA ###########################
t= time.time()
lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2)
lda_array = lda.fit_transform(X, y)
time_elapsed = int(time.time() - t)
#plot2D(lda_array,y,'LDA','LDA: 27,500 cells with 15 subtypes',time_elapsed)
########################Isomap ################################
t = time.time()
iso = manifold.Isomap(n_neighbors, n_components=2)
iso_array = iso.fit_transform(X)
time_elapsed = time.time() - t
print('iso time = %s ' % time_elapsed)
#plot2D(iso_array,y,'isomap','Isomap: 27,500 cells with 15 subtypes',time_elapsed)
####################### Spectral Embedding #########################
t = time.time()
spectral = manifold.SpectralEmbedding(n_components=2, random_state=0,eigen_solver="arpack")
spectral_array = spectral.fit_transform(X)
time_elapsed = time.time() - t
#plot2d(spectral_array,y,method='spectral',
# title='Spectral Embedding: 27,500 cells with 15 subtypes',time_elapsed)
print('spectral time = %s ' % time_elapsed)
###################### NNMF ##########################
t = time.time()
nnmf = decomposition.NMF(n_components=2, init='random', random_state=0)
nnmf_array = nnmf.fit_transform(X)
time_elapsed = time.time() - t
print('nnmf time = %s ' % time_elapsed)
#plot2D(nnmf_array,y,method='nnmf',
# title='Non negative matrix factorization:\n27,500 cells with 15 subtypes',time_elapsed)
################ UMAP ##############################
t = time.time()
umap_array = umap.UMAP().fit_transform(X)
time_elapsed = time.time() - t
print('umap time = %s ' % time_elapsed)
#plot2D(umap_array,y,'UMAP',
# 'Uniform Manifold Approximation and Projection:\n27,500 cells with 15 subtypes',time_elapsed)
'''
################ ZIFA ############################
| 3,276 | 1,301 |
from unittest import TestCase
import numpy as np
from athena.subspaces import Subspaces
class TestUtils(TestCase):
    """Unit tests for the athena Subspaces base class: default attribute
    state, NotImplementedError stubs, eigenvector partitioning, bootstrap
    replicates, and plotting guards.
    """
    # --- construction: all derived attributes start as None ---
    def test_init_W1(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.W1)
    def test_init_W2(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.W2)
    def test_init_evals(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.evals)
    def test_init_evects(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.evects)
    def test_init_evals_br(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.evals_br)
    def test_init_subs_br(self):
        ss = Subspaces(dim=1)
        self.assertIsNone(ss.subs_br)
    def test_init_dim(self):
        ss = Subspaces(dim=1)
        self.assertEqual(ss.dim, 1)
    # --- abstract interface: base class must raise NotImplementedError ---
    def test_fit(self):
        ss = Subspaces(dim=1)
        with self.assertRaises(NotImplementedError):
            ss.fit()
    def test_transform(self):
        ss = Subspaces(dim=1)
        with self.assertRaises(NotImplementedError):
            ss.transform(42)
    def test_inverse_transform(self):
        ss = Subspaces(dim=1)
        with self.assertRaises(NotImplementedError):
            ss.inverse_transform(10, 10)
    # --- _partition: splits evects columns into W1 (first dim) and W2 (rest) ---
    def test_partition_01(self):
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        ss = Subspaces(dim=2)
        ss.evects = matrix
        ss._partition()
        np.testing.assert_array_almost_equal(matrix[:, :2], ss.W1)
    def test_partition_02(self):
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        ss = Subspaces(dim=2)
        ss.evects = matrix
        ss._partition()
        np.testing.assert_array_almost_equal(matrix[:, 2:], ss.W2)
    def test_partition_03(self):
        # Non-integer dim must be rejected.
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        ss = Subspaces(dim=2.0)
        ss.evects = matrix
        with self.assertRaises(TypeError):
            ss._partition()
    def test_partition_04(self):
        # dim below the valid range (>= 1) must be rejected.
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        ss = Subspaces(dim=0)
        ss.evects = matrix
        with self.assertRaises(ValueError):
            ss._partition()
    def test_partition_05(self):
        # dim above the number of eigenvector columns must be rejected.
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        ss = Subspaces(dim=4)
        ss.evects = matrix
        with self.assertRaises(ValueError):
            ss._partition()
    # --- _bootstrap_replicate: resamples rows, returns (matrix, weights) ---
    def test_bootstrap_replicate_01(self):
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        weights = np.ones((3, 1)) / 3
        ss = Subspaces(dim=1)
        wei = ss._bootstrap_replicate(matrix, weights)[1]
        np.testing.assert_array_almost_equal(weights, wei)
    def test_bootstrap_replicate_02(self):
        np.random.seed(42)
        matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
        weights = np.ones((3, 1)) / 3
        ss = Subspaces(dim=1)
        mat = ss._bootstrap_replicate(matrix, weights)[0]
        # Expected rows for the seeded resample.
        true_matrix = np.array([[-0.88383278, 0.73235229, 0.20223002],
                                [0.19731697, -0.68796272, -0.68801096],
                                [-0.25091976, 0.90142861, 0.46398788]])
        np.testing.assert_array_almost_equal(true_matrix, mat)
    # --- plotting without fitted eigenpairs must fail loudly ---
    def test_plot_eigenvalues(self):
        ss = Subspaces(dim=1)
        with self.assertRaises(TypeError):
            ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
    def test_plot_eigenvectors(self):
        ss = Subspaces(dim=1)
        with self.assertRaises(TypeError):
            ss.plot_eigenvectors(n_evects=2, title='Eigenvectors')
    def test_plot_sufficient_summary(self):
        ss = Subspaces(dim=1)
        inputs = np.diag(np.ones(3))
        outputs = np.ones(3).reshape(3, 1)
        with self.assertRaises(TypeError):
            ss.plot_sufficient_summary(inputs,
                                       outputs,
                                       figsize=(7, 7),
                                       title='Sufficient_summary_plots')
| 4,148 | 1,533 |
from django.conf.urls import re_path
from .views import (
AdminLicensesView,
AdminLicenseView,
AdminLicenseFillSeatsView,
AdminRemoveAllUsersFromLicenseView,
AdminLicenseUserView,
AdminLicenseLookupUsersView,
AdminCheckLicense,
)
app_name = "baserow_premium.api.license"
# Admin license endpoints: list/detail plus per-license user management.
# Order matters only for readability here; the patterns are mutually
# exclusive because the literal suffixes differ.
urlpatterns = [
    # List all licenses.
    re_path(r"^$", AdminLicensesView.as_view(), name="list"),
    # Single license by numeric id.
    re_path(r"^(?P<id>[0-9]+)/$", AdminLicenseView.as_view(), name="item"),
    # Search users that could be added to the license.
    re_path(
        r"^(?P<id>[0-9]+)/lookup-users/$",
        AdminLicenseLookupUsersView.as_view(),
        name="lookup_users",
    ),
    # Fill all remaining seats automatically.
    re_path(
        r"^(?P<id>[0-9]+)/fill-seats/$",
        AdminLicenseFillSeatsView.as_view(),
        name="fill_seats",
    ),
    # Detach every user from the license.
    re_path(
        r"^(?P<id>[0-9]+)/remove-all-users/$",
        AdminRemoveAllUsersFromLicenseView.as_view(),
        name="remove_all_users",
    ),
    # Re-validate the license against the authority.
    re_path(
        r"^(?P<id>[0-9]+)/check/$",
        AdminCheckLicense.as_view(),
        name="check",
    ),
    # Add/remove a single user (license id + user id).
    re_path(
        r"^(?P<id>[0-9]+)/(?P<user_id>[0-9]+)/$",
        AdminLicenseUserView.as_view(),
        name="user",
    ),
]
| 1,131 | 443 |
"""
This is redirect server for https://oauth.gitflic.ru/oauth/authorize
Base URL: https://gitflic.santaspeen.ru/
Author: @SantaSpeen
License: MIT
"""
import json
import random
from string import ascii_letters, digits
from flask import Flask, request, redirect, abort
# Flask application serving the OAuth redirect endpoints.
app = Flask("gitflic oauth redirect")
# In-memory state store: state -> {"ip": ..., "code": ..., "redirect": ...}.
# NOTE(review): entries of abandoned flows are never evicted (unbounded growth).
cache = {}
@app.route("/favicon.ico")
def fav():
    # Permanent redirect to Gitflic's own favicon.
    return redirect("https://gitflic.ru/static/image/favicon/android-icon-192x192.png", 301)
@app.route("/", methods=["POST"])
def save_code():
    """Receive the OAuth code pushed by the Gitflic server and attach it to
    the pending state entry created via /getstate.

    Only accepted when proxied through Cloudflare from the Gitflic IP.
    Returns 404 for an unknown state, 403 otherwise.
    """
    headers = request.headers
    if headers.get('Cdn-Loop') == "cloudflare":
        if headers['Cf-Connecting-Ip'] == "84.47.177.90":  # Gitflic server ip
            jsn = json.loads(request.get_data())
            entry = cache.get(jsn['state'])
            if entry is None:
                # Previously an unknown state raised KeyError -> HTTP 500;
                # report it explicitly instead.
                return "Unknown state.", 404
            entry.update({"code": jsn['code']})
            return "ok", 200
    abort(403)
@app.route("/<user_code>", methods=["GET"])
def redirect_to_localhost(user_code):
    """Send the browser back to the local app with the received OAuth code.

    The cache entry is one-shot: it is deleted after a successful redirect.
    Returns 404 for an unknown state, 403 for an IP mismatch, 409 while the
    code has not yet been posted by Gitflic.
    """
    headers = request.headers
    if headers.get('Cdn-Loop') == "cloudflare":
        ip = headers['Cf-Connecting-Ip']
        if cache.get(user_code) is None:
            return "Unknown code.", 404
        if cache[user_code]['ip'] != ip:
            return "Cannot access from your IP.", 403
        if cache[user_code]['code'] is None:
            # Previously this redirected with the literal string 'code=None'
            # when the POST from Gitflic had not arrived yet.
            return "Code not received yet.", 409
        redirect_url = cache[user_code]['redirect'] + f"?code={cache[user_code]['code']}&state={user_code}"
        del cache[user_code]
        return redirect(redirect_url)
    abort(403)
@app.route("/getstate", methods=["GET"])
def getcode():
    """Create a new OAuth state bound to the caller's IP and local port.

    Returns {"state": ..., "allow_from": ip} with HTTP 201 on success;
    401 when the port is missing, 403 otherwise.
    """
    import secrets  # stdlib; unpredictable source for the state token
    headers = request.headers
    if headers.get('Cdn-Loop') == "cloudflare":
        ip = headers['Cf-Connecting-Ip']
        port = request.args.get('port') or abort(401)
        # Only dynamic/private ports are accepted for the localhost redirect.
        if port.isdigit() and 49152 <= int(port) <= 65535:
            # secrets.choice instead of random.choice: the state doubles as a
            # CSRF token and must not be predictable. Length stays 10-17 as
            # before (the length itself is not security-critical).
            state = ''.join(secrets.choice(ascii_letters + digits)
                            for _ in range(random.randint(10, 17)))
            cache.update({state: {"ip": ip, "code": None, "redirect": f"http://localhost:{port}/"}})
            return {"state": state, "allow_from": ip}, 201
    abort(403)
if __name__ == '__main__':
    # Development server: all interfaces, port 18948, Flask debug enabled.
    app.run("0.0.0.0", 18948, True)
| 2,052 | 731 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Franck Cuny <franck@lumberjaph.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies.
description:
- Manage Perl library dependencies.
version_added: "1.6"
options:
name:
description:
- The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
required: false
default: null
aliases: ["pkg"]
from_path:
description:
- The local directory from where to install
required: false
default: null
notest:
description:
- Do not run unit tests
required: false
default: false
locallib:
description:
- Specify the install base to install modules
required: false
default: false
mirror:
description:
- Specifies the base URL for the CPAN mirror to use
required: false
default: false
mirror_only:
description:
- Use the mirror's index file instead of the CPAN Meta DB
required: false
default: false
examples:
- code: "cpanm: name=Dancer"
description: Install I(Dancer) perl package.
- code: "cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz"
description: Install version 0.99_05 of the I(Plack) perl package.
- code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib"
description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)"
- code: "cpanm: from_path=/srv/webapps/my_app/src/"
description: Install perl dependencies from local directory.
- code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib"
description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).
- code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/"
description: Install I(Dancer) perl package from a specific mirror
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: Franck Cuny
'''
def _is_package_installed(module, name, locallib, cpanm):
cmd = ""
if locallib:
os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
cmd = "%s perl -M%s -e '1'" % (cmd, name)
res, stdout, stderr = module.run_command(cmd, check_rc=False)
if res == 0:
return True
else:
return False
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm):
# this code should use "%s" like everything else and just return early but not fixing all of it now.
# don't copy stuff like this
if from_path:
cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path)
else:
cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name)
if notest is True:
cmd = "{cmd} -n".format(cmd=cmd)
if locallib is not None:
cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib)
if mirror is not None:
cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror)
if mirror_only is True:
cmd = "{cmd} --mirror-only".format(cmd=cmd)
return cmd
def main():
    """Module entry point: parse arguments, install the package or local
    path via cpanm when it is not already importable, and report status."""
    arg_spec = dict(
        name=dict(default=None, required=False, aliases=['pkg']),
        from_path=dict(default=None, required=False),
        notest=dict(default=False, type='bool'),
        locallib=dict(default=None, required=False),
        mirror=dict(default=None, required=False),
        mirror_only=dict(default=False, type='bool'),
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        required_one_of=[['name', 'from_path']],
    )
    # Fails the task early if cpanm is missing on the remote host.
    cpanm = module.get_bin_path('cpanm', True)
    name = module.params['name']
    from_path = module.params['from_path']
    notest = module.boolean(module.params.get('notest', False))
    locallib = module.params['locallib']
    mirror = module.params['mirror']
    mirror_only = module.params['mirror_only']
    changed = False
    # NOTE(review): when only from_path is given, name is None and the
    # installed-check probes "perl -MNone" -- confirm intended behavior.
    installed = _is_package_installed(module, name, locallib, cpanm)
    if not installed:
        out_cpanm = err_cpanm = ''
        cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm)
        rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
        if rc_cpanm != 0:
            module.fail_json(msg=err_cpanm, cmd=cmd)
        # cpanm reports progress on stderr; "is up to date" means no change.
        if err_cpanm and 'is up to date' not in err_cpanm:
            changed = True
    module.exit_json(changed=changed, binary=cpanm, name=name)
# import module snippets
# AnsibleModule and helpers are injected at runtime by this star import.
from ansible.module_utils.basic import *
main()
| 5,241 | 1,749 |
import os, sys, pygame
# RGB color constants.
White = (0xFF, 0xFF, 0xFF)
Red = (0xFF, 0x00, 0x00)
Yellow = (0xFF, 0xFF, 0x00)
Green = (0x00, 0xFF, 0x00)
Blue = (0x00, 0x00, 0xFF)
Black = (0x00, 0x00, 0x00)
BKG = Black  # background fill color
class Player(pygame.Rect):
    """The paddle: a rect near the bottom of the screen, moved by the
    arrow keys (or the autopilot) and clamped to the screen edges."""

    def __init__(self):
        pygame.Rect.__init__(self, (0, 0), PlayerSize)
        self.center = (ScreenWidth // 2, ScreenHeight - 24)
        self.speed = 2

    def move_left(self):
        """Step left; stop at the ball, then clamp to the left edge."""
        self.x -= self.speed
        if self.colliderect(ball):
            self.left = ball.right
        self.left = max(self.left, 0)

    def move_right(self):
        """Step right; stop at the ball, then clamp to the right edge."""
        self.x += self.speed
        if self.colliderect(ball):
            self.right = ball.left
        self.right = min(self.right, ScreenWidth)

    def update(self):
        """Draw the paddle."""
        pygame.draw.rect(screen, Yellow, self)
class Ball(pygame.Rect):
    """The ball: advances one pixel per axis per frame, resolving X and Y
    collisions independently so bounces pick the correct axis to flip."""
    def __init__(self):
        pygame.Rect.__init__(self, (0, 0), BallSize)
        self.center = ScreenWidth // 2, ScreenHeight // 2
        # Unit steps; the signs encode the travel direction.
        self.xspeed = self.yspeed = 1
    def move(self):
        """Advance one step and update `score`.

        xboom/yboom record whether the X/Y direction must flip; pboom marks
        a paddle hit from above, which lets the player apply 'english' with
        the arrow keys. A brick hit removes the brick and adds its score;
        touching the bottom edge costs one point.
        """
        global score
        posx = self.x
        pboom = xboom = yboom = False
        # --- X axis: move, then resolve brick / paddle / wall contact. ---
        self.x += self.xspeed
        i = self.collidelist(wall.bricks)
        brick = wall.bricks.pop(i) if i >= 0 else None  # hit brick is removed
        if brick:
            xboom = True; score += brick.score
            if self.xspeed > 0: self.right = brick.left
            if self.xspeed < 0: self.left = brick.right
        if self.colliderect(player):
            xboom = True; score += 1
            if self.xspeed > 0: self.right = player.left
            if self.xspeed < 0: self.left = player.right
        if self.left < 0:
            xboom = True; self.left = 0
        if self.right > ScreenWidth:
            xboom = True; self.right = ScreenWidth
        # Remember resolved X, restore the old X before the Y pass.
        newx = self.x; self.x = posx
        # --- Y axis: same resolution, independently of X. ---
        self.y += self.yspeed
        i = self.collidelist(wall.bricks)
        brick = wall.bricks.pop(i) if i >= 0 else None
        if brick:
            yboom = True; score += brick.score
            if self.yspeed > 0: self.bottom = brick.top
            if self.yspeed < 0: self.top = brick.bottom
        if self.colliderect(player):
            pboom = yboom = True; score += 1
            if self.yspeed > 0: self.bottom = player.top
            if self.yspeed < 0: self.top = player.bottom
        if self.top < 0:
            yboom = True; self.top = 0
        if self.bottom > ScreenHeight:
            # Bottom edge: bounce but lose a point.
            yboom = True; self.bottom = ScreenHeight; score -= 1
        self.x = newx
        # --- Diagonal case: neither axis alone collided, but the combined
        # move might clip a corner; flip both directions then. ---
        if not xboom and not yboom:
            i = self.collidelist(wall.bricks)
            brick = wall.bricks.pop(i) if i >= 0 else None
            if brick:
                xboom = yboom = True; score += brick.score
                if self.xspeed > 0: self.right = brick.left
                if self.xspeed < 0: self.left = brick.right
                if self.yspeed > 0: self.bottom = brick.top
                if self.yspeed < 0: self.top = brick.bottom
            if self.colliderect(player):
                xboom = yboom = True; score += 1
                if self.xspeed > 0: self.right = player.left
                if self.xspeed < 0: self.left = player.right
                if self.yspeed > 0: self.bottom = player.top
                if self.yspeed < 0: self.top = player.bottom
        # Paddle hit from above: an arrow key against the travel direction
        # also reverses the horizontal direction (yspeed not flipped yet).
        if pboom and (self.yspeed > 0):
            key = pygame.key.get_pressed()
            if self.xspeed > 0 and key[pygame.K_LEFT ]: xboom = True
            if self.xspeed < 0 and key[pygame.K_RIGHT]: xboom = True
        if xboom: self.xspeed = - self.xspeed
        if yboom: self.yspeed = - self.yspeed
    def update(self):
        # Draw the ball.
        pygame.draw.rect(screen, Red, self)
class Brick(pygame.Rect):
    """One brick. Color shades from blue (top) to green (bottom); bricks
    nearer the top are worth more points."""

    def __init__(self, x, y):
        # y is the row index; x is a half-brick column index (odd rows are
        # offset by half a brick).
        shade = 255 * y // WallHeight
        self.color = (0, shade, 255 - shade)
        self.score = WallHeight - y
        left = x * BrickWidth // 2 + 1
        top = (y + WallPos) * BrickHeight + 1
        pygame.Rect.__init__(self, (left, top), (BrickWidth - 2, BrickHeight - 2))

    def update(self):
        """Draw the brick."""
        pygame.draw.rect(screen, self.color, self)
class Wall(object):
    """The brick field: two interleaved lattices (even rows aligned, odd
    rows offset by half a brick)."""

    def __init__(self):
        # Even rows first, then the offset odd rows — the list order is kept
        # identical to the original, since collidelist returns the first hit.
        even_rows = [Brick(x, y)
                     for y in range(0, WallHeight, 2)
                     for x in range(0, WallWidth * 2, 2)]
        odd_rows = [Brick(x, y)
                    for y in range(1, WallHeight, 2)
                    for x in range(-1, WallWidth * 2, 2)]
        self.bricks = even_rows + odd_rows

    def update(self):
        """Draw every remaining brick."""
        for brick in self.bricks:
            brick.update()
def showInfo():
    """Draw the score (top-right) and remaining brick count (top-left)."""
    font = pygame.font.Font(None, 24)
    score_text = font.render(str(score), 1, White)
    score_rect = score_text.get_rect()
    screen.blit(score_text, score_rect.move(ScreenWidth - score_rect.right - 4, 4))
    count_text = font.render(str(len(wall.bricks)), 1, White)
    count_rect = count_text.get_rect()
    screen.blit(count_text, count_rect.move(count_rect.left + 4, 4))
def showEnd():
    """Show 'GAME OVER' centered on screen, then pause for a second."""
    font = pygame.font.SysFont('Verdana', 32)
    text = font.render("GAME OVER", 1, White)
    rect = text.get_rect()
    rect.center = (ScreenWidth // 2, ScreenHeight // 2)
    screen.blit(text, rect)
    pygame.display.update()
    pygame.time.wait(1000)
def main():
    """Game loop: run until the wall is cleared or the player quits."""
    global screen, player, ball, wall, score
    screen = pygame.display.set_mode(ScreenSize)
    player = Player()
    ball = Ball()
    wall = Wall()
    score = 0
    automat = False  # autopilot toggle: 'a' enables, any arrow key disables
    while wall.bricks:
        pygame.time.delay(3)  # crude frame pacing (no fixed FPS clock)
        for e in pygame.event.get():
            if e.type == pygame.QUIT: return
            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE: return
            if e.type == pygame.KEYDOWN and e.key == pygame.K_a: automat = True
            if e.type == pygame.KEYDOWN and e.key == pygame.K_LEFT: automat = False
            if e.type == pygame.KEYDOWN and e.key == pygame.K_RIGHT: automat = False
        if automat:
            # Autopilot: keep the paddle under the ball.
            if ball.x < player.x: player.move_left()
            if ball.x + BallWidth > player.x + PlayerWidth: player.move_right()
        else:
            key = pygame.key.get_pressed()
            if key[pygame.K_LEFT ]: player.move_left()
            if key[pygame.K_RIGHT]: player.move_right()
        ball.move()
        # Redraw the whole frame from scratch.
        screen.fill(BKG)
        player.update()
        ball.update()
        wall.update()
        showInfo()
        pygame.display.update()
    showEnd()
# Screen and layout constants (pixels).
ScreenSize = ScreenWidth, ScreenHeight = 640, 480
BrickSize = BrickWidth, BrickHeight = 32, 16
WallPos = 3  # top offset of the wall, in brick rows
WallSize = WallWidth, WallHeight = ScreenWidth // BrickWidth, 10
BallSize = BallWidth, BallHeight = 8, 8
PlayerSize = PlayerWidth, PlayerHeight = 64, 4
if __name__ == "__main__":
    os.environ["SDL_VIDEO_CENTERED"] = "1"
    # NOTE(review): 'windib' is a Windows-only SDL video driver; this likely
    # fails on other platforms -- confirm target OS.
    os.environ["SDL_VIDEODRIVER"] = "windib"
    pygame.init()
    pygame.display.set_caption("PONG!")
    main()
    pygame.quit()
| 5,877 | 2,602 |
from .ships import *
from .battlegrid import * | 46 | 14 |
import pygame
import random
import random
import itertools
import json
import os
import matplotlib.pyplot as plt
from IPython import display
plt.ion()  # interactive mode so the training plot refreshes in place
def plot(scores, mean_scores, save=False):
    """Live-update the training curve: per-game score and running mean.

    With save=True the figure is also written to qlearning_main.png.
    """
    display.clear_output(wait=True)
    display.display(plt.gcf())
    plt.clf()
    plt.title('Training...')
    plt.xlabel('Number of Games')
    plt.ylabel('Score')
    plt.plot(scores)
    plt.plot(mean_scores)
    plt.ylim(ymin=0)
    # Annotate the latest value of each curve at its end point.
    last = len(scores) - 1
    plt.text(last, scores[-1], str(scores[-1]))
    last_mean = len(mean_scores) - 1
    plt.text(last_mean, mean_scores[-1], str(mean_scores[-1]))
    plt.show(block=False)
    plt.pause(.1)
    if save == True:
        plt.savefig('qlearning_main.png')
###### Initialization ######
# For screen (pixels; the play field is a 10px grid)
dimension_x=200
dimension_y=200
# For snake
snake_color=(0,0,255) #blue
head_x=int(dimension_x/2)
head_y=int(dimension_y/2)
snake_body=[(head_x,head_y)]  # head first, then tail segments
# For food
food_color=(255,0,0) #Red
food_x=random.randrange(0,dimension_x,10)
food_y=random.randrange(0,dimension_y,10)
# For RL Agent: state is 11 binary features; one Q value per (state, action).
states = list(itertools.product(*[(0, 1)] * 11))
actions=[0,1,2]  # action encoding -- presumably straight/left/right; TODO confirm
q_table={}
for state in states:
    for action in actions:
        q_table[(*state,action)]=0
# Resume a previously saved Q-table if present (keys were serialized as
# comma-joined strings; converted back to int tuples here).
try:
    with open('./q_table_main.json') as json_file:
        if os.stat("./q_table_main.json").st_size != 0:
            q_table = json.load(json_file)
            q_table=dict((tuple(map(int,k.split(","))), v) for k,v in q_table.items())
        else:
            print("File empty")
except OSError:
    print("File not found")
# Q-learning hyperparameters.
epsilon=0.7  # exploration rate
LR=0.1  # learning rate
discount=0.6  # discount factor
state_next=()
reward_next=0
action=1 # initially right
t=1
max_games=10000
# For game
black=(0,0,0)
scores=[]
mean_scores=[]
record=0
score=0
n_games=1
total_score=0
game_over= False
current_direction = pygame.K_RIGHT
# Resume the full training/session state from a previous run, if saved.
try:
    with open('./savedparams_main.json') as json_file:
        if os.stat("./savedparams_main.json").st_size != 0:
            saved_params = json.load(json_file)
            epsilon=saved_params["epsilon"]
            LR=saved_params["LR"]
            discount=saved_params["discount"]
            state_next=saved_params["state_next"]
            reward_next=saved_params["reward_next"]
            action=saved_params["action"]
            max_games=saved_params["max_games"]
            scores=saved_params["scores"]
            mean_scores=saved_params["mean_scores"]
            record=saved_params["record"]
            score=saved_params["score"]
            n_games=saved_params["n_games"]
            # NOTE(review): saved under key "total_scores" (plural) -- keep in
            # sync with the save side.
            total_score=saved_params["total_scores"]
            current_direction=saved_params["current_direction"]
            head_x=saved_params["head_x"]
            head_y=saved_params["head_y"]
            snake_body=saved_params["snake_body"]
            food_x=saved_params["food_x"]
            food_y=saved_params["food_y"]
        else:
            print("File empty")
except OSError:
    print("File not found")
pygame.init()
display_board=pygame.display.set_mode((dimension_x,dimension_y))
pygame.display.update()
###### Game ######
def place_food():
    """Spawn food on a random free grid cell and draw it."""
    global dimension_x, dimension_y, display_board, food_color, food_x, food_y, snake_body
    # Retry until the food lands on a cell not occupied by the snake.
    while True:
        food_x = random.randrange(0, dimension_x, 10)
        food_y = random.randrange(0, dimension_y, 10)
        if (food_x, food_y) not in snake_body:
            break
    pygame.draw.rect(display_board, food_color, [food_x, food_y, 10, 10])
    pygame.display.update()
def show_score():
    """Render the current score in the board's top-left corner."""
    global score, display_board
    score_font = pygame.font.SysFont("Segoe UI", 35)  # system font
    # render("text", antialias, color)
    surface = score_font.render("Score :{} ".format(score), False, (0, 255, 0))
    display_board.blit(surface, (0, 0))
# move snake on screen
def move_snake(speed=10):
    """Advance the snake one cell and return the RL reward for the move.

    Reward: -10 on collision (the game is reset), +10 for eating food,
    otherwise a small -0.01 living penalty.
    """
    global game_over,black,head_x,head_y,snake_body,snake_color,food_x,food_y,current_direction,record,score,display_board
    reward=0
    # remove previous snake from screen
    for (x,y) in snake_body:
        pygame.draw.rect(display_board,black,[x,y,10,10])
    pygame.display.update()
    # Shift the head one cell in the current travel direction.
    if current_direction == pygame.K_LEFT:
        head_x -= speed
    if current_direction == pygame.K_RIGHT:
        head_x += speed
    if current_direction == pygame.K_UP:
        head_y -= speed
    if current_direction == pygame.K_DOWN:
        head_y += speed
    if snake_collided(head_x,head_y):
        reward-=10
        # init_game() resets head/body/food and the per-game bookkeeping.
        init_game()
    if head_x==food_x and head_y==food_y:
        # Grow: add the new head cell and keep the tail.
        snake_body.insert(0,(head_x,head_y))
        score+=1
        reward+=10
        display_board.fill(black)
        place_food()
    else:
        # Ordinary step: small living penalty; head advances, tail retracts.
        reward+=(-0.01)
        snake_body.insert(0,(head_x,head_y))
        snake_body.pop()
    # add new snake from screen
    for (x,y) in snake_body:
        pygame.draw.rect(display_board,snake_color,[x,y,10,10])
    pygame.display.update()
    return reward
def snake_collided(x, y):
    """Return True if head cell (x, y) leaves the board or hits the body."""
    global dimension_x, dimension_y, snake_body
    # Cells span [0, dimension_x) x [0, dimension_y) in 10px steps.
    # BUG FIX: the original used `x > dimension_x` / `y > dimension_y`, so a
    # head at exactly dimension_x (one cell fully off-screen) was not flagged.
    if x < 0 or y < 0 or x >= dimension_x or y >= dimension_y or (x, y) in snake_body[1:]:
        return True
    return False
def init_game():
    """Record the finished game's stats, decay epsilon, and reset the board."""
    global head_x,head_y,snake_body,food_x,food_y,current_direction,display_board,score,mean_scores,scores,total_score,epsilon,n_games,record,max_games
    if score>record:
        record=score
    print('Game ', n_games, 'Score', score, 'Record:', record)
    scores.append((score))
    total_score += score
    mean_score = total_score / n_games
    n_games+=1
    mean_scores.append(mean_score)
    # `plot` is defined earlier in this file (outside this excerpt).
    plot(scores, mean_scores)
    # Decay exploration as training progresses so the agent exploits more.
    if(epsilon>(n_games/max_games)): epsilon-=(n_games/max_games)
    display_board.fill(black)
    # Re-centre the snake and respawn the food for the next game.
    head_x=int(dimension_x/2)
    head_y=int(dimension_y/2)
    snake_body=[(head_x,head_y)]
    food_x=random.randrange(0,dimension_x,10)
    food_y=random.randrange(0,dimension_y,10)
    current_direction = pygame.K_RIGHT
    score=0
    if(epsilon>0): epsilon-=0.0001
    place_food()
###### RL Agent ######
def play_agent(state):
    """Choose an action for *state* with an epsilon-greedy policy over q_table."""
    global q_table, epsilon, actions
    print("Agent : ")
    # Explore with probability epsilon.
    if random.random() < epsilon:
        chosen = random.randint(0, len(actions) - 1)
        print("Exploration : ", chosen)
        return chosen
    # Exploit: pick the action with the highest strictly-positive Q-value;
    # fall back to a random action when no Q-value beats zero.
    chosen = -1
    best_q = 0
    for candidate in actions:
        q = q_table[(*state, candidate)]
        if q > best_q:
            best_q = q
            chosen = candidate
    if chosen == -1:
        chosen = random.randint(0, len(actions) - 1)
    print("Exploitation : ", chosen)
    return chosen
def update_qtable(state, action, state_next, reward_next):
    """One Q-learning backup: move Q(state, action) toward the TD target."""
    global q_table, epsilon, actions, LR, discount
    # Best strictly-positive successor Q-value; defaults to action 0 otherwise.
    best_act = 0
    best_q = 0
    for candidate in actions:
        if q_table[(*state_next, candidate)] > best_q:
            best_q = q_table[(*state_next, candidate)]
            best_act = candidate
    target = reward_next + discount * q_table[(*state_next, best_act)]
    q_table[(*state, action)] += LR * (target - q_table[(*state, action)])
def take_action(action):
    """Apply a relative action: 0 = keep heading, 1 = turn left, 2 = turn right."""
    global current_direction
    # Directions arranged so that -1 steps counter-clockwise, +1 clockwise.
    ring = [pygame.K_LEFT, pygame.K_UP, pygame.K_RIGHT, pygame.K_DOWN]
    if action in (1, 2):
        step = -1 if action == 1 else 1
        current_direction = ring[(ring.index(current_direction) + step) % len(ring)]
def get_state(speed=10, lookahead=1):
    """Encode the current situation as an 11-tuple of booleans.

    Layout: (danger straight, danger right, danger left,
             moving left, right, up, down,
             food left, food right, food above, food below).
    *lookahead* is how many cells ahead are probed for danger.
    """
    global current_direction, head_x, head_y
    points_l = []
    points_r = []
    points_u = []
    points_d = []
    # Candidate cells 1..lookahead steps away in each absolute direction.
    for look in range(1, lookahead + 1):
        points_l.append((head_x - look * speed, head_y))
        points_r.append((head_x + look * speed, head_y))
        points_u.append((head_x, head_y - look * speed))
        points_d.append((head_x, head_y + look * speed))
    dir_l = current_direction == pygame.K_LEFT
    dir_r = current_direction == pygame.K_RIGHT
    dir_u = current_direction == pygame.K_UP
    dir_d = current_direction == pygame.K_DOWN
    danger_straight = False
    danger_right = False
    danger_left = False
    # Danger straight: probe cells ahead along the travel direction.
    for look in range(lookahead):
        danger_straight = (dir_r and snake_collided(points_r[look][0], points_r[look][1])) or (dir_l and snake_collided(points_l[look][0], points_l[look][1])) or (dir_u and snake_collided(points_u[look][0], points_u[look][1])) or (dir_d and snake_collided(points_d[look][0], points_d[look][1]))
        if danger_straight == True:
            break
    # Danger right: probe cells to the snake's relative right.
    for look in range(lookahead):
        danger_right = (dir_r and snake_collided(points_d[look][0], points_d[look][1])) or (dir_d and snake_collided(points_l[look][0], points_l[look][1])) or (dir_l and snake_collided(points_u[look][0], points_u[look][1])) or (dir_u and snake_collided(points_r[look][0], points_r[look][1]))
        if danger_right == True:
            break
    # Danger left: probe cells to the snake's relative left.
    for look in range(lookahead):
        danger_left = (dir_r and snake_collided(points_u[look][0], points_u[look][1])) or (dir_u and snake_collided(points_l[look][0], points_l[look][1])) or (dir_l and snake_collided(points_d[look][0], points_d[look][1])) or (dir_d and snake_collided(points_r[look][0], points_r[look][1]))
        if danger_left == True:
            break
    state = (
        # Danger straight
        danger_straight,
        # Danger right
        danger_right,
        # Danger left
        danger_left,
        # Move direction
        dir_l,
        dir_r,
        dir_u,
        dir_d,
        # Food location (screen y grows downward)
        food_x < head_x,
        food_x > head_x,
        food_y < head_y,
        # BUG FIX: this flag previously duplicated `food_y < head_y`, so the
        # agent could never tell when the food was below it.
        food_y > head_y,
    )
    return state
clock = pygame.time.Clock()
place_food()

def _save_progress():
    """Persist plots, a snapshot, the Q-table and training state for resuming."""
    global q_table
    plot(scores, mean_scores, save=True)
    print(q_table, "It is getting saved\n")
    # JSON keys must be strings: serialise each tuple key as "s0,s1,...,action".
    q_table = dict((str(','.join(map(str, k))), v) for k, v in q_table.items())
    pygame.image.save(display_board, "./snapshot_main.jpeg")
    with open("./q_table_main.json", "w") as out_file:
        json.dump(q_table, out_file)
    saved_params = {
        "epsilon": epsilon,
        "LR": LR,
        "discount": discount,
        "state_next": state_next,
        "reward_next": reward_next,
        "action": action,
        "max_games": max_games,
        "scores": scores,
        "mean_scores": mean_scores,
        "record": record,
        "score": score,
        "n_games": n_games,
        "total_scores": total_score,
        "current_direction": current_direction,
        "head_x": head_x,
        "head_y": head_y,
        "snake_body": snake_body,
        "food_x": food_x,
        "food_y": food_y,
    }
    # BUG FIX: the QUIT branch previously dumped q_table here instead of
    # saved_params, so savedparams_main.json never held the training state.
    with open("./savedparams_main.json", "w") as out_file:
        json.dump(saved_params, out_file)

try:
    while not game_over:
        show_score()
        for event in pygame.event.get():
            print(food_x, food_y, head_x, head_y)
            if event.type == pygame.QUIT:
                _save_progress()
                game_over = True
        # BUG FIX: stop before the agent step once saving converted q_table
        # keys to strings; the old code ran one more lookup and crashed.
        if game_over:
            break
        # One agent step: observe, act, move, learn.
        state = get_state()
        action = play_agent(state)
        take_action(action)
        reward = move_snake()
        next_state = get_state()
        update_qtable(state, action, next_state, reward)
        t += 1
        clock.tick(30)
except KeyboardInterrupt:
    # Ctrl-C also triggers a full save before shutting down.
    _save_progress()
    game_over = True
pygame.quit()
quit()
| 13,406 | 4,776 |
from django.db import models
from users.models import CustomUser
class BoardManager(models.Manager):
    """Manager that creates a Board together with its default related rows."""

    def create_board(self, author, **kwargs):
        """Create a board, register *author* as moderator, add one tag per color."""
        current_user = author
        board = Board.objects.create(author=current_user, **kwargs)
        # The creator automatically becomes a participant with moderator rights.
        ParticipantInBoard.objects.create(board=board,
                                          participant=current_user,
                                          is_moderator=True)
        # Pre-create an (initially unnamed) tag for every available color.
        for color in Tag.Color.values:
            Tag.objects.create(color=color, board=board)
        return board
class Board(models.Model):
    """A board owned by an author and shared with participants."""
    name = models.CharField(max_length=50,
                            verbose_name='Название',
                            help_text='Напишите название',
                            )
    # BUG FIX: verbose_name was misspelled 'Оисание'.
    description = models.TextField(verbose_name='Описание',
                                   help_text='Напишите описание',
                                   blank=True,
                                   )
    avatar = models.ImageField(upload_to='board_avatars',
                               blank=True,
                               verbose_name='Аватар',
                               help_text='Загрузите аватар'
                               )
    author = models.ForeignKey(CustomUser,
                               on_delete=models.CASCADE,
                               related_name='boards_author',
                               verbose_name='Автор',
                               )
    # Membership goes through ParticipantInBoard, which also records
    # per-participant moderator rights.
    participants = models.ManyToManyField(CustomUser,
                                          through='ParticipantInBoard',
                                          related_name='boards_participants',
                                          blank=True,
                                          verbose_name='Участники',
                                          )
    objects = BoardManager()

    class Meta:
        verbose_name = 'Доска'
        verbose_name_plural = 'Доски'
        ordering = ['name']

    def __str__(self):
        return self.name
class Tag(models.Model):
    """A colored label belonging to a board; six fixed colors are available."""

    class Color(models.IntegerChoices):
        RED = 1
        ORANGE = 2
        YELLOW = 3
        GREEN = 4
        BLUE = 5
        PURPLE = 6

    # Display hex code for each color choice. Lookup by the stored integer
    # works because IntegerChoices members compare/hash equal to their ints.
    color_to_hex = {
        Color.RED: '#f35a5a',
        Color.ORANGE: '#ff9b63',
        Color.YELLOW: '#fdff97',
        Color.GREEN: '#9bc665',
        Color.BLUE: '#67b5fd',
        Color.PURPLE: '#c173ff'
    }
    name = models.CharField(max_length=20,
                            verbose_name='Название тега',
                            help_text='Напишите название тега',
                            blank=True,
                            default=''
                            )
    color = models.PositiveSmallIntegerField(choices=Color.choices
                                             )
    board = models.ForeignKey(Board,
                              on_delete=models.CASCADE,
                              related_name='tags',
                              verbose_name='Доска',
                              )

    @property
    def hex(self):
        """Hex color string (e.g. '#f35a5a') for this tag's stored color."""
        return Tag.color_to_hex[self.color]

    class Meta:
        verbose_name = 'Тег'
        verbose_name_plural = 'Теги'

    def __str__(self):
        return f"Color: '{self.color}', name: '{self.name}'"
class Favorite(models.Model):
    """Marks a board as a favorite of a user; one row per (user, board) pair."""
    user = models.ForeignKey(CustomUser,
                             on_delete=models.CASCADE,
                             related_name='favorite_subscriber',
                             verbose_name='Пользователь',
                             )
    board = models.ForeignKey(Board,
                              on_delete=models.CASCADE,
                              related_name='favorite_board',
                              verbose_name='Доска',
                              )
    pub_date = models.DateTimeField(auto_now_add=True,
                                    verbose_name='Дата добавления',
                                    )

    class Meta:
        verbose_name = 'Избранный'
        verbose_name_plural = 'Избранные'
        # A user may favorite a given board only once.
        constraints = [models.UniqueConstraint(
            fields=['user', 'board'],
            name='unique_favorites_boards')]

    def __str__(self):
        return (f'Пользователь: {self.user}, '
                f'избранные доски: {self.board.name}')
class ParticipantInBoard(models.Model):
    """Through-model for Board.participants with a per-member moderator flag."""
    board = models.ForeignKey(Board,
                              on_delete=models.CASCADE,
                              verbose_name='Доска',
                              )
    participant = models.ForeignKey(CustomUser,
                                    on_delete=models.CASCADE,
                                    verbose_name='Участник'
                                    )
    # True when this participant may moderate the board (set for the author).
    is_moderator = models.BooleanField(default=False)

    class Meta:
        verbose_name = 'Участник в доске'
        verbose_name_plural = 'Участники в досках'

    def __str__(self):
        return f'Участник: {self.participant} => {self.board}'
| 4,999 | 1,332 |
import os
import logging
import time
import pathlib
import condo
def main(sourcesdeconn
        ,outputdir):
    """Extract DOF_TAXMAP.Condo records to a file under *outputdir*; return the count.

    NOTE(review): *sourcesdeconn* is currently unused — Condo() apparently
    locates the SDE connection itself; confirm against the condo module.
    """
    sourcecondo = condo.Condo()
    sourcecondo.extracttofile('DOF_TAXMAP.Condo'
                             ,outputdir)
    return sourcecondo.countcondos()
if __name__ == '__main__':
    psourcesdeconn = os.environ['SDEFILE']
    timestr = time.strftime("%Y%m%d-%H%M%S")
    retval = 1
    # Log into TARGETLOGDIR when configured, otherwise the working directory.
    try:
        targetlog = os.path.join(os.environ['TARGETLOGDIR']
                                ,'extractcondo-{0}.log'.format(timestr))
    except KeyError:
        # BUG FIX: was a bare `except:`, which would also swallow SystemExit,
        # KeyboardInterrupt, etc.; only a missing env var should fall back.
        targetlog = os.path.join(os.getcwd()
                                ,'extractcondo-{0}.log'.format(timestr))
    logging.basicConfig(filename=targetlog
                       ,level=logging.INFO)
    datadir = os.path.join(pathlib.Path(__file__).parent
                          ,'data')
    if not os.path.isfile(psourcesdeconn):
        logging.error("Condo source sde file {0} does not exist, "
                      "check SDEFILE environmental".format(psourcesdeconn))
        exit(retval)
    kount = main(psourcesdeconn
                ,datadir)
    # at this point our csv still has two bad duplicate types
    # condo_base_bbl condo_billing_bbl
    # A              X
    # A              X
    # B              Y
    # B              Z
    if (kount == 0 or kount is None):
        logging.error('Failed to extract any condos')
    else:
        logging.info('Successfully extracted {0} bbls to data directory'.format(kount))
        retval = 0
    exit(retval)
import fire
from sahi.predict import predict_fiftyone
def main():
    """Expose sahi's predict_fiftyone as a command-line interface via Fire."""
    fire.Fire(predict_fiftyone)

if __name__ == "__main__":
    main()
| 141 | 52 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
| 149 | 46 |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 27 08:11:48 2014
@author: space_000
"""
from scipy.io import loadmat
from WindPy import w
import pymongo as mg
from wsiTools import findDate
from mgWsi import upiter
# BUG FIX: raw strings avoid the invalid escape sequences '\F' and '\d' that
# newer Python versions warn about (and will eventually reject); the runtime
# values are unchanged.
d = loadmat(r'D:\FieldSHSZ')
Field = d['Field'].tolist()
# Query instruments in batches of `stride` to keep each wsi request small.
stride = 100
numF = range(len(Field))[::stride]
dt = loadmat(r'D:\dataTime')
timeD = dt['time']
times = findDate(timeD, '20140925', 30)
w.start()
client = mg.MongoClient()
db = client['MKD']
col = db['minData']
for t in times:
    for f in numF:
        # NOTE(review): str(t)+'15:01:00' concatenates with no separator —
        # confirm findDate returns date strings that already end with one.
        data = w.wsi(Field[f:f+stride], 'open,high,low,close,volume', str(t), str(t)+'15:01:00', 'showblank=0', barsize=1).Data[1:]
        uniField = set(data[0])
        upiter(data, uniField, t, col)
| 690 | 316 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Author: Tim Henderson
#Email: tim.tadh@hackthology.com
#For licensing see the LICENSE file in the top level directory.
import unittest, os, sys, base64, itertools, random, time, copy
import copy, collections
from random import randint, seed, shuffle
from zss import compare
from zss.test_tree import Node
seed(os.urandom(15))
def test_empty_tree_distance():
    """Empty-labelled trees are 0 apart; one insertion/removal costs 1."""
    cases = [
        (Node(''), Node(''), 0),
        (Node('a'), Node(''), 1),
        (Node(''), Node('b'), 1),
    ]
    for left, right, expected in cases:
        assert compare.distance(left, right) == expected
def test_paper_tree():
    """The worked example from the Zhang-Shasha paper: edit distance is 2."""
    left = (Node("f")
            .addkid(Node("d")
                    .addkid(Node("a"))
                    .addkid(Node("c")
                            .addkid(Node("b"))))
            .addkid(Node("e")))
    right = (Node("f")
             .addkid(Node("c")
                     .addkid(Node("d")
                             .addkid(Node("a"))
                             .addkid(Node("b"))))
             .addkid(Node("e")))
    dist = compare.distance(left, right)
    assert dist == 2
def test_simplelabelchange():
    """Three relabelled nodes give an edit distance of 3."""
    A = (Node("f")
         .addkid(Node("a")
                 .addkid(Node("h"))
                 .addkid(Node("c")
                         .addkid(Node("l"))))
         .addkid(Node("e")))
    B = (Node("f")
         .addkid(Node("a")
                 .addkid(Node("d"))
                 .addkid(Node("r")
                         .addkid(Node("b"))))
         .addkid(Node("e")))
    dist = compare.distance(A, B)
    # BUG FIX: `print dist` is Python 2-only syntax (a SyntaxError on
    # Python 3); the function form prints the same thing on both.
    print(dist)
    assert dist == 3
def test_incorrect_behavior_regression():
    """Regression: moving a child across siblings costs 2, not more."""
    A = (Node("a")
         .addkid(Node("b")
                 .addkid(Node("x"))
                 .addkid(Node("y"))))
    B = (Node("a")
         .addkid(Node("x"))
         .addkid(Node("b")
                 .addkid(Node("y"))))
    dist = compare.distance(A, B)
    # BUG FIX: `print dist` is Python 2-only syntax (a SyntaxError on
    # Python 3); the function form prints the same thing on both.
    print(dist)
    assert dist == 2
| 1,976 | 704 |
from tracardi.process_engine.destination.connector import Connector
class RabbitMqConnector(Connector):
    """Destination connector stub for RabbitMQ."""

    async def run(self, mapping, delta):
        # Placeholder implementation: currently just echoes the mapped payload
        # and the profile delta instead of publishing to a queue.
        print(mapping, delta)
| 180 | 55 |
from . import common
import pandas as pd
import matplotlib.pyplot as plt
from skbio.stats.ordination import OrdinationResults
from qiime2 import Artifact
def beta_3d_plot(
    artifact, metadata=None, hue=None, azim=-60, elev=30, s=80, ax=None,
    figsize=None, hue_order=None
):
    """
    Create a 3D scatter plot from PCoA results.
    +---------------------+---------------------------------------------------+
    | q2-diversity plugin | Example                                           |
    +=====================+===================================================+
    | QIIME 2 CLI         | qiime diversity pcoa [OPTIONS]                    |
    +---------------------+---------------------------------------------------+
    | QIIME 2 API         | from qiime2.plugins.diversity.methods import pcoa |
    +---------------------+---------------------------------------------------+
    Parameters
    ----------
    artifact : str or qiime2.Artifact
        Artifact file or object from the q2-diversity plugin with the
        semantic type ``PCoAResults`` or
        ``PCoAResults % Properties('biplot')``.
    metadata : str or qiime2.Metadata, optional
        Metadata file or object.
    hue : str, optional
        Grouping variable that will produce points with different colors.
    azim : int, default: -60
        Azimuthal viewing angle.
    elev : int, default: 30
        Elevation viewing angle.
    s : float, default: 80.0
        Marker size.
    ax : matplotlib.axes.Axes, optional
        Axes object to draw the plot onto, otherwise uses the current Axes.
    figsize : tuple, optional
        Width, height in inches. Format: (float, float).
    hue_order : list, optional
        Specify the order of categorical levels of the 'hue' semantic.
    Returns
    -------
    matplotlib.axes.Axes
        Axes object with the plot drawn onto it.
    See Also
    --------
    dokdo.api.ordinate
    dokdo.api.beta_2d_plot
    dokdo.api.beta_scree_plot
    dokdo.api.beta_parallel_plot
    dokdo.api.addbiplot
    """
    if isinstance(artifact, str):
        _pcoa_results = Artifact.load(artifact)
    else:
        _pcoa_results = artifact
    ordination_results = _pcoa_results.view(OrdinationResults)
    # Keep only the first three principal axes for the 3D plot.
    df = ordination_results.samples.iloc[:, :3]
    df.columns = ['A1', 'A2', 'A3']
    props = ordination_results.proportion_explained
    # FIX: the original had a no-op `df = df` branch for metadata is None.
    if metadata is not None:
        mf = common.get_mf(metadata)
        # Inner join drops samples missing from either ordination or metadata.
        df = pd.concat([df, mf], axis=1, join='inner')
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.view_init(azim=azim, elev=elev)
    if hue is None:
        ax.scatter(df['A1'], df['A2'], df['A3'], s=s)
    else:
        if hue_order is None:
            _hue_order = df[hue].unique()
        else:
            _hue_order = hue_order
        # One scatter call per hue level so each gets its own color + label.
        for label in _hue_order:
            a = df[df[hue] == label]
            ax.scatter(a['A1'], a['A2'], a['A3'], label=label, s=s)
        # BUG FIX: legend only when hue produced labelled artists; calling
        # ax.legend() with no handles triggers a Matplotlib warning.
        ax.legend()
    ax.set_xlabel(f'Axis 1 ({props[0]*100:.2f} %)')
    ax.set_ylabel(f'Axis 2 ({props[1]*100:.2f} %)')
    ax.set_zlabel(f'Axis 3 ({props[2]*100:.2f} %)')
    return ax
| 4,585 | 1,485 |
### Reference from: https://github.com/yysijie/st-gcn/tree/master/net
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Actionsrecognition.Utils import Graph
class GraphConvolution(nn.Module):
    """The basic module for applying a graph convolution.
    Args:
        - in_channels: (int) Number of channels in the input sequence data.
        - out_channels: (int) Number of channels produced by the convolution.
        - kernel_size: (int) Size of the graph convolving kernel.
        - t_kernel_size: (int) Size of the temporal convolving kernel.
        - t_stride: (int, optional) Stride of the temporal convolution. Default: 1
        - t_padding: (int, optional) Temporal zero-padding added to both sides of
            the input. Default: 0
        - t_dilation: (int, optional) Spacing between temporal kernel elements. Default: 1
        - bias: (bool, optional) If `True`, adds a learnable bias to the output.
            Default: `True`
    Shape:
        - Inputs x: Graph sequence in :math:`(N, in_channels, T_{in}, V)`,
                 A: Graph adjacency matrix in :math:`(K, V, V)`,
        - Output: Graph sequence out in :math:`(N, out_channels, T_{out}, V)`
            where
            :math:`N` is a batch size,
            :math:`K` is the spatial kernel size, as :math:`K == kernel_size`,
            :math:`T_{in}/T_{out}` is a length of input/output sequence,
            :math:`V` is the number of graph nodes.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1,
                 bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        # A single 2D convolution emits kernel_size stacked channel groups,
        # one group per spatial kernel slice of the adjacency tensor A.
        self.conv = nn.Conv2d(in_channels,
                              out_channels * kernel_size,
                              kernel_size=(t_kernel_size, 1),
                              padding=(t_padding, 0),
                              stride=(t_stride, 1),
                              dilation=(t_dilation, 1),
                              bias=bias)

    def forward(self, x, A):
        out = self.conv(x)
        batch, kc, frames, nodes = out.size()
        # Split channels into (kernel slice, feature) and contract each slice
        # with its adjacency matrix over the node dimension.
        out = out.view(batch, self.kernel_size, kc // self.kernel_size, frames, nodes)
        out = torch.einsum("nkctv,kvw->nctw", (out, A))
        return out.contiguous()
class st_gcn(nn.Module):
    """One spatial-temporal graph convolution unit: graph conv followed by a
    temporal conv, with a residual connection.
    Args:
        - in_channels: (int) Number of channels in the input sequence data.
        - out_channels: (int) Number of channels produced by the convolution.
        - kernel_size: (tuple) Size of the temporal convolving kernel and
            graph convolving kernel.
        - stride: (int, optional) Stride of the temporal convolution. Default: 1
        - dropout: (int, optional) Dropout rate of the final output. Default: 0
        - residual: (bool, optional) If `True`, applies a residual mechanism.
            Default: `True`
    Shape:
        - Inputs x: Graph sequence in :math:`(N, in_channels, T_{in}, V)`,
                 A: Graph adjacency matrix in :math:`(K, V, V)`,
        - Output: Graph sequence out in :math:`(N, out_channels, T_{out}, V)`.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 dropout=0, residual=True):
        super().__init__()
        assert len(kernel_size) == 2
        # The temporal kernel must be odd so "same" padding keeps T aligned.
        assert kernel_size[0] % 2 == 1
        pad = ((kernel_size[0] - 1) // 2, 0)
        self.gcn = GraphConvolution(in_channels, out_channels, kernel_size[1])
        self.tcn = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels,
                      (kernel_size[0], 1), (stride, 1), pad),
            nn.BatchNorm2d(out_channels),
            nn.Dropout(dropout, inplace=True),
        )
        # Residual branch: zero, identity, or a 1x1 projection when channel
        # count or temporal stride changes.
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=(stride, 1)),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        shortcut = self.residual(x)
        out = self.tcn(self.gcn(x, A)) + shortcut
        return self.relu(out)
class StreamSpatialTemporalGraph(nn.Module):
    """Spatial temporal graph convolutional networks.
    Args:
        - in_channels: (int) Number of input channels.
        - graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class.
        - num_class: (int) Number of class outputs. If `None` return pooling features of
            the last st-gcn layer instead.
        - edge_importance_weighting: (bool) If `True`, adds a learnable importance
            weighting to the edges of the graph.
        - **kwargs: (optional) Other parameters for graph convolution units.
    Shape:
        - Input: :math:`(N, in_channels, T_{in}, V_{in})`
        - Output: :math:`(N, num_class)` where
            :math:`N` is a batch size,
            :math:`T_{in}` is a length of input sequence,
            :math:`V_{in}` is the number of graph nodes,
        or If num_class is `None`: `(N, out_channels)`
            :math:`out_channels` is number of out_channels of the last layer.
    """
    def __init__(
        self,
        in_channels,
        graph_args,
        num_class=None,
        edge_importance_weighting=True,
        **kwargs
    ):
        super().__init__()
        # Load graph.
        graph = Graph(**graph_args)
        # Buffer (not a Parameter): the adjacency tensor moves with the module
        # across devices but is never trained.
        A = torch.tensor(graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer("A", A)
        # Networks.
        spatial_kernel_size = A.size(0)
        temporal_kernel_size = 9
        kernel_size = (temporal_kernel_size, spatial_kernel_size)
        # First layer gets no dropout; strip it from its kwargs.
        kwargs0 = {k: v for k, v in kwargs.items() if k != "dropout"}
        self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
        # Channel plan: 64 x4 -> 128 x3 (stride 2 first) -> 256 x3 (stride 2 first).
        self.st_gcn_networks = nn.ModuleList(
            (
                st_gcn(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
                st_gcn(64, 64, kernel_size, 1, **kwargs),
                st_gcn(64, 64, kernel_size, 1, **kwargs),
                st_gcn(64, 64, kernel_size, 1, **kwargs),
                st_gcn(64, 128, kernel_size, 2, **kwargs),
                st_gcn(128, 128, kernel_size, 1, **kwargs),
                st_gcn(128, 128, kernel_size, 1, **kwargs),
                st_gcn(128, 256, kernel_size, 2, **kwargs),
                st_gcn(256, 256, kernel_size, 1, **kwargs),
                st_gcn(256, 256, kernel_size, 1, **kwargs),
            )
        )
        # initialize parameters for edge importance weighting.
        if edge_importance_weighting:
            self.edge_importance = nn.ParameterList(
                [nn.Parameter(torch.ones(A.size())) for i in self.st_gcn_networks]
            )
        else:
            self.edge_importance = [1] * len(self.st_gcn_networks)
        if num_class is not None:
            self.cls = nn.Conv2d(256, num_class, kernel_size=1)
        else:
            # Identity head: return pooled features instead of class scores.
            self.cls = lambda x: x

    def forward(self, x):
        # data normalization: batch-norm over (channel, node) jointly, so the
        # tensor is reshaped to (N, V*C, T) and back.
        N, C, T, V = x.size()
        x = x.permute(0, 3, 1, 2).contiguous()  # (N, V, C, T)
        x = x.view(N, V * C, T)
        x = self.data_bn(x)
        x = x.view(N, V, C, T)
        x = x.permute(0, 2, 3, 1).contiguous()
        x = x.view(N, C, T, V)
        # forward: each layer scales the shared adjacency by its importance.
        for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
            x = gcn(x, self.A * importance)
        # Global average pool over the remaining (T, V) grid.
        x = F.avg_pool2d(x, x.size()[2:])
        x = self.cls(x)
        x = x.view(x.size(0), -1)
        return x
class TwoStreamSpatialTemporalGraph(nn.Module):
    """Two inputs spatial temporal graph convolutional networks.
    Args:
        - graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class.
        - num_class: (int) Number of class outputs.
        - edge_importance_weighting: (bool) If `True`, adds a learnable importance
            weighting to the edges of the graph.
        - **kwargs: (optional) Other parameters for graph convolution units.
    Shape:
        - Input: tuple of :math:`((N, 3, T, V), (N, 2, T, V))` for the points
            and motions streams, where N is batch, T sequence length, V nodes.
        - Output: :math:`(N, num_class)` sigmoid scores.
    """

    def __init__(self, graph_args, num_class, edge_importance_weighting=True, **kwargs):
        super().__init__()
        # Coordinate stream consumes (x, y, score); motion stream (mot_x, mot_y).
        self.pts_stream = StreamSpatialTemporalGraph(
            3, graph_args, None, edge_importance_weighting, **kwargs
        )
        self.mot_stream = StreamSpatialTemporalGraph(
            2, graph_args, None, edge_importance_weighting, **kwargs
        )
        # Each stream pools to 256 features; classify their concatenation.
        self.fcn = nn.Linear(256 * 2, num_class)

    def forward(self, inputs):
        pts_features = self.pts_stream(inputs[0])
        mot_features = self.mot_stream(inputs[1])
        fused = torch.cat([pts_features, mot_features], dim=-1)
        return torch.sigmoid(self.fcn(fused))
| 9,682 | 3,229 |
import os
import math
import argparse
from tqdm import tqdm
import pandas as pd
import torch
import torch.nn as nn
import pyro
import mlflow
from pharmacokinetic import Pharmacokinetic
from experiment_tools.pyro_tools import auto_seed
from experiment_tools.output_utils import get_mlflow_meta
from estimators.mi import PriorContrastiveEstimation, NestedMonteCarloEstimation
from neural.aggregators import ImplicitDeepAdaptiveDesign
from neural.baselines import RandomDesignBaseline, ConstantBatchBaseline
def evaluate_nontrainable_policy_pk(
    mlflow_experiment_name,
    num_experiments_to_perform,
    policy,  # random or equal_interval
    device,
    n_rollout=2048 * 2,
    num_inner_samples=int(5e5),
    seed=-1,
):
    """Evaluate a non-trainable design policy on the pharmacokinetic model.

    For each T in *num_experiments_to_perform*, estimates a PCE lower bound and
    an NMC upper bound on expected information gain, logs to MLflow, and writes
    the table to mlflow_outputs/eval.csv.

    Parameters:
        mlflow_experiment_name: MLflow experiment to log under.
        num_experiments_to_perform: iterable of design counts T.
        policy: 'random' or 'equal_interval' baseline design policy.
        device: torch device string for all tensors.
        n_rollout: total number of rollouts for the bound estimates.
        num_inner_samples: inner contrastive samples for the bounds.
        seed: RNG seed; -1 draws a fresh one via auto_seed.
    """
    pyro.clear_param_store()
    seed = auto_seed(seed)
    mlflow.set_experiment(mlflow_experiment_name)
    mlflow.log_param("seed", seed)
    mlflow.log_param("baseline_type", policy)
    mlflow.log_param("n_rollout", n_rollout)
    mlflow.log_param("num_inner_samples", num_inner_samples)
    # Rollouts are executed in batches of `factor`, so divide the total count.
    factor = 16
    n_rollout = n_rollout // factor
    n = 1
    design_dim = (n, 1)
    EIGs = pd.DataFrame(
        columns=["mean_lower", "se_lower", "mean_upper", "se_upper"],
        index=num_experiments_to_perform,
    )
    # Prior location for the three PK parameters, in log-space.
    theta_prior_loc = torch.tensor([1, 0.1, 20], device=device).log()
    # covariance of the prior
    theta_prior_covmat = torch.eye(3, device=device) * 0.05
    uniform_sampler = torch.distributions.Uniform(
        torch.tensor(-5.0, device=device), torch.tensor(5.0, device=device)
    )
    for T in num_experiments_to_perform:
        if policy == "equal_interval":
            # ASSUMPTION: first design 5 min after administation
            transformed_designs = (
                torch.linspace(5.0 / 60, 23.9, T, dtype=torch.float32) / 24.0
            )
            # Map times in (0, 1) to the unconstrained space via logit.
            equispaced_constant_policy = torch.log(
                transformed_designs / (1 - transformed_designs)
            ).to(device)
            design_net = ConstantBatchBaseline(
                const_designs_list=equispaced_constant_policy.unsqueeze(1),
                design_dim=design_dim,
            )
        elif policy == "random":
            design_net = RandomDesignBaseline(
                design_dim=design_dim, random_designs_dist=uniform_sampler
            )
        # Model and losses
        pk_model = Pharmacokinetic(
            design_net=design_net,
            T=T,
            theta_loc=theta_prior_loc,
            theta_covmat=theta_prior_covmat,
        )
        pce_loss_lower = PriorContrastiveEstimation(
            pk_model.model, factor, num_inner_samples
        )
        pce_loss_upper = NestedMonteCarloEstimation(
            pk_model.model, factor, num_inner_samples
        )
        # Re-seed so lower and upper estimators see identical randomness.
        auto_seed(seed)
        EIG_proxy_lower = torch.tensor(
            [-pce_loss_lower.loss() for _ in range(n_rollout)]
        )
        auto_seed(seed)
        EIG_proxy_upper = torch.tensor(
            [-pce_loss_upper.loss() for _ in range(n_rollout)]
        )
        EIGs.loc[T, "mean_lower"] = EIG_proxy_lower.mean().item()
        EIGs.loc[T, "se_lower"] = EIG_proxy_lower.std().item() / math.sqrt(n_rollout)
        EIGs.loc[T, "mean_upper"] = EIG_proxy_upper.mean().item()
        EIGs.loc[T, "se_upper"] = EIG_proxy_upper.std().item() / math.sqrt(n_rollout)
    EIGs.to_csv(f"mlflow_outputs/eval.csv")
    mlflow.log_artifact(f"mlflow_outputs/eval.csv", artifact_path="evaluation")
    mlflow.log_param("status", "complete")
    print(EIGs)
    print("Done!")
    return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="iDAD: Pharmacokinetic model,nontrainable baselines."
)
parser.add_argument(
"--mlflow-experiment-name", default="pharmaco_baselines_nontrainable", type=str,
)
parser.add_argument("--seed", default=-1, type=int)
parser.add_argument(
"--policy", default="random", choices=["random", "equal_interval"], type=str
)
parser.add_argument("--num-experiments-to-perform", nargs="+", default=[5, 10])
parser.add_argument("--device", default="cuda", type=str)
args = parser.parse_args()
args.num_experiments_to_perform = [
int(x) if x else x for x in args.num_experiments_to_perform
]
evaluate_nontrainable_policy_pk(
mlflow_experiment_name=args.mlflow_experiment_name,
num_experiments_to_perform=args.num_experiments_to_perform,
device=args.device,
policy=args.policy,
seed=args.seed,
)
| 4,572 | 1,584 |
# Runtime configuration constants.
TOKEN = '' # your token should be here
# Directory where downloaded images are stored.
file_path_to_download = './images/'
# Directory holding model checkpoints.
checkpoint_dir = './scripts/checkpoints/'
# VGG19 weights for neural style transfer; download from
# https://download.pytorch.org/models/vgg19-dcbb9e9d.pth
nst_state_dict = './nst/vgg19-dcbb9e9d.pth'
| 311 | 111 |
"""app/errors.py"""
from flask import render_template, Blueprint, make_response, jsonify
from werkzeug.http import HTTP_STATUS_CODES
bp = Blueprint('errors', __name__) #pylint: disable=C
def error_response(status_code, message=None):
    """Build a JSON error response: '<code> - <reason>' plus optional message."""
    reason = HTTP_STATUS_CODES.get(status_code, 'Unknown error')
    payload = {'error': str(status_code) + " - " + reason}
    if message:
        payload['message'] = message
    response = jsonify(payload)
    response.status_code = status_code
    return response
@bp.app_errorhandler(400)
def error_400(error): #pylint: disable=W
    """Return a JSON error body for 400 Bad Request."""
    return error_response(400)
@bp.app_errorhandler(404)
def error_404(error): #pylint: disable=W
    """Return a JSON error body for 404 Not Found."""
    return error_response(404)
@bp.app_errorhandler(405)
def error_405(error): #pylint: disable=W
    """Return a JSON error body for 405 Method Not Allowed."""
    return error_response(405)
@bp.app_errorhandler(429)
def error_429(error): #pylint: disable=W
    """Return a JSON error body for 429 Too Many Requests."""
    return error_response(429)
@bp.app_errorhandler(500)
def internal_error(error): #pylint: disable=W
    """Return a JSON error body for 500 Internal Server Error."""
    return error_response(500)
| 1,126 | 425 |
from struct import pack
from bglcapi.base_command import command
from bglcapi.types import (MessageType, MessageClass)
def bonding_confirm(connection, confirm):
    """Build the security-manager bonding_confirm command (SM class, id 0x0e)."""
    return command(MessageType.COMMAND_RESPONSE.value,
                   MessageClass.SM.value,
                   0x0e,
                   pack('<BB', connection, confirm))
def configure(flags, io_capabilities):
    """Build the SM configure command (message id 0x01)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x01,
        pack('<BB', flags, io_capabilities),
    )
def delete_bonding(bonding):
    """Build the SM delete-bonding command (message id 0x06)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x06,
        pack('<B', bonding),
    )
def delete_bondings():
    """Build the SM delete-all-bondings command (message id 0x07, no payload)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x07,
        b'',
    )
def enter_passkey(connection, passkey):
    """Build the SM enter-passkey command (message id 0x08; passkey is int32)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x08,
        pack('<Bi', connection, passkey),
    )
def increase_security(connection):
    """Build the SM increase-security command (message id 0x04)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x04,
        pack('<B', connection),
    )
def list_all_bondings():
    """Build the SM list-all-bondings command (message id 0x0b, no payload)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x0b,
        b'',
    )
def passkey_confirm(connection, confirm):
    """Build the SM passkey-confirm command (message id 0x09)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x09,
        pack('<BB', connection, confirm),
    )
def set_bondable_mode(bondable):
    """Build the SM set-bondable-mode command (message id 0x00)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x00,
        pack('<B', bondable),
    )
def set_debug_mode():
    """Build the SM set-debug-mode command (message id 0x0f, no payload)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x0f,
        b'',
    )
def set_oob_data(oob_data):
    """Build the SM set-OOB-data command (message id 0x0a): length byte + raw data."""
    body = pack('<B', len(oob_data)) + bytes(oob_data)
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x0a,
        body,
    )
def set_passkey(passkey):
    """Build the SM set-passkey command (message id 0x10; passkey is int32)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x10,
        pack('<i', passkey),
    )
def set_sc_remote_oob_data(oob_data):
    """Build the SM set-SC-remote-OOB-data command (message id 0x12): length byte + raw data."""
    body = pack('<B', len(oob_data)) + bytes(oob_data)
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x12,
        body,
    )
def store_bonding_configuration(max_bonding_count, policy_flags):
    """Build the SM store-bonding-configuration command (message id 0x02)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x02,
        pack('<BB', max_bonding_count, policy_flags),
    )
def use_sc_oob(enable):
    """Build the SM use-secure-connections-OOB command (message id 0x11)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.SM.value,
        0x11,
        pack('<B', enable),
    )
| 3,700 | 1,481 |
# Generated by Django 3.2.4 on 2021-07-14 20:25
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``Product.uodated_at`` field to ``updated_at``."""
    dependencies = [
        ('products', '0002_comments'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='uodated_at',
            new_name='updated_at',
        ),
    ]
| 363 | 126 |
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils import timezone
from .models import Post,views
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
import youtube_dl
from django.core.files import File
from .models import FileSaver
import codecs
from django.views.static import serve
import os
from subprocess import call
import requests
from wsgiref.util import FileWrapper
from django.http import HttpResponse
def post_new(request):
    """Page-view counter + youtube-dl download view.

    GET: render the empty PostForm.  POST: save the Post, download the
    submitted URL via youtube-dl in the chosen format (1=mp3, 2=mp4,
    3=mp4 served as mkv) and stream the file back as an attachment.

    NOTE(review): several issues worth fixing —
    * the view-counter read/modify/write is not atomic (use F('k') + 1);
    * `ct` is only bound inside the f==1/2/3 branches, so an unexpected
      format value raises NameError at `HttpResponse(...)`;
    * the `redirect(...)` after `return response` is unreachable;
    * the youtube-dl command line embeds user input `l` via split() —
      shell=False avoids shell injection, but argument injection is
      still possible; pass an explicit argv list instead.
    """
    # Bump and read the global page-view counter (row pk=1).
    v = views.objects.get(pk=1)
    v.k+=1
    v.save()
    v = v.k
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            l=post.text        # submitted media URL
            f=post.format      # requested output format ('1'/'2'/'3')
            ydl_opts = {}
            # Probe only (download=False) to learn the output filename.
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(l, download=False)
                download_target = ydl.prepare_filename(info)
            a=download_target
            print(a)
            b=download_target[-3:]
            print(b)
            # Strip the extension but keep the trailing dot; the format
            # branches below append the final extension.
            if (b=="mp4" or b=="mkv"):
                a=download_target[:-3]
                print(a)
            else:
                # assumes any other extension is 4 chars long (e.g. webm) — TODO confirm
                a=download_target[:-4]
                print("else",a)
            print(a)
            if f=="1":
                # Audio-only: extract mp3.
                url="youtube-dl --extract-audio --audio-format mp3 "+l
                a+="mp3"
                ct='audio/mp3'
                command = url
                call(command.split(), shell=False)
            if f=="2":
                # Best mp4 video + m4a audio, merged to mp4.
                url="youtube-dl -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4 "+l
                command = url
                call(command.split(), shell=False)
                a+="mp4"
                print("f=2 mp4 video",a)
                ct='video/mp4'
            if f=="3":
                # Same download as f==2 but advertised as mkv content type.
                url="youtube-dl -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4 "+l
                a+="mp4"
                ct='video/mkv'
                command = url
                call(command.split(), shell=False)
            print(a)
            # Stream the downloaded file back as an attachment.
            filepath = a
            wrapper = FileWrapper(open(filepath, 'rb'))
            response = HttpResponse(wrapper, content_type=ct)
            response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(filepath)
            response['Content-Length'] = os.path.getsize(filepath)
            response['Set-Cookie'] = 'fileDownload=true; Path=/'
            return response
            return redirect('post_detail', pk=post.pk)  # unreachable
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form,'page_views':v})
| 2,849 | 813 |
"""Small helper classes for testing."""
import typing as tp
import plumbum as pb
from benchbuild.source import Variant, FetchableSource
class BBTestSource(FetchableSource):
    """Fixture source that serves a fixed, caller-supplied list of versions."""

    test_versions: tp.List[str]

    def __init__(
        self, test_versions: tp.List[str], local: str,
        remote: tp.Union[str, tp.Dict[str, str]]
    ):
        super().__init__(local, remote)
        self.test_versions = test_versions

    @property
    def local(self) -> str:
        """Fixed local name used by the fixture."""
        return "test_source"

    @property
    def remote(self) -> tp.Union[str, tp.Dict[str, str]]:
        """Fixed remote name used by the fixture."""
        return "test_remote"

    @property
    def default(self) -> Variant:
        """The first configured version is the default variant."""
        first_version = self.test_versions[0]
        return Variant(owner=self, version=first_version)

    # pylint: disable=unused-argument,no-self-use
    def version(self, target_dir: str, version: str) -> pb.LocalPath:
        """Return a fake checkout path for *version* (nothing is fetched)."""
        return pb.local.path('.') / ('varats-test-' + version)

    def versions(self) -> tp.Iterable[Variant]:
        """One Variant per configured test version, in order."""
        result = []
        for candidate in self.test_versions:
            result.append(Variant(self, candidate))
        return result
| 1,042 | 350 |
'''
If you find this useful, please give a thumbs up!
Thanks!
- Claire & Alhan
https://github.com/alhankeser/kaggle-petfinder
'''
# External libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
from sklearn.metrics import make_scorer
# from sklearn.metrics import accuracy_score
# from sklearn.metrics import confusion_matrix
import scipy.stats as stats
import math
import time
import traceback
import warnings
import os
import zipfile
import shutil
import sys
import json
# Display options for interactive inspection of wide/long frames.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 200)
# NOTE(review): blanket warning suppression can hide real issues (e.g.
# pandas deprecations); consider narrowing the filter.
warnings.filterwarnings(action="ignore")
class Explore:
    """Read-only EDA helpers, mixed into ``Data``.

    NOTE(review): methods take ``cls`` as first argument but have no
    ``@classmethod`` decorator — ``cls`` is really the instance (self).
    Relies on ``Data.get_df`` / ``target_col`` from the mixing class.
    """
    def get_dtype(cls, include_type=[], exclude_type=[]):
        """Train frame (minus target) filtered by dtype.

        NOTE(review): mutable default arguments — harmless here since
        they are never mutated, but `None` sentinels would be safer.
        """
        df = cls.get_df('train')
        df.drop(columns=[cls.target_col], inplace=True)
        return df.select_dtypes(include=include_type, exclude=exclude_type)
    def get_non_numeric(cls):
        """Columns that are not float/int typed."""
        return cls.get_dtype(exclude_type=['float64', 'int', 'float32'])
    def get_numeric(cls):
        """Columns that are not object/category typed."""
        return cls.get_dtype(exclude_type=['object', 'category'])
    def get_categorical(cls, as_df=False):
        """Object-typed columns (``as_df`` is currently unused)."""
        return cls.get_dtype(include_type=['object'])
    def get_correlations(cls, method='spearman'):
        """Single-column frame: correlation of every feature with the target."""
        df = cls.get_df('train')
        corr_mat = df.corr(method=method)
        corr_mat.sort_values(cls.target_col, inplace=True)
        corr_mat.drop(cls.target_col, inplace=True)
        return corr_mat[[cls.target_col]]
    def get_skewed_features(cls, df, features, skew_threshold=0.4):
        """Names of *features* whose absolute skew exceeds the threshold."""
        feat_skew = pd.DataFrame(
            {'skew': df[features].apply(lambda x: stats.skew(x))})
        skewed = feat_skew[abs(feat_skew['skew']) > skew_threshold].index
        return skewed.values
    def show_boxplot(cls, x, y, **kwargs):
        """Draw one boxplot with rotated x labels (FacetGrid.map callback)."""
        sns.boxplot(x=x, y=y)
        x = plt.xticks(rotation=90)
    def plot_categorical(cls, df, cols):
        """Save per-category boxplots of the target, ordered by target mean."""
        target = cls.target_col
        categorical = pd.melt(df, id_vars=[target],
                              value_vars=cols)
        grouped = categorical.groupby(['value', 'variable'],
                                      as_index=False)[target]\
            .mean().rename(columns={target: target + '_Mean'})
        categorical = pd.merge(categorical, grouped, how='left',
                               on=['variable', 'value'])\
            .sort_values(target + '_Mean')
        facet_grid = sns.FacetGrid(categorical, col="variable",
                                   col_wrap=3, size=5,
                                   sharex=False, sharey=False,)
        facet_grid = facet_grid.map(cls.show_boxplot, "value", target)
        plt.savefig('boxplots.png')
class Clean:
    """Sampling, NA-filling, encoding and scaling transforms (mixin for ``Data``).

    All mutators take a named DataFrame (``df.name`` is 'train'/'test')
    and return the transformed frame.  NOTE(review): first parameter is
    named ``cls`` but these are plain instance methods.
    """
    def sample_ros(cls, df):
        """Random-oversample the minority target class (train frame only)."""
        if df.name == 'train':
            X = df.drop(cls.target_col, axis=1)
            y = df[cls.target_col]
            ros = RandomOverSampler(sampling_strategy='minority',
                                    random_state=1)
            X_ros, y_ros = ros.fit_sample(X, y)
            df = pd.DataFrame(list(X_ros),
                              columns=df.drop(cls.target_col, axis=1)
                              .columns)
            df[cls.target_col] = list(y_ros)
        return df
    def sample(cls, df, target_val_sets):
        """For each (a, b) pair, resample class-a rows to class-b's count (train only).

        NOTE(review): ``replace='True'`` is a truthy *string*; the boolean
        ``True`` was probably intended (same effect here, but confusing).
        """
        if df.name == 'train':
            for target_val_set in target_val_sets:
                df_class_0 = df[df[cls.target_col] == target_val_set[0]]
                count_1 = df[cls.target_col].value_counts()[target_val_set[1]]
                df_class_0_sampled = df_class_0.sample(count_1,
                                                       replace='True',
                                                       random_state=1)
                df = pd.merge(df.drop(df_class_0.index),
                              df_class_0_sampled, how='outer')
        return df
    def keep_only_keep(cls, df):
        """Drop every column not whitelisted in ``cls.keep`` (target kept on train)."""
        to_drop = set(df.columns.values) - set(cls.keep)
        if df.name == 'train':
            to_drop = to_drop - set([cls.target_col])
        to_drop = list(to_drop)
        df.drop(to_drop, axis=1, inplace=True)
        return df
    def remove_outliers(cls, df):
        """Placeholder for train-set outlier removal (currently a no-op)."""
        if df.name == 'train':
            # GrLivArea (1299 & 524)
            # df.drop(df[(df['GrLivArea'] > 4000) &
            #            (df[cls.target_col] < 300000)].index,
            #         inplace=True)
            pass
        return df
    def fill_by_type(cls, x, col):
        """Replace NaN with 0.

        NOTE(review): both dtype branches return 0, so the ``col.dtype``
        check is dead code — presumably object columns were meant to get
        a different fill value.
        """
        if pd.isna(x):
            if col.dtype == 'object':
                return 0
            return 0
        return x
    def fill_na(cls, df):
        """Apply ``fill_by_type`` element-wise over every column."""
        for col in df.columns:
            df[col] = df[col].apply(lambda x: cls.fill_by_type(x, df[col]))
        return df
    def get_encoding_lookup(cls, cols):
        """Per-categorical lookup of value -> ordinal rank, ordered by target median.

        NOTE(review): the column is named ``<target>_mean`` but is filled
        with ``.median()``.
        """
        df = cls.get_df('train')
        target = cls.target_col
        suffix = '_E'
        result = pd.DataFrame()
        for cat_feat in cols:
            cat_feat_target = df[[cat_feat, target]].groupby(cat_feat)
            cat_feat_encoded_name = cat_feat + suffix
            order = pd.DataFrame()
            order['val'] = df[cat_feat].unique()
            order.index = order.val
            order.drop(columns=['val'], inplace=True)
            order[target + '_mean'] = cat_feat_target[[target]].median()
            order['feature'] = cat_feat
            order['encoded_name'] = cat_feat_encoded_name
            order = order.sort_values(target + '_mean')
            order['num_val'] = range(1, len(order)+1)
            result = result.append(order)
        result.reset_index(inplace=True)
        return result
    def get_scaled_categorical(cls, encoding_lookup):
        """Rescale ordinal ranks so spacing reflects target-median ratios."""
        scaled = encoding_lookup.copy()
        target = cls.target_col
        for feature in scaled['feature'].unique():
            values = scaled[scaled['feature'] == feature]['num_val'].values
            medians = scaled[
                scaled['feature'] == feature][target + '_mean'].values
            for median in medians:
                scaled_value = ((values.min() + 1) *
                                (median / medians.min()))-1
                scaled.loc[(scaled['feature'] == feature) &
                           (scaled[target + '_mean'] == median),
                           'num_val'] = scaled_value
        return scaled
    def encode_with_lookup(cls, df, encoding_lookup):
        """Write the encoded column (``<feat>_E``) per lookup row."""
        for encoded_index, encoded_row in encoding_lookup.iterrows():
            feature = encoded_row['feature']
            encoded_name = encoded_row['encoded_name']
            value = encoded_row['val']
            encoded_value = encoded_row['num_val']
            df.loc[df[feature] == value, encoded_name] = encoded_value
        return df
    def encode_onehot(cls, df, cols):
        """Append one-hot dummies for *cols* (first level dropped)."""
        df = pd.concat([df, pd.get_dummies(df[cols], drop_first=True)], axis=1)
        return df
    def encode_categorical(cls, df, cols=[], method='one_hot'):
        """Encode categoricals by target-mean ordinal or one-hot (default)."""
        if len(cols) == 0:
            cols = cls.get_categorical().columns.values
        if method == 'target_mean':
            encoding_lookup = cls.get_encoding_lookup(cols)
            encoding_lookup = cls.get_scaled_categorical(encoding_lookup)
            df = cls.encode_with_lookup(df, encoding_lookup)
        if method == 'one_hot':
            # Coerce any non-object requested columns to str before dummifying.
            if len(set(cols) - set(cls.get_dtype(include_type=['object'])
                                   .columns.values)) > 0:
                for col in cols:
                    df[col] = df[col].apply(lambda x: str(x))
            df = cls.encode_onehot(df, cols)
            df.drop(cols, axis=1, inplace=True)
        return df
    def fix_zero_infinity(cls, x):
        """Map 0 and +/-inf (from log transforms) back to 0."""
        if (x == 0) or math.isinf(x):
            return 0
        return x
    def normalize_features(cls, df, cols=[]):
        """log1p-transform numeric columns, then clean zeros/infinities."""
        if len(cols) == 0:
            cols = cls.get_numeric().columns.values
        for col in cols:
            if col in df.columns:
                df[col] = df[col].apply(lambda x:
                                        np.log1p(x).astype('float64'))
                df[col] = df[col].apply(lambda x: cls.fix_zero_infinity(x))
        return df
    def scale_quant_features(cls, df, cols):
        """Standard-scale *cols* in place.

        NOTE(review): the scaler is fit on whichever frame is passed, so
        train and test are scaled with different statistics — confirm
        this is intended.
        """
        scaler = StandardScaler()
        scaler.fit(df[cols])
        scaled = scaler.transform(df[cols])
        for i, col in enumerate(cols):
            df[col] = scaled[:, i]
        return df
    def drop_ignore(cls, df):
        """Drop every ``cls.ignore`` column, silently skipping absentees."""
        for col in cls.ignore:
            try:
                df.drop(col, axis=1, inplace=True)
            except Exception:
                pass
        return df
    def drop_low_corr(cls, df, threshold=0.12):
        """Drop features whose |correlation| with the target is <= threshold."""
        to_drop = pd.DataFrame(columns=['drop'])
        corr_mat = cls.get_correlations()
        target = cls.target_col
        to_drop['drop'] = corr_mat[(abs(corr_mat[target]) <= threshold)].index
        df.drop(to_drop['drop'], axis=1, inplace=True)
        return df
class Engineer:
    """Feature-engineering transforms for the PetFinder data (mixin for ``Data``).

    NOTE(review): first parameter is named ``cls`` but these are plain
    instance methods; several methods also read the module-level ``path``.
    """
    def get_image_data(cls, json_path):
        """Label annotations from a Vision-API JSON file, or False if unavailable."""
        image_data = False
        if os.path.isfile(json_path):
            with open(json_path) as f:
                try:
                    image_data = pd.DataFrame(
                        json.load(f)['labelAnnotations'])
                except Exception:
                    pass
        return image_data
    def calculate_photo_scores(cls, df, x, match='exact',
                               start=1, stop=2):
        """Array of label scores for pet *x*'s photos matching its species.

        Reads ``<path>/input/train_metadata/<PetID>-<i>.json`` for photo
        indices ``start..stop-1``; stops at the first missing/unparsable
        file.  Returns 0 (int) when no score was ever collected.
        NOTE(review): always reads train_metadata even for the test frame.
        """
        try:
            pet_id = x
            pet_type = df[df['PetID'] == pet_id]['Type'].values[0]
            pet_type_dict = {1: 'dog', 2: 'cat'}
            pet_type = pet_type_dict[pet_type]
            scores = []
            score = 0
            i = start
            while (i > 0) & (i < stop):
                json_path = path + '/input/train_metadata/'\
                    + pet_id + '-' + str(i) + '.json'
                image_data = cls.get_image_data(json_path)
                try:
                    if match == 'exact':
                        scores.append(
                            image_data[image_data['description'] ==
                                       pet_type]['score'].values[0])
                except Exception:
                    scores.append(.0)
                    break
                i += 1
            try:
                score = np.array(scores)
            except Exception:
                pass
        except Exception:
            print('########## calculate_photo_scores')
            print(pet_id)
        return score
    def rate_first_photo(cls, x):
        """Bucket the first photo's score into Good/Okay/Not Great (per-species threshold)."""
        try:
            score = x['AllPhotoScores'][0]
        except Exception:
            return 'Not Great'
        pet_type = x['Type']
        if pet_type == 1:
            good_threshold = 0.96
        if pet_type == 2:
            good_threshold = 0.99
        if score > good_threshold:
            return 'Good'
        if (score < good_threshold) & (score > .5):
            return 'Okay'
        return 'Not Great'
    def rate_secondary_good_photos(cls, x):
        """Bucket the count of good photos after the first into Good/Okay/Not Great."""
        count = 0
        pet_type = x['Type']
        scores = x['AllPhotoScores']
        if pet_type == 1:
            good_threshold = 0.96
        if pet_type == 2:
            good_threshold = 0.99
        try:
            scores = scores[1:]
            count = len(scores[scores > good_threshold])
        except Exception:
            pass
        if count > 2:
            return 'Good'
        if count > 0:
            return 'Okay'
        return 'Not Great'
    def get_photo_scores(cls, df):
        """Add AllPhotoScores, FirstPhotoScore and SecondaryPhotoScore columns."""
        try:
            df['AllPhotoScores'] = df['PetID']\
                .apply(lambda x:
                       cls.calculate_photo_scores(df,
                                                  x, match='exact',
                                                  start=1, stop=99))
            df['FirstPhotoScore'] = df[['Type', 'AllPhotoScores']]\
                .apply(lambda x: cls.rate_first_photo(x), axis=1)
            df['SecondaryPhotoScore'] = df[['AllPhotoScores', 'Type']]\
                .apply(lambda x: cls.rate_secondary_good_photos(x), axis=1)
        except Exception:
            print('########## get_photo_scores')
            print(df.head())
        return df
    def get_top_rescuers(cls, x, top_rescuers):
        """Pass through *x* if it is a top rescuer id, else False."""
        if x in top_rescuers:
            return x
        return False
    def rescuer(cls, df):
        """Flag the 5 most frequent rescuer ids in a Big_Rescuer column."""
        top_rescuers = list(df['RescuerID'].value_counts().index[:5])
        df['Big_Rescuer'] = df['RescuerID']\
            .apply(lambda x: cls.get_top_rescuers(x, top_rescuers))
        return df
    def fee(cls, df):
        """Boolean Has_Fee column."""
        df.loc[df['Fee'] > 0, 'Has_Fee'] = True
        df.loc[df['Fee'] == 0, 'Has_Fee'] = False
        return df
    def photo(cls, df):
        """Boolean Has_2Photos column (two or more photos)."""
        df.loc[df['PhotoAmt'] > 1, 'Has_2Photos'] = True
        df.loc[df['PhotoAmt'] < 2, 'Has_2Photos'] = False
        # df.loc[df['VideoAmt'] > 0, 'Has_Video'] = True
        # df.loc[df['VideoAmt'] == 0, 'Has_Video'] = False
        return df
    def simplify_name_length(cls, x):
        """'short' for names under 3 characters, else 'long'."""
        length = len(str(x))
        if length < 3:
            return 'short'
        # if length < 20:
        #     return 'medium'
        # if length > 19:
        #     return 'long'
        return 'long'
    def name_length(cls, df):
        """Categorical NameLength column."""
        df['NameLength'] = df['Name']\
            .apply(lambda x: cls.simplify_name_length(x))
        return df
    def get_name_groups(cls, df):
        """Per-species dict of lowercase name lists: top5 / top30 / rest (all)."""
        names = {}
        names_by_count = df[df['Type'] == 1]['Name']\
            .value_counts().index.tolist()
        top5 = [a.lower() for a in names_by_count[:5]]
        top30 = [a.lower() for a in names_by_count[:30]]
        rest = [a.lower() for a in names_by_count[:]]
        names['dog'] = {
            'top5': top5,
            'top30': top30,
            'rest': rest
        }
        names_by_count = df[df['Type'] == 2]['Name']\
            .value_counts().index.tolist()
        top5 = [a.lower() for a in names_by_count[:5]]
        top30 = [a.lower() for a in names_by_count[:30]]
        rest = [a.lower() for a in names_by_count[:]]
        names['cat'] = {
            'top5': top5,
            'top30': top30,
            'rest': rest
        }
        return names
    def simplify_names(cls, x, names):
        """Bucket a name into NAN / top5 / rest (returns None if unseen)."""
        x = str(x)
        x = x.lower()
        if 'nan' in x:
            return 'NAN'
        if x in names['top5']:
            return 'top5'
        # if x in names['top30']:
        #     return 'top30'
        # if '&' in x:
        #     return 'and'
        if x in names['rest']:
            return 'rest'
    def names(cls, df):
        """Categorical NameGroup column, computed per species."""
        names = cls.get_name_groups(df)
        df.loc[df['Type'] == 1, 'NameGroup'] = df[df['Type'] == 1]['Name']\
            .apply(lambda x: cls.simplify_names(x, names['dog']))
        df.loc[df['Type'] == 2, 'NameGroup'] = df[df['Type'] == 2]['Name']\
            .apply(lambda x: cls.simplify_names(x, names['cat']))
        return df
    def color(cls, df):
        """Boolean Mixed_Color column.

        NOTE(review): the second assignment uses OR of the ``== 0`` tests,
        so a pet with Color2 set but Color3 unset (or vice versa) is first
        marked True and then overwritten to False — AND was likely intended.
        """
        df.loc[(df['Color3'] > 0) | (df['Color2'] > 0),
               'Mixed_Color'] = True
        df.loc[(df['Color3'] == 0) | (df['Color2'] == 0),
               'Mixed_Color'] = False
        return df
    def simplify_quantity(cls, df):
        """Bin Quantity into solo/litter/herd categories (currently unused)."""
        bins = (0, 1, 10, 100)
        group_names = ['solo', 'litter', 'herd']
        categories = pd.cut(df['Quantity'], bins, labels=group_names)
        return categories
    def quantity(cls, df):
        """Boolean Is_Solo column.

        NOTE(review): flags Quantity == 0 as solo; a single pet is
        normally Quantity == 1 — looks inverted, confirm against data.
        """
        df.loc[df['Quantity'] == 0, 'Is_Solo'] = True
        df.loc[df['Quantity'] > 0, 'Is_Solo'] = False
        return df
    def gender(cls, df):
        """Recode mixed-gender (3) groups: pairs -> 1.5, larger groups -> 0."""
        df.loc[(df['Gender'] == 3) &
               (df['Quantity'] == 2), 'Gender'] = 1.5
        df.loc[(df['Gender'] == 3) &
               (df['Quantity'] > 2), 'Gender'] = 0
        return df
    def breed(cls, df):
        """Boolean Mixed_Breed column (Breed1 == 307 is the mixed-breed code)."""
        # df.loc[df['Breed2'] > 0, 'Mixed_Breed'] = True
        # df.loc[df['Breed2'] == 0, 'Mixed_Breed'] = False
        df.loc[df['Breed1'] == 307, 'Mixed_Breed'] = True
        df.loc[df['Breed1'] != 307, 'Mixed_Breed'] = False
        return df
    def numerize_features(cls, df, cols):
        """Label-encode *cols*, fitting on train+test combined for a shared mapping."""
        train, test = cls.get_dfs()
        df_combined = pd.concat([train[cols], test[cols]])
        train.drop(cls.target_col, axis=1, inplace=True)
        for feature in cols:
            le = LabelEncoder()
            df_combined[feature] = df_combined[feature].apply(lambda x: str(x))
            df[feature] = df[feature].apply(lambda x: str(x))
            le = le.fit(df_combined[feature])
            df[feature] = le.transform(df[feature])
        return df
    def simplify_ages(cls, df, animal):
        """Bin Age (months) into life-stage categories, per species."""
        if animal == 'dog':
            bins = (-1, 0, 2, 256)
            group_names = ['baby', 'child', 'adult']
            categories = pd.cut(df[df['Type'] == 1]['Age'], bins,
                                labels=group_names)
        if animal == 'cat':
            bins = (-1, 4, 256)
            group_names = ['baby', 'adult']
            categories = pd.cut(df[df['Type'] == 2]['Age'], bins,
                                labels=group_names)
        return categories
    def age(cls, df):
        """Replace the raw Age column with a categorical AgeGroup column."""
        df.loc[df['Type'] == 1, 'AgeGroup'] = cls.simplify_ages(df, 'dog')
        df.loc[df['Type'] == 2, 'AgeGroup'] = cls.simplify_ages(df, 'cat')
        df.drop('Age', axis=1, inplace=True)
        return df
    def sum_features(cls, df, col_sum):
        """Replace each column set with a single summed ``a__b`` column."""
        for col_set in col_sum:
            f_name = '__'.join(col_set[:])
            df[f_name] = df[[*col_set]].sum(axis=1)
            df.drop(col_set, axis=1, inplace=True)
        return df
    def combine_features(cls, row, col_set):
        """Underscore-join a row's values across *col_set* (row-apply helper)."""
        result = ''
        for col in col_set:
            if result != '':
                result += '_'
            result += str(row[col])
        return result
    def combine(cls, df, col_sets):
        """Replace each column set with one concatenated ``a__b`` string column."""
        for col_set in col_sets:
            f_name = '__'.join(col_set[:])
            df[f_name] = df.apply(lambda x: cls.combine_features(x, col_set),
                                  axis=1)
            df.drop(col_set, axis=1, inplace=True)
        return df
    def multiply_features(cls, df, feature_sets):
        """Placeholder for multiplicative interaction features (no-op)."""
        for feature_set in feature_sets:
            # multipled_name = '_x_'.join(feature_set[:])
            # df.drop(feature_set, axis=1, inplace=True)
            pass
        return df
class Model:
    """Model selection, QWK scoring, cross-validation and submission output.

    NOTE(review): first parameter is named ``cls`` but these are plain
    instance methods; ``save_predictions`` also reads the module-level
    ``path``.
    """
    def forward_selection(cls, df, features_count=1):
        """Sequential forward selection (mlxtend); returns best column indices.

        NOTE(review): only runs for the train frame — returns None otherwise;
        ``features_count`` is ignored (``k_features`` is hard-coded to 3).
        """
        if df.name == 'train':
            qwk_scorer = make_scorer(cls.quadratic_weighted_kappa,
                                     greater_is_better=True)
            model = RandomForestClassifier(n_estimators=100, n_jobs=-1)
            X = df.drop('AdoptionSpeed', axis=1)
            y = df['AdoptionSpeed']
            X_train, X_test,\
                y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                   random_state=42)
            y_train = y_train.ravel()
            y_test = y_test.ravel()
            sfs1 = sfs(model,
                       k_features=3,
                       forward=True,
                       floating=False,
                       verbose=2,
                       scoring=qwk_scorer,
                       cv=5)
            sfs1 = sfs1.fit(X_train, y_train)
            best_cols = list(sfs1.k_feature_idx_)
            return best_cols
    def confusion_matrix(cls, rater_a, rater_b,
                         min_rating=None, max_rating=None):
        """
        https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
        Returns the confusion matrix between rater's ratings
        """
        assert(len(rater_a) == len(rater_b))
        if min_rating is None:
            min_rating = min(rater_a + rater_b)
        if max_rating is None:
            max_rating = max(rater_a + rater_b)
        num_ratings = int(max_rating - min_rating + 1)
        conf_mat = [[0 for i in range(num_ratings)]
                    for j in range(num_ratings)]
        for a, b in zip(rater_a, rater_b):
            conf_mat[a - min_rating][b - min_rating] += 1
        return conf_mat
    def histogram(cls, ratings, min_rating=None, max_rating=None):
        """
        https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
        Returns the counts of each type of rating that a rater made
        """
        if min_rating is None:
            min_rating = min(ratings)
        if max_rating is None:
            max_rating = max(ratings)
        num_ratings = int(max_rating - min_rating + 1)
        hist_ratings = [0 for x in range(num_ratings)]
        for r in ratings:
            hist_ratings[r - min_rating] += 1
        return hist_ratings
    def quadratic_weighted_kappa(cls, rater_a, rater_b,
                                 min_rating=0, max_rating=4):
        """
        https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
        Calculates the quadratic weighted kappa
        quadratic_weighted_kappa calculates the quadratic weighted kappa
        value, which is a measure of inter-rater agreement between two raters
        that provide discrete numeric ratings. Potential values range from -1
        (representing complete disagreement) to 1 (representing complete
        agreement). A kappa value of 0 is expected if all agreement is due to
        chance.
        quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b
        each correspond to a list of integer ratings. These lists must have the
        same length.
        The ratings should be integers, and it is assumed that they contain
        the complete range of possible ratings.
        quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating
        is the minimum possible rating, and max_rating is the maximum possible
        rating
        """
        rater_a = np.array(rater_a, dtype=int)
        rater_b = np.array(rater_b, dtype=int)
        assert(len(rater_a) == len(rater_b))
        if min_rating is None:
            min_rating = min(min(rater_a), min(rater_b))
        if max_rating is None:
            max_rating = max(max(rater_a), max(rater_b))
        conf_mat = cls.confusion_matrix(rater_a, rater_b,
                                        min_rating, max_rating)
        num_ratings = len(conf_mat)
        num_scored_items = float(len(rater_a))
        hist_rater_a = cls.histogram(rater_a, min_rating, max_rating)
        hist_rater_b = cls.histogram(rater_b, min_rating, max_rating)
        numerator = 0.0
        denominator = 0.0
        for i in range(num_ratings):
            for j in range(num_ratings):
                expected_count = (hist_rater_a[i] * hist_rater_b[j]
                                  / num_scored_items)
                d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
                numerator += d * conf_mat[i][j] / num_scored_items
                denominator += d * expected_count / num_scored_items
        return 1.0 - numerator / denominator
    def fix_shape(cls, df):
        """Reindex the frame so train and test share the same feature columns.

        NOTE(review): ``cols_to_add`` is only bound inside the two name
        branches — a frame named anything else raises UnboundLocalError.
        """
        df_name = df.name
        if df_name == 'train':
            cols_to_add = set(cls.get_df('test').columns.values) -\
                set(df.drop(cls.target_col, axis=1).columns.values)
        if df_name == 'test':
            cols_to_add = set(cls.get_df('train').drop(cls.target_col, axis=1)
                              .columns.values) - set(df.columns.values)
        cols_to_add = np.array(list(cols_to_add))
        cols_to_add = np.append(cols_to_add, df.columns.values)
        df = df.reindex(columns=cols_to_add, fill_value=0)
        df.name = df_name
        return df
    def cross_validate(cls, model, parameters):
        """Mean QWK over 10 stratified folds of the train frame."""
        train, test = cls.get_dfs()
        # TODO: check if there are lists in parameters to run gridsearch
        # Align columns first if train/test shapes have diverged.
        if len(train.drop(cls.target_col,
                          axis=1).columns) != len(test.columns):
            cls.mutate(cls.fix_shape)
            train = cls.get_df('train')
        scores = np.array([])
        skf = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
        X = train.drop(columns=[cls.target_col])
        y = train[cls.target_col]
        for train_index, test_index in skf.split(X, y):
            X_train, X_test = X.iloc[train_index], X.iloc[test_index]
            y_train, y_test = y.iloc[train_index], y.iloc[test_index]
            cv_model = model(**parameters)
            cv_model.fit(X_train, y_train)
            X_predictions = cv_model.predict(X_test)
            score = cls.quadratic_weighted_kappa(y_test, X_predictions, 0, 4)
            scores = np.append(scores, score)
        score = np.round(scores.mean(), decimals=5)
        return score
    def fit(cls, model, parameters):
        """Fit a fresh *model* (class, not instance) on the full train frame."""
        train = cls.get_df('train')
        X = train.drop(columns=[cls.target_col])
        y = train[cls.target_col]
        model = model(**parameters)
        model.fit(X, y)
        return model
    def predict(cls, model):
        """Predictions of a fitted *model* on the test frame."""
        test = cls.get_df('test')
        predictions = model.predict(test)
        return predictions
    def save_predictions(cls, predictions, score=0, id_col=False):
        """Write id+target predictions to output/submit__<score>__<ts>.csv and submission.csv."""
        now = str(time.time()).split('.')[0]
        # Use the untouched original test frame so the id column survives.
        df = cls.get_df('test', False, True)
        target = cls.target_col
        if not id_col:
            id_col = df.columns[0]
        df[target] = predictions
        if not os.path.exists(path + '/output'):
            os.makedirs(path + '/output')
        if os.path.exists(path + '/output'):
            df[[id_col,
                target]].to_csv(path + '/output/submit__'
                                + str(int(score * 100000))
                                + '__' + now +
                                '.csv', index=False)
        df[[id_col, target]].to_csv('submission.csv', index=False)
class Data(Explore, Clean, Engineer, Model):
    """Facade tying the mixins together; owns the train/test frames.

    Keeps three snapshots for rollback: ``__original`` (first check-in),
    ``__previous`` (last successful mutation) and ``__current``.
    """
    def __init__(self, train_csv, test_csv, target='',
                 ignore=[], keep=[], col_sum=[]):
        '''Create pandas DataFrame objects for train and test data.
        Positional arguments:
        train_csv -- relative path to training data in csv format.
        test_csv -- relative path to test data in csv format.
        Keyword arguments:
        target -- target feature column name in training data.
        ignore -- columns names in list to ignore during analyses.
        keep -- whitelist of columns for keep_only_keep.
        col_sum -- column sets to sum via sum_features.
        '''
        self.__train = pd.read_csv(train_csv)
        self.__test = pd.read_csv(test_csv)
        self.__train.name, self.__test.name = self.get_df_names()
        self.target_col = target
        self.ignore = ignore
        self.keep = keep
        self.col_sum = col_sum
        self.__original = False
        self.__log = False
        self.check_in()
        self.debug = False
    def __str__(cls):
        # NOTE(review): iterating a DataFrame yields column names, so this
        # prints the column lists (head(2) has no effect on that).
        train_columns = 'Train: \n"' + '", "'.join(cls.__train.head(2)) + '"\n'
        test_columns = 'Test: \n"' + '", "'.join(cls.__test.head(2)) + '"\n'
        return train_columns + test_columns
    def get_df_names(cls):
        """Canonical (train, test) frame names."""
        return ('train', 'test')
    def get_dfs(cls, ignore=False, originals=False, keep=False):
        """Copies of (train, test), optionally from the originals or filtered."""
        train, test = (cls.__train.copy(),
                       cls.__test.copy())
        if originals:
            # NOTE(review): returns the stored originals without copying.
            train, test = (cls.__original)
        if ignore:
            train, test = (train.drop(columns=cls.ignore),
                           test.drop(columns=cls.ignore))
        if keep:
            train, test = (train[cls.keep],
                           test[cls.keep])
        train.name, test.name = cls.get_df_names()
        return (train, test)
    def get_df(cls, name, ignore=False, original=False, keep=False):
        """Single frame by name ('train' or 'test'); None otherwise."""
        train, test = cls.get_dfs(ignore, original, keep)
        if name == 'train':
            return train
        if name == 'test':
            return test
    def log(cls, entry=False, status=False):
        """Record a mutation outcome; rollback on 'Fail', snapshot otherwise."""
        if cls.__log is False:
            cls.__log = pd.DataFrame(columns=['entry', 'status'])
        log_entry = pd.DataFrame({'entry': entry, 'status': status}, index=[0])
        cls.__log = cls.__log.append(log_entry, ignore_index=True)
        if status == 'Fail':
            cls.rollback()
        else:
            cls.check_out()
        if cls.debug:
            cls.print_log()
    def print_log(cls):
        """Dump the mutation log."""
        print(cls.__log)
    def check_in(cls):
        """Snapshot the current frames (and the originals on first call)."""
        cls.__current = cls.get_dfs()
        if cls.__original is False:
            cls.__original = cls.__current
    def check_out(cls):
        """Promote the pre-mutation snapshot to 'previous'."""
        cls.__previous = cls.__current
        cls.__train.name, cls.__test.name = cls.get_df_names()
    def rollback(cls):
        """Restore the previous snapshot, falling back to the originals."""
        try:
            cls.__train, cls.__test = cls.__previous
            status = 'Success - To Previous'
        except Exception:
            cls.__train, cls.__test = cls.__original
            status = 'Success - To Original'
        cls.log('rollback', status)
    def reset(cls):
        """Restore the original frames."""
        cls.__train, cls.__test = cls.__original
        cls.log('reset', 'Success')
    def update_dfs(cls, train, test):
        """Install new train/test frames (names reattached)."""
        train.name, test.name = cls.get_df_names()
        cls.__train = train
        cls.__test = test
    def mutate(cls, mutation, *args):
        '''Make changes to both train and test DataFrames.
        Positional arguments:
        mutation -- function to pass both train and test DataFrames to.
        *args -- arguments to pass to the function, following each DataFrame.
        Example usage:
        def multiply_column_values(df, col_name, times=10):
            #do magic...
        Data.mutate(multiply_column_values, 'Id', 2)
        '''
        cls.check_in()
        try:
            train = mutation(cls.get_df('train'), *args)
            test = mutation(cls.get_df('test'), *args)
            cls.update_dfs(train, test)
            status = 'Success'
        except Exception:
            print(traceback.print_exc())
            status = 'Fail'
        cls.log(mutation.__name__, status)
def run(d, model, parameters):
    """Apply the feature pipeline, cross-validate, fit, and predict.

    Returns (test_predictions, mean_cv_qwk_score).  The commented-out
    mutations are retained as experiment history.
    """
    mutate = d.mutate
    # mutate(d.sample, [[0, 1]])
    # mutate(d.sample_ros)
    # print(d.get_df('train')['AdoptionSpeed'].value_counts())
    mutate(d.get_photo_scores)
    # mutate(d.rescuer)
    # mutate(d.age)
    # mutate(d.gender)
    # mutate(d.quantity)
    # mutate(d.names)
    # mutate(d.name_length)
    # mutate(d.color)
    # mutate(d.breed)
    # mutate(d.fee)
    # mutate(d.photo)
    # mutate(d.sum_features, d.col_sum)
    mutate(d.combine, [
        # ['Breed1', 'Breed2'],
        # ['Color1', 'Color2']
    ])
    # mutate(d.fill_na)
    mutate(d.numerize_features, [
        # 'Breed1',
        # 'Color1__Color2'
    ])
    mutate(d.encode_categorical, [
        'Type',
        # 'AgeGroup',
        # 'NameLength',
        # 'Is_Solo',
        # 'Has_2Photos',
        'FirstPhotoScore',
        'SecondaryPhotoScore'
    ])
    mutate(d.drop_ignore)
    # best_features = d.forward_selection(d.get_df('train'), 5)
    # print('Best Features', best_features)
    # sys.exit()
    score = d.cross_validate(model, parameters)
    print('Score: ', score)
    print(d.get_df('train').head(2))
    model = d.fit(model, parameters)
    predictions = d.predict(model)
    d.print_log()
    return (predictions, score)
# Resolve the data root: '.' locally, '..' when running inside /kaggle.
path = '.'
if os.getcwd().split('/')[1] == 'kaggle':
    path = '..'
# Archives dropped into input/ that still need extracting.
zip_files = list(filter(lambda x: '.zip' in x, os.listdir(path + '/input/')))
def unzip(file):
    """Extract ``<path>/input/<file>`` into a folder named after the archive."""
    archive = path + '/input/' + file
    target_dir = path + '/input/' + file.split('.')[0]
    with zipfile.ZipFile(archive, 'r') as zf:
        zf.extractall(target_dir)
def move_zips(move_from, move_to):
    """Relocate every ``*.zip`` in *move_from* into *move_to* (created if absent).

    Both arguments are expected to end with a path separator, matching
    the call sites in this script.
    """
    archives = [name for name in os.listdir(move_from) if '.zip' in name]
    if not os.path.exists(move_to):
        os.makedirs(move_to)
    for name in archives:
        shutil.move(move_from + name, move_to + name)
# One-time bootstrap: extract any fresh archives, then stash the zips aside
# so they are not re-extracted on the next run.
if len(zip_files) > 0:
    for file in zip_files:
        unzip(file)
    move_zips(path + '/input/', path + '/input/source_zips/')
# Estimator class (instantiated inside Data.cross_validate / Data.fit).
model = RandomForestClassifier
parameters = {
    'n_estimators': 100,
}
# Raw columns excluded from modelling; commented entries are kept as
# experiment history.
cols_to_ignore = ['PetID',
                  'RescuerID',
                  'Description',
                  'Name',
                  # 'Type',
                  'Age',
                  'Breed1',
                  'Breed2',
                  'Gender',
                  'Color1',
                  'Color2',
                  'Color3',
                  'MaturitySize',
                  'FurLength',
                  'Vaccinated',
                  'Dewormed',
                  'Sterilized',
                  'Health',
                  'Quantity',
                  'Fee',
                  'State',
                  'VideoAmt',
                  'PhotoAmt',
                  # Custom:
                  'AllPhotoScores',
                  ]
id_col = 'PetID'
# Load data, run the pipeline, and write the submission files.
d = Data(path + '/input/train/train.csv',
         path + '/input/test/test.csv',
         'AdoptionSpeed',
         ignore=cols_to_ignore)
predictions, score = run(d, model, parameters)
d.save_predictions(predictions, score, id_col)
| 33,366 | 10,710 |
from tkinter import messagebox
import os
import itertools
from datetime import datetime
import shutil
import pickle
from bin.fileutils import human_filesize, get_directory_size
from bin.color import bcolor
from bin.threadmanager import ThreadManager
from bin.config import Config
from bin.status import Status
class Backup:
def __init__(self, config, backup_config_dir, backup_config_file, do_copy_fn, do_del_fn, start_backup_timer_fn, update_file_detail_list_fn, analysis_summary_display_fn, display_backup_command_info_fn, thread_manager, update_ui_component_fn=None, uicolor=None, progress=None):
    """Configure a backup to be run on a set of drives.

    Args:
        config (dict): The backup config to be processed.
        backup_config_dir (String): The directory to store backup configs on each drive.
        backup_config_file (String): The file to store backup configs on each drive.
        do_copy_fn (def): The function to be used to handle file copying. TODO: Move do_copy_fn outside of Backup class.
        do_del_fn (def): The function to be used to handle file deletion. TODO: Move do_del_fn outside of Backup class.
        start_backup_timer_fn (def): The function to be used to start the backup timer.
        update_file_detail_list_fn (def): The function to be used to update file lists.
        analysis_summary_display_fn (def): The function to be used to show an analysis
            summary.
        display_backup_command_info_fn (def): The function to be used to enumerate command info
            in the UI.
        thread_manager (ThreadManager): The thread manager to check for kill flags.
        update_ui_component_fn (def): The function to be used to update UI components (default None).
        uicolor (Color): The UI color instance to reference for styling (default None). TODO: Move uicolor outside of Backup class
        progress (Progress): The progress tracker to bind to.
    """
    # Byte totals accumulated during analysis and backup. 'master' counts
    # copy bytes twice (copy + verify) plus delete bytes.
    self.totals = {
        'master': 0,
        'delete': 0,
        'delta': 0,
        'running': 0,
        'buffer': 0,
        'progressBar': 0
    }
    # User's answer to the "wipe new drives?" prompt, cached so the
    # question is asked at most once per backup configuration.
    self.confirm_wipe_existing_drives = False
    # Lifecycle flags for the analysis and backup phases.
    self.analysis_valid = False
    self.analysis_started = False
    self.analysis_running = False
    self.backup_running = False
    self.backup_start_time = 0
    # Work queues produced by analyze().
    self.command_list = []
    self.delete_file_list = {}
    self.replace_file_list = {}
    self.new_file_list = {}
    self.config = config
    # Destination drive dicts keyed by volume ID.
    self.DRIVE_VID_INFO = {drive['vid']: drive for drive in config['destinations']}
    # Source paths keyed by the share's destination name.
    self.SHARE_NAME_PATH_INFO = {share['dest_name']: share['path'] for share in config['sources']}
    self.BACKUP_CONFIG_DIR = backup_config_dir
    self.BACKUP_CONFIG_FILE = backup_config_file
    # Per-drive pickle file holding known file hashes.
    self.BACKUP_HASH_FILE = 'hashes.pkl'
    self.CLI_MODE = self.config['cli_mode']
    # Known file hashes keyed by drive name; populated during analyze().
    self.file_hashes = {drive['name']: {} for drive in self.config['destinations']}
    self.uicolor = uicolor
    self.do_copy_fn = do_copy_fn
    self.do_del_fn = do_del_fn
    self.start_backup_timer_fn = start_backup_timer_fn
    self.update_ui_component_fn = update_ui_component_fn
    self.update_file_detail_list_fn = update_file_detail_list_fn
    self.analysis_summary_display_fn = analysis_summary_display_fn
    self.display_backup_command_info_fn = display_backup_command_info_fn
    self.thread_manager = thread_manager
    self.progress = progress
def sanity_check(self):
    """Check to make sure everything is correct before a backup.

    Both shares and drives must be selected, identifiers must be present
    where the selected modes require them, and the selected drives need
    more capacity than the total size of the selected shares. When brand
    new drives are selected, the user is asked (once) to confirm that
    their existing contents may be wiped.

    Returns:
        bool: True if conditions are good, False otherwise.
    """
    sources = self.config['sources']
    destinations = self.config['destinations']
    # Nothing selected on either side means nothing to do.
    if not destinations or not sources:
        return False
    # Shares need destination names in multi-source modes.
    if self.config['source_mode'] in [Config.SOURCE_MODE_MULTI_DRIVE, Config.SOURCE_MODE_MULTI_PATH]:
        if any(not share['dest_name'] for share in sources):
            return False
    # Path-style destinations need volume IDs.
    if self.config['dest_mode'] == Config.DEST_MODE_PATHS:
        if any(not drive['vid'] for drive in destinations):
            return False
    # Total up the selected shares; any unknown size fails the check.
    shares_known = True
    share_total = 0
    for share in sources:
        if share['size'] is None:
            shares_known = False
            break
        share_total += share['size']
    drive_total = sum(drive['capacity'] for drive in destinations)
    config_total = drive_total + sum(self.config['missing_drives'].values())
    # Pass if the connected drives hold everything, or, in split mode, if
    # the whole configured drive set (including missing drives) does.
    fits_connected = not self.config['missing_drives'] and share_total < drive_total
    fits_split = bool(self.config['splitMode']) and share_total < config_total
    if not shares_known or not (fits_connected or fits_split):
        return False
    # Drives without an existing backup config are treated as new; ask the
    # user once before their current contents get deleted.
    new_drives = [drive['name'] for drive in destinations if drive['hasConfig'] is False]
    if self.confirm_wipe_existing_drives or not new_drives:
        return True
    drive_string = ', '.join(new_drives[:-2] + [' and '.join(new_drives[-2:])])
    new_drive_confirm_title = f"New drive{'s' if len(new_drives) > 1 else ''} selected"
    new_drive_confirm_message = f"Drive{'s' if len(new_drives) > 1 else ''} {drive_string} appear{'' if len(new_drives) > 1 else 's'} to be new. Existing data will be deleted.\n\nAre you sure you want to continue?"
    self.confirm_wipe_existing_drives = messagebox.askyesno(new_drive_confirm_title, new_drive_confirm_message)
    return self.confirm_wipe_existing_drives
def get_share_source_path(self, share):
"""Convert a share name into a share path.
Args:
share (String): The share to convert.
Returns:
String: The source path for the given share.
"""
share_base = share.split(os.path.sep)[0]
share_slug = share[len(share_base):].strip(os.path.sep)
share_base_path = self.SHARE_NAME_PATH_INFO[share_base]
share_full_path = os.path.join(share_base_path, share_slug).strip(os.path.sep)
return share_full_path
# IDEA: When we ignore other stuff on the drives, and delete it, have a dialog popup that summarizes what's being deleted, and ask the user to confirm
def analyze(self):
"""Analyze the list of selected shares and drives and figure out how to split files.
Args:
shares (dict[]): The list of selected shares.
shares.name (String): The name of the share.
shares.size (int): The size in bytes of the share.
drives (tuple(String)): The list of selected drives.
This function is run in a new thread, but is only run if the backup config is valid.
If sanity_check() returns False, the analysis isn't run.
"""
# Sanity check for space requirements
if not self.sanity_check():
return
self.analysis_running = True
self.analysis_started = True
if not self.CLI_MODE:
self.progress.start_indeterminate()
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_ANALYSIS_RUNNING)
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_BTN, {'state': 'disable'})
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_START)
self.update_ui_component_fn(Status.LOCK_TREE_SELECTION)
share_info = {share['dest_name']: share['size'] for share in self.config['sources']}
all_share_info = {share['dest_name']: share['size'] for share in self.config['sources']}
# Get hash list for all drives
bad_hash_files = []
self.file_hashes = {drive['name']: {} for drive in self.config['destinations']}
special_ignore_list = [self.BACKUP_CONFIG_DIR, '$RECYCLE.BIN', 'System Volume Information']
for drive in self.config['destinations']:
drive_hash_file_path = os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
if os.path.isfile(drive_hash_file_path):
write_trimmed_changes = False
with open(drive_hash_file_path, 'rb') as f:
try:
# Load hash list, and filter out ignored folders
hash_list = pickle.load(f)
new_hash_list = {file_name: hash_val for file_name, hash_val in hash_list.items() if file_name.split('/')[0] not in special_ignore_list}
new_hash_list = {os.path.sep.join(file_name.split('/')): hash_val for file_name, hash_val in new_hash_list.items() if os.path.isfile(os.path.join(drive['name'], file_name))}
# If trimmed list is shorter, new changes have to be written to the file
if len(new_hash_list) < len(hash_list):
write_trimmed_changes = True
self.file_hashes[drive['name']] = new_hash_list
except Exception:
# Hash file is corrupt
bad_hash_files.append(drive_hash_file_path)
# If trimmed list is different length than original, write changes to file
if write_trimmed_changes:
with open(drive_hash_file_path, 'wb') as f:
pickle.dump({'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in new_hash_list.items()}, f)
else:
# Hash file doesn't exist, so create it
if not os.path.exists(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR)):
os.makedirs(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR))
with open(drive_hash_file_path, 'wb') as f:
pickle.dump({}, f)
# If there are missing or corrupted pickle files, write empty data
if bad_hash_files:
for file in bad_hash_files:
with open(file, 'wb') as f:
pickle.dump({}, f)
drive_info = []
drive_share_list = {}
master_drive_list = [drive for drive in self.config['destinations']]
master_drive_list.extend([{'vid': vid, 'capacity': capacity} for vid, capacity in self.config['missing_drives'].items()])
connected_vid_list = [drive['vid'] for drive in self.config['destinations']]
show_drive_info = []
for i, drive in enumerate(master_drive_list):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
drive_connected = drive['vid'] in connected_vid_list
current_drive_info = drive
current_drive_info['connected'] = drive_connected
# If drive is connected, collect info about config size and free space
if drive_connected:
current_drive_info['configSize'] = get_directory_size(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR))
else:
current_drive_info['name'] = f"[{drive['vid']}]"
current_drive_info['configSize'] = 20000 # Assume 20K config size
current_drive_info['free'] = drive['capacity'] - drive['configSize']
drive_info.append(current_drive_info)
# Enumerate list for tracking what shares go where
drive_share_list[drive['vid']] = []
show_drive_info.append((current_drive_info['name'], human_filesize(drive['capacity']), drive_connected))
# For each drive, smallest first, filter list of shares to those that fit
drive_info.sort(key=lambda x: x['free'])
all_drive_files_buffer = {drive['name']: [] for drive in master_drive_list}
for i, drive in enumerate(drive_info):
# Get list of sources small enough to fit on drive
total_small_sources = {source: size for source, size in share_info.items() if size <= drive['free']}
# Since the list of files is truncated to prevent an unreasonably large
# number of combinations to check, we need to keep processing the file list
# in chunks to make sure we check if all files can be fit on one drive
sources_that_fit_on_dest = []
small_source_list = {}
processed_small_sources = []
processed_source_size = 0
while len(processed_small_sources) < len(total_small_sources):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Trim the list of small files to those that aren't already processed
small_source_list = {source: size for (source, size) in total_small_sources.items() if source not in processed_small_sources}
small_source_list = sorted(small_source_list.items(), key=lambda x: x[1], reverse=True)
trimmed_small_source_list = {source[0]: source[1] for source in small_source_list[:15]}
# Try every combination of sources that fit to find result that uses most of that drive
largest_sum = 0
largest_set = []
for n in range(1, len(trimmed_small_source_list) + 1):
for subset in itertools.combinations(trimmed_small_source_list.keys(), n):
combination_total = sum(trimmed_small_source_list[share] for share in subset)
if (combination_total > largest_sum and combination_total <= drive['free']):
largest_sum = combination_total
largest_set = subset
sources_that_fit_on_dest.extend([source for source in largest_set])
remaining_small_sources = {source[0]: source[1] for source in small_source_list if source not in sources_that_fit_on_dest}
processed_small_sources.extend([source for source in trimmed_small_source_list.keys()])
share_info = {share: size for (share, size) in share_info.items() if share not in sources_that_fit_on_dest}
# Subtract file size of each batch of files from the free space on the drive so the next batch sorts properly
processed_source_size += sum([source[1] for source in small_source_list if source[0] in largest_set])
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# If not all shares fit on smallest drive at once (at least one share has to be put
# on the next largest drive), check free space on next largest drive
if len(sources_that_fit_on_dest) < len(small_source_list) and i < (len(drive_info) - 1):
not_fit_total = sum(size for size in remaining_small_sources.values())
next_drive = drive_info[i + 1]
next_drive_free_space = next_drive['free'] - not_fit_total
# If free space on next drive is less than total capacity of current drive, it
# becomes more efficient to skip current drive, and put all shares on the next
# drive instead.
# This applies only if they can all fit on the next drive. If they have to be
# split across multiple drives after moving them to a larger drive, then it's
# easier to fit what we can on the small drive, to leave the larger drives
# available for larger shares
if not_fit_total <= next_drive['free']:
total_small_share_space = sum(size for size in small_source_list.values())
if next_drive_free_space < drive['free'] and total_small_share_space <= next_drive['free']:
# Next drive free space less than total on current, so it's optimal to store on next drive instead
drive_share_list[next_drive['vid']].extend([share for share in small_source_list.keys()]) # All small shares on next drive
else:
# Better to leave on current, but overflow to next drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest) # Shares that fit on current drive
drive_share_list[next_drive['vid']].extend([share for share in small_source_list.keys() if share not in sources_that_fit_on_dest]) # Remaining small shares on next drive
else:
# If overflow for next drive is more than can fit on that drive, ignore it, put overflow
# back in pool of shares to sort, and put small drive shares only in current drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest) # Shares that fit on current drive
all_drive_files_buffer[drive['name']].extend([f"{drive['name']}{share}" for share in sources_that_fit_on_dest])
# Put remaining small shares back into pool to work with for next drive
share_info.update({share: size for share, size in remaining_small_sources.items()})
else:
# Fit all small shares onto drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest)
# Calculate space used by shares, and subtract it from capacity to get free space
used_space = sum(all_share_info[share] for share in drive_share_list[drive['vid']])
drive_info[i]['free'] -= used_space
def split_share(share):
    """Recurse into a share or directory, and split the contents.

    Closure over the enclosing analysis: reads ``self`` and mutates
    ``drive_info`` (re-sorted, and each drive's 'free' decremented).

    Args:
        share (String): The share to split.

    Returns:
        dict[]: A list of shares to be split, or None if the analysis
            thread was killed mid-split.
        dict.share (String): The share to split.
        dict.files (dict): The list of drive splits.
            Key (String) is a drive volume ID,
            Value (String[]) is a list of filenames for a given drive.
        dict.exclusions (String[]): The list of files to exclude from the split.
    """
    # Enumerate list for tracking what shares go where
    drive_file_list = {drive['vid']: [] for drive in drive_info}
    # Size of every immediate child (file or directory) of the share
    file_info = {}
    share_path = self.get_share_source_path(share)
    try:
        for entry in os.scandir(share_path):
            if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
                break
            if entry.is_file():
                new_dir_size = entry.stat().st_size
            elif entry.is_dir():
                new_dir_size = get_directory_size(entry.path)
            # NOTE(review): if an entry is neither file nor dir (e.g. a
            # broken symlink), new_dir_size keeps its value from the
            # previous iteration — confirm this can't happen in practice
            filename = entry.path[len(share_path):].strip(os.path.sep)
            file_info[filename] = new_dir_size
    except PermissionError:
        # Unreadable share root: treat it as empty rather than abort
        pass
    if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
        return
    # For splitting shares, sort by largest free space first
    drive_info.sort(reverse=True, key=lambda x: x['free'])
    for i, drive in enumerate(drive_info):
        # Get list of files small enough to fit on drive
        total_small_files = {file: size for file, size in file_info.items() if size <= drive['free']}
        # Since the list of files is truncated to prevent an unreasonably large
        # number of combinations to check, we need to keep processing the file list
        # in chunks to make sure we check if all files can be fit on one drive
        files_that_fit_on_drive = []
        small_file_list = {}
        processed_small_files = []
        processed_file_size = 0
        while len(processed_small_files) < len(total_small_files):
            if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
                break
            # Trim the list of small files to those that aren't already processed
            small_file_list = {file: size for (file, size) in total_small_files.items() if file not in processed_small_files}
            # Make sure we don't end with an unreasonable number of combinations to go through
            # by sorting by largest first, and truncating
            # Sorting files first, since files can't be split, so it's preferred to have directories last
            file_list = {}
            dir_list = {}
            for file, size in small_file_list.items():
                if os.path.isfile(os.path.join(share_path, file)):
                    file_list[file] = size
                elif os.path.isdir(os.path.join(share_path, file)):
                    dir_list[file] = size
            # Sort file list by largest first, and truncate to prevent unreasonably large number of combinations
            small_file_list = sorted(file_list.items(), key=lambda x: x[1], reverse=True)
            small_file_list.extend(sorted(dir_list.items(), key=lambda x: x[1], reverse=True))
            # Only the 15 largest entries are brute-forced per pass (2^15 subsets)
            trimmed_small_file_list = {file[0]: file[1] for file in small_file_list[:15]}
            small_file_list = {file[0]: file[1] for file in small_file_list}
            # Try every combination of shares that fit to find result that uses most of that drive
            largest_sum = 0
            largest_set = []
            for n in range(1, len(trimmed_small_file_list) + 1):
                for subset in itertools.combinations(trimmed_small_file_list.keys(), n):
                    combination_total = sum(trimmed_small_file_list[file] for file in subset)
                    if (combination_total > largest_sum and combination_total <= drive['free'] - processed_file_size):
                        largest_sum = combination_total
                        largest_set = subset
            files_that_fit_on_drive.extend([file for file in largest_set])
            processed_small_files.extend([file for file in trimmed_small_file_list.keys()])
            # Anything assigned in this pass no longer needs a home
            file_info = {file: size for (file, size) in file_info.items() if file not in largest_set}
            # Subtract file size of each batch of files from the free space on the drive so the next batch sorts properly
            processed_file_size += sum([size for (file, size) in small_file_list.items() if file in largest_set])
        if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
            break
        # Assign files to drive, and subtract filesize from free space
        # Since we're sorting by largest free space first, there's no cases to move
        # to a larger drive. This means all files that can fit should be put on the
        # drive they fit on.
        drive_file_list[drive['vid']].extend(files_that_fit_on_drive)
        drive_info[i]['free'] -= processed_file_size
    if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
        return
    share_split_summary = [{
        'share': share,
        'files': drive_file_list,
        'exclusions': [file for file in file_info.keys()]
    }]
    # Whatever is still in file_info was too big to fit anywhere whole:
    # recurse and split it further.
    # NOTE(review): a recursive call interrupted by the kill flag returns
    # None, which would make extend() raise TypeError — confirm the kill
    # flag is checked upstream before this matters
    for file in file_info.keys():
        file_path = os.path.join(share, file)
        share_split_summary.extend(split_share(file_path))
    return share_split_summary
# For shares larger than all drives, recurse into each share
# share_info contains shares not sorted into drives
drive_exclusions = {drive['name']: [] for drive in master_drive_list}
for share in share_info.keys():
share_path = self.get_share_source_path(share)
if os.path.exists(share_path) and os.path.isdir(share_path):
summary = split_share(share)
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Build exclusion list for other drives\
# This is done by "inverting" the file list for each drive into a list of exclusions for other drives
for split in summary:
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
file_list = split['files']
for drive_vid, files in file_list.items():
# Add files to file list
all_drive_files_buffer[self.DRIVE_VID_INFO[drive_vid]['name']].extend(os.path.join(split['share'], file) for file in files)
# Each summary contains a split share, and any split subfolders, starting with
# the share and recursing into the directories
for split in summary:
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
share_name = split['share']
share_files = split['files']
share_exclusions = split['exclusions']
all_files = share_files.copy()
all_files['exclusions'] = share_exclusions
# For each drive, gather list of files to be written to other drives, and
# use that as exclusions
for drive_vid, files in share_files.items():
if len(files) > 0:
raw_exclusions = all_files.copy()
raw_exclusions.pop(drive_vid, None)
# Build master full exclusion list
master_exclusions = [file for file_list in raw_exclusions.values() for file in file_list]
# Remove share if excluded in parent splitting
if share_name in drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']]:
drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']].remove(share_name)
# Add new exclusions to list
drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']].extend([os.path.join(share_name, file) for file in master_exclusions])
drive_share_list[drive_vid].append(share_name)
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
def recurse_file_list(directory):
    """Get a complete list of files in a directory.

    Args:
        directory (String): The directory to check.

    Returns:
        String[]: Paths of all files and directories below ``directory``
        (recursive). An empty directory contributes its own path so it can
        still be recreated on the destination.
    """
    if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
        return []
    file_list = []
    try:
        # BUG FIX: os.scandir() returns an iterator, so the previous
        # len(os.scandir(directory)) raised TypeError on every call; the
        # blanket `except TypeError` swallowed it, so this function always
        # returned []. Materialize the entries once so emptiness can be
        # tested and the iterator is consumed only once.
        entries = list(os.scandir(directory))
        for entry in entries:
            # For each entry, either add file to list, or recurse into directory
            if entry.is_file():
                file_list.append(entry.path)
            elif entry.is_dir():
                file_list.append(entry.path)
                file_list.extend(recurse_file_list(entry.path))
        if not entries:
            # No files, so append the directory itself. (The old code
            # referenced the loop variable here, which is never bound when
            # the directory is empty.)
            file_list.append(directory)
    except OSError:
        # NotADirectoryError and PermissionError are OSError subclasses, so
        # a single handler covers all of the original except clauses.
        return []
    return file_list
# For each drive in file list buffer, recurse into each directory and build a complete file list
all_drive_files = {drive['name']: [] for drive in master_drive_list}
for drive, files in all_drive_files_buffer.items():
for file in files:
all_drive_files[drive].extend(recurse_file_list(file))
def build_delta_file_list(drive, path, shares, exclusions):
    """Get lists of files to delete and replace from the destination drive, that no longer
    exist in the source, or have changed.

    Args:
        drive (String): The destination drive (mount point) to scan.
        path (String): The path, relative to the drive root, to scan.
        shares (String[]): The list of shares the drive should contain.
        exclusions (String[]): The list of files and folders to exclude.

    Returns:
        {
            'delete' (tuple(String, String, int)[]): (drive, path, size)
                entries for deletion.
            'replace' (tuple(String, String, String, int, int)[]): (drive,
                share, path, source size, dest size) entries for replacement.
        }
    """
    # Folders on the destination that must never be treated as stale data
    special_ignore_list = [self.BACKUP_CONFIG_DIR, '$RECYCLE.BIN', 'System Volume Information']
    file_list = {
        'delete': [],
        'replace': []
    }
    try:
        # Shares whose subtree contains (or equals) the current path
        shares_to_process = [share for share in shares if share == path or path.find(share + os.path.sep) == 0]
        for entry in os.scandir(os.path.join(drive, path)):
            if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
                return file_list
            # Path of the entry relative to the drive root
            stub_path = entry.path[len(drive):].strip(os.path.sep)
            # For each entry, either add filesize to the total, or recurse into the directory
            if entry.is_file():
                file_stat = entry.stat()
                if (stub_path.find(os.path.sep) == -1  # Files should not be on root of drive
                        # or not os.path.isfile(source_path)  # File doesn't exist in source, so delete it
                        or stub_path in exclusions  # File is excluded from drive
                        or len(shares_to_process) == 0):  # File should only count if dir is share or child, not parent
                    file_list['delete'].append((drive, stub_path, file_stat.st_size))
                    self.update_file_detail_list_fn('delete', entry.path)
                else:  # File is in share on destination drive
                    # Longest share name wins: deepest (most specific) share
                    target_share = max(shares_to_process, key=len)
                    path_slug = stub_path[len(target_share):].strip(os.path.sep)
                    share_path = self.get_share_source_path(target_share)
                    source_path = os.path.join(share_path, path_slug)
                    if os.path.isfile(source_path):  # File exists on source
                        if (file_stat.st_mtime != os.path.getmtime(source_path)  # Existing file mtime differs from source
                                or file_stat.st_size != os.path.getsize(source_path)):  # Existing file is different size than source
                            # If existing dest file is not same time as source, it needs to be replaced
                            file_list['replace'].append((drive, target_share, path_slug, os.path.getsize(source_path), file_stat.st_size))
                            self.update_file_detail_list_fn('copy', entry.path)
                    else:  # File doesn't exist on source, so delete it
                        file_list['delete'].append((drive, stub_path, file_stat.st_size))
                        self.update_file_detail_list_fn('delete', entry.path)
            elif entry.is_dir():
                found_share = False
                for item in shares:
                    path_slug = stub_path[len(item):].strip(os.path.sep)
                    share_path = self.get_share_source_path(item)
                    source_path = os.path.join(share_path, path_slug)
                    if (stub_path == item  # Dir is share, so it stays
                            or (stub_path.find(item + os.path.sep) == 0 and os.path.isdir(source_path))  # Dir is subdir inside share, and it exists in source
                            or item.find(stub_path + os.path.sep) == 0):  # Dir is parent directory of a share we're copying, so it stays
                        # Recurse into the share
                        new_list = build_delta_file_list(drive, stub_path, shares, exclusions)
                        file_list['delete'].extend(new_list['delete'])
                        file_list['replace'].extend(new_list['replace'])
                        found_share = True
                        break
                if (not found_share or stub_path in exclusions) and stub_path not in special_ignore_list:
                    # Directory isn't share, or part of one, and isn't a special folder or
                    # exclusion, so delete it
                    file_list['delete'].append((drive, stub_path, get_directory_size(entry.path)))
                    self.update_file_detail_list_fn('delete', entry.path)
    except NotADirectoryError:
        return {
            'delete': [],
            'replace': []
        }
    except PermissionError:
        return {
            'delete': [],
            'replace': []
        }
    except OSError:
        return {
            'delete': [],
            'replace': []
        }
    return file_list
def build_new_file_list(drive, path, shares, exclusions):
    """Get lists of files to copy to the destination drive, that only exist on the
    source.

    Args:
        drive (String): The destination drive to check against.
        path (String): The path to check, relative to each share root.
        shares (String[]): The list of shares the drive should contain.
        exclusions (String[]): The list of files and folders to exclude.

    Returns:
        {
            'new' (tuple(String, String, String, int)[]): (drive, share,
                path, size) entries for files to copy.
        }
    """
    def scan_share_source_for_new_files(drive, share, path, exclusions, all_shares):
        """Get lists of files to copy to the destination drive from a given share.

        Args:
            drive (String): The destination drive to check against.
            share (String): The share to check.
            path (String): The path to check, relative to the share root.
            exclusions (String[]): The list of files and folders to exclude.
            all_shares (String[]): The list of shares the drive should contain, to
                avoid recursing into split shares.

        Returns:
            {
                'new' (tuple(String, String, String, int)[]): (drive, share,
                    path, size) entries for files to copy.
            }
        """
        file_list = {
            'new': []
        }
        try:
            share_path = self.get_share_source_path(share)
            source_path = os.path.join(share_path, path)
            # Check if directory has files
            if len(os.listdir(source_path)) > 0:
                for entry in os.scandir(source_path):
                    if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
                        return file_list
                    # Entry path relative to the share root
                    stub_path = entry.path[len(share_path):].strip(os.path.sep)
                    # Key used to match against the drive's exclusion list
                    exclusion_stub_path = os.path.join(share, stub_path)
                    target_path = os.path.join(drive, share, stub_path)
                    # For each entry, either add filesize to the total, or recurse into the directory
                    if entry.is_file():
                        if (not os.path.isfile(target_path)  # File doesn't exist in destination drive
                                and exclusion_stub_path not in exclusions):  # File isn't part of drive exclusion
                            file_list['new'].append((drive, share, stub_path, entry.stat().st_size))
                            self.update_file_detail_list_fn('copy', target_path)
                    elif entry.is_dir():
                        # Avoid recursing into any split share directories and double counting files
                        if exclusion_stub_path not in all_shares:
                            if os.path.isdir(target_path):
                                # If exists on dest, recurse into it
                                new_list = scan_share_source_for_new_files(drive, share, stub_path, exclusions, all_shares)
                                file_list['new'].extend(new_list['new'])
                                # break
                            elif exclusion_stub_path not in exclusions:
                                # Path doesn't exist on dest, so add to list if not excluded
                                new_list = scan_share_source_for_new_files(drive, share, stub_path, exclusions, all_shares)
                                file_list['new'].extend(new_list['new'])
                                # break
            elif not os.path.isdir(os.path.join(drive, share, path)):
                # If no files in folder on source, create empty folder in destination
                # NOTE(review): source_path already includes `path`, so joining
                # `path` again points at a location that normally doesn't
                # exist; the intended size is probably that of source_path
                # itself — confirm against get_directory_size's behavior for
                # missing paths
                return {
                    'new': [(drive, share, path, get_directory_size(os.path.join(source_path, path)))]
                }
        except NotADirectoryError:
            return {
                'new': []
            }
        except PermissionError:
            return {
                'new': []
            }
        except OSError:
            return {
                'new': []
            }
        return file_list
    file_list = {
        'new': []
    }
    for share in shares:
        if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
            break
        file_list['new'].extend(scan_share_source_for_new_files(drive, share, path, exclusions, shares)['new'])
    return file_list
# Build list of files/dirs to delete and replace
self.delete_file_list = {}
self.replace_file_list = {}
self.new_file_list = {}
purge_command_list = []
copy_command_list = []
display_purge_command_list = []
display_copy_command_list = []
for drive, shares in drive_share_list.items():
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
modified_file_list = build_delta_file_list(self.DRIVE_VID_INFO[drive]['name'], '', shares, drive_exclusions[self.DRIVE_VID_INFO[drive]['name']])
delete_items = modified_file_list['delete']
if len(delete_items) > 0:
self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']] = delete_items
file_delete_list = [os.path.join(drive, file) for drive, file, size in delete_items]
display_purge_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([size for drive, file, size in delete_items]),
'fileList': file_delete_list,
'mode': 'delete'
})
purge_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_delete_list,
'payload': delete_items,
'mode': 'delete'
})
# Build list of files to replace
replace_items = modified_file_list['replace']
replace_items.sort(key=lambda x: x[1])
if len(replace_items) > 0:
self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']] = replace_items
file_replace_list = [os.path.join(drive, share, file) for drive, share, file, source_size, dest_size in replace_items]
display_copy_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([source_size for drive, share, file, source_size, dest_size in replace_items]),
'fileList': file_replace_list,
'mode': 'replace'
})
copy_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_replace_list,
'payload': replace_items,
'mode': 'replace'
})
# Build list of new files to copy
new_items = build_new_file_list(self.DRIVE_VID_INFO[drive]['name'], '', shares, drive_exclusions[self.DRIVE_VID_INFO[drive]['name']])['new']
if len(new_items) > 0:
self.new_file_list[self.DRIVE_VID_INFO[drive]['name']] = new_items
file_copy_list = [os.path.join(drive, share, file) for drive, share, file, size in new_items]
display_copy_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([size for drive, share, file, size in new_items]),
'fileList': file_copy_list,
'mode': 'copy'
})
copy_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_copy_list,
'payload': new_items,
'mode': 'copy'
})
# Gather and summarize totals for analysis summary
show_file_info = []
for i, drive in enumerate(drive_share_list.keys()):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
file_summary = []
drive_total = {
'running': 0,
'delta': 0,
'delete': 0,
'replace': 0,
'copy': 0,
'new': 0
}
if self.DRIVE_VID_INFO[drive]['name'] in self.delete_file_list.keys():
drive_total['delete'] = sum([size for drive, file, size in self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] -= drive_total['delete']
self.totals['delta'] -= drive_total['delete']
file_summary.append(f"Deleting {len(self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']])} files ({human_filesize(drive_total['delete'])})")
if self.DRIVE_VID_INFO[drive]['name'] in self.replace_file_list.keys():
drive_total['replace'] = sum([source_size for drive, share, file, source_size, dest_size in self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] += drive_total['replace']
drive_total['copy'] += drive_total['replace']
drive_total['delta'] += sum([source_size - dest_size for drive, share, file, source_size, dest_size in self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']]])
file_summary.append(f"Updating {len(self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']])} files ({human_filesize(drive_total['replace'])})")
if self.DRIVE_VID_INFO[drive]['name'] in self.new_file_list.keys():
drive_total['new'] = sum([size for drive, share, file, size in self.new_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] += drive_total['new']
drive_total['copy'] += drive_total['new']
drive_total['delta'] += drive_total['new']
file_summary.append(f"{len(self.new_file_list[self.DRIVE_VID_INFO[drive]['name']])} new files ({human_filesize(drive_total['new'])})")
# Increment master totals
# Double copy total to account for both copy and verify operations
self.totals['master'] += 2 * drive_total['copy'] + drive_total['delete']
self.totals['delete'] += drive_total['delete']
self.totals['delta'] += drive_total['delta']
if len(file_summary) > 0:
show_file_info.append((self.DRIVE_VID_INFO[drive]['name'], '\n'.join(file_summary)))
if not self.thread_manager.threadlist['Backup Analysis']['killFlag']:
self.analysis_summary_display_fn(
title='Files',
payload=show_file_info
)
# Concat both lists into command list
self.command_list = [cmd for cmd in purge_command_list]
self.command_list.extend([cmd for cmd in copy_command_list])
# Concat lists into display command list
display_command_list = [cmd for cmd in display_purge_command_list]
display_command_list.extend([cmd for cmd in display_copy_command_list])
# Fix display index on command list
for i, cmd in enumerate(self.command_list):
self.command_list[i]['displayIndex'] = i
self.analysis_summary_display_fn(
title='Summary',
payload=[(self.DRIVE_VID_INFO[drive]['name'], '\n'.join(shares), drive in connected_vid_list) for drive, shares in drive_share_list.items()]
)
self.display_backup_command_info_fn(display_command_list)
self.analysis_valid = True
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_READY_FOR_BACKUP)
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_BTN, {'state': 'normal'})
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_END)
else:
# If thread halted, mark analysis as invalid
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_READY_FOR_ANALYSIS)
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_END)
self.update_ui_component_fn(Status.RESET_ANALYSIS_OUTPUT)
self.update_ui_component_fn(Status.UNLOCK_TREE_SELECTION)
if not self.CLI_MODE:
self.progress.stop_indeterminate()
self.analysis_running = False
# TODO: Make changes to existing @config check the existing for missing @drives, and delete the config file from drives we unselected if there's multiple drives in a config
# TODO: If a @drive @config is overwritten with a new config file, due to the drive
# being configured for a different backup, then we don't want to delete that file
# In that case, the config file should be ignored. Thus, we need to delete configs
# on unselected drives only if the config file on the drive we want to delete matches
# the config on selected drives
# TODO: When @drive @selection happens, drives in the @config should only be selected if the config on the other drive matches. If it doesn't don't select it by default, and warn about a conflict.
    def write_config_to_disks(self):
        """Write the current running backup config to config files on the drives.

        Persists the selected shares, the full VID list (including missing
        drives), and one section per destination drive with its vid/serial/
        capacity. No-op when sources or destinations are empty.
        """
        # Only write when there is a complete config to persist.
        if self.config['sources'] and self.config['destinations']:
            share_list = ','.join([item['dest_name'] for item in self.config['sources']])
            raw_vid_list = [drive['vid'] for drive in self.config['destinations']]
            raw_vid_list.extend(self.config['missing_drives'].keys())
            vid_list = ','.join(raw_vid_list)
            # For each drive letter connected, get drive info, and write file
            for drive in self.config['destinations']:
                # If config exists on drives, back it up first
                # NOTE(review): existence check and backup use drive['name'],
                # but the new file is written via
                # DRIVE_VID_INFO[drive['vid']]['name'] — confirm these always
                # resolve to the same mount point.
                if os.path.isfile(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE)):
                    shutil.move(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE), os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE + '.old'))
                drive_config_file = Config(os.path.join(self.DRIVE_VID_INFO[drive['vid']]['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE))
                # Write shares and VIDs to config file
                drive_config_file.set('selection', 'sources', share_list)
                drive_config_file.set('selection', 'vids', vid_list)
                # Write info for each drive to its own section
                for cur_drive in self.config['destinations']:
                    drive_config_file.set(cur_drive['vid'], 'vid', cur_drive['vid'])
                    drive_config_file.set(cur_drive['vid'], 'serial', cur_drive['serial'])
                    drive_config_file.set(cur_drive['vid'], 'capacity', cur_drive['capacity'])
                # Write info for missing drives
                for drive_vid, capacity in self.config['missing_drives'].items():
                    drive_config_file.set(drive_vid, 'vid', drive_vid)
                    drive_config_file.set(drive_vid, 'serial', 'Unknown')
                    drive_config_file.set(drive_vid, 'capacity', capacity)
def run(self):
"""Once the backup analysis is run, and drives and shares are selected, run the backup.
This function is run in a new thread, but is only run if the backup config is valid.
If sanity_check() returns False, the backup isn't run.
"""
self.backup_running = True
if not self.analysis_valid or not self.sanity_check():
return
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_START)
self.update_ui_component_fn(Status.LOCK_TREE_SELECTION)
self.progress.set(0)
self.progress.set_max(self.totals['master'])
for cmd in self.command_list:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Pending', fg=self.uicolor.PENDING)
if cmd['type'] == 'fileList':
self.cmd_info_blocks[cmd['displayIndex']].configure('current_file', text='Pending', fg=self.uicolor.PENDING)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Pending', fg=self.uicolor.PENDING)
# Write config file to drives
self.write_config_to_disks()
self.totals['running'] = 0
self.totals['buffer'] = 0
self.totals['progressBar'] = 0
timer_started = False
for cmd in self.command_list:
if cmd['type'] == 'fileList':
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Running', fg=self.uicolor.RUNNING)
if not timer_started:
timer_started = True
self.backup_start_time = datetime.now()
self.thread_manager.start(ThreadManager.KILLABLE, name='backupTimer', target=self.start_backup_timer_fn)
if cmd['mode'] == 'delete':
for drive, file, size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
src = os.path.join(drive, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
self.do_del_fn(src, size, gui_options)
# If file hash was in list, remove it, and write changes to file
if file in self.file_hashes[drive].keys():
del self.file_hashes[drive][file]
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
if cmd['mode'] == 'replace':
for drive, share, file, source_size, dest_size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
share_path = self.get_share_source_path(share)
src = os.path.join(share_path, file)
dest = os.path.join(drive, share, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
file_hashes = self.do_copy_fn(src, dest, drive, gui_options)
self.file_hashes[drive].update(file_hashes)
# Write updated hash file to drive
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
elif cmd['mode'] == 'copy':
for drive, share, file, size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
share_path = self.get_share_source_path(share)
src = os.path.join(share_path, file)
dest = os.path.join(drive, share, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
file_hashes = self.do_copy_fn(src, dest, drive, gui_options)
self.file_hashes[drive].update(file_hashes)
# Write updated hash file to drive
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
if self.thread_manager.threadlist['Backup']['killFlag'] and self.totals['running'] < self.totals['master']:
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Aborted', fg=self.uicolor.STOPPED)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Aborted', fg=self.uicolor.STOPPED)
else:
print(f"{bcolor.FAIL}Backup aborted by user{bcolor.ENDC}")
break
else:
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Done', fg=self.uicolor.FINISHED)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Done', fg=self.uicolor.FINISHED)
else:
print(f"{bcolor.OKGREEN}Backup finished{bcolor.ENDC}")
self.thread_manager.kill('backupTimer')
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_END)
self.update_ui_component_fn(Status.UNLOCK_TREE_SELECTION)
self.backup_running = False
def get_backup_start_time(self):
"""
Returns:
datetime: The time the backup started. (default 0)
"""
if self.backup_start_time:
return self.backup_start_time
else:
return 0
def is_running(self):
"""
Returns:
bool: Whether or not the backup is actively running something.
"""
return self.analysis_running or self.backup_running
| 58,219 | 15,499 |
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A sprite representing a single bullet fired from the ship."""

    def __init__(self, ai_settings, screen, ship):
        """Create a bullet positioned at the top-center of the ship."""
        super(Bullet, self).__init__()
        self.screen = screen
        # Build the rect at the origin, then snap it onto the ship.
        width = ai_settings.bullet_width
        height = ai_settings.bullet_height
        self.rect = pygame.Rect(0, 0, width, height)
        self.rect.centerx = ship.rect.centerx
        self.rect.top = ship.rect.top
        # Track the vertical position as a float for sub-pixel movement.
        self.y = float(self.rect.y)
        self.color = ai_settings.bullet_color
        self.speed_factor = ai_settings.bullet_speed_factor

    def update(self):
        """Advance the bullet up the screen by one tick."""
        self.y -= self.speed_factor   # float position
        self.rect.y = self.y          # sync the integer rect used for drawing

    def draw_bullet(self):
        """Render the bullet onto the screen surface."""
        pygame.draw.rect(self.screen, self.color, self.rect)
| 1,134 | 393 |
# One-line greeting script.
message = 'HELLO JJP'
print(message)
| 20 | 11 |
import time
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    """Bit-bang one reading from an MCP3008 ADC over software SPI.

    Args:
        adcnum: channel number 0-7; any other value returns -1.
        clockpin, mosipin, misopin, cspin: BCM GPIO numbers of the
            clock, MOSI, MISO and chip-select lines.

    Returns:
        int: 10-bit conversion result (0-1023), or -1 for a bad channel.
    """
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)  # bring CS low
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Clock out the 5 command bits, MSB first.
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    GPIO.output(cspin, True)
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
# change these as desired - they're the pins connected from the
# SPI port on the ADC to the Cobbler
SPICLK = 11
SPIMISO = 9
SPIMOSI = 10
SPICS = 22
GPIO.setwarnings(False)
# set up the SPI interface pins
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)
# Accept a reading only after SUCCESS_TRIES consecutive samples that each
# differ from the previous sample by less than MAX_VARIATION.
MAX_VARIATION = 30
SUCCESS_TRIES = 3
adc_channel = int(input())  # ADC channel number read from stdin
value = 0
success = 0
while success < SUCCESS_TRIES:
    readvalue = readadc(adc_channel, SPICLK, SPIMOSI, SPIMISO, SPICS)
    if abs(readvalue - value) < MAX_VARIATION:
        success += 1
    else:
        success = 0  # unstable reading: restart the streak
    value = readvalue
    time.sleep(1.0)  # one sample per second
print(value)
GPIO.cleanup()
| 1,648 | 769 |
from django import template
from ..models import HomePage
register = template.Library()
@register.simple_tag
def get_homepages(count=None):
    """Return all HomePage objects, limited to *count* when given (truthy)."""
    queryset = HomePage.objects.all()
    if count:
        queryset = queryset[:count]
    return queryset
| 237 | 69 |
from core.views import BaseView, LoginRequiredMixin
from ..models import PokerMember, PokerRoom
class SettingsView(LoginRequiredMixin, BaseView):
    """Room settings page: rename room/member, toggle timing, or exit the room."""

    template_name = 'settings.html'

    def get(self, request, token):
        """Handle GET request."""
        # Users who are not an active member of this room are bounced back
        # to the room page instead of seeing the settings form.
        if not self.member:
            return self.redirect('poker:room', args=(token,))
        return super().get(request, token)

    def post(self, request, token):
        """Handle POST request."""
        # Exit room
        if '_exit' in request.POST:
            self.member.is_active = False
            self.member.save()
            return self.redirect('poker:index')
        room_name = request.POST.get('room_name')
        member_name = request.POST.get('member_name')
        use_time = request.POST.get('use_time')
        # NOTE(review): int(use_time) raises TypeError/ValueError if the form
        # omits 'use_time' or sends a non-numeric value — assumes the template
        # always submits it; confirm.
        self.room.name = room_name
        self.room.use_time = bool(int(use_time))
        self.member.name = member_name
        self.room.save()
        self.member.save()
        return self.redirect('poker:room', args=(token,))

    def get_context_data(self, *args, **kwargs):
        """Get context data."""
        return {
            'room': self.room,
            'member': self.member,
        }

    def dispatch(self, *args, **kwargs):
        """Dispatch request."""
        # dispatch() runs before get()/post(), so self.room, self.poker_round
        # and self.member are available to every handler above.
        self.user = (
            self.request.user if self.request.user.is_authenticated else None
        )
        self.room = self.get_object_or_404(PokerRoom, token=kwargs['token'])
        self.poker_round = self.room.get_poker_round()
        self.member = PokerMember.objects.filter(
            room=self.room,
            user=self.user,
            is_active=True,
        ).first()
        return super().dispatch(*args, **kwargs)
| 1,720 | 512 |
"""HyperHDR Client package."""
| 31 | 11 |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import attr
import mock
import openhtf
from openhtf import plugs
from openhtf.core import base_plugs
def plain_func():
  """Plain Docstring."""
  # NOTE: TestPhaseDescriptor.test_basics asserts this docstring verbatim
  # via phase.doc — do not reword it.
  pass
def normal_test_phase():
  """Trivial phase fixture that returns a fixed sentinel string."""
  sentinel = 'return value'
  return sentinel
@openhtf.PhaseOptions(name='func-name({input_value[0]})')
def extra_arg_func(input_value=None):
  # The phase-name template indexes the first character of input_value,
  # e.g. input_value='input arg' names the phase 'func-name(i)'.
  return input_value
class ExtraPlug(base_plugs.BasePlug):
  # Class attribute consumed by echo() and by the phase-name template in
  # extra_plug_func.
  name = 'extra_plug_0'

  def echo(self, phrase):
    """Return '<plug name> says <phrase>'."""
    return '%s says %s' % (self.name, phrase)
@openhtf.PhaseOptions(name='extra_plug_func[{plug.name}][{phrase}]')
@plugs.plug(plug=ExtraPlug.placeholder)
def extra_plug_func(plug, phrase):
  # Phase name is formatted from the resolved plug's name and the phrase arg.
  return plug.echo(phrase)
class PlaceholderCapablePlug(base_plugs.BasePlug):
  # auto_placeholder lets this class act as a placeholder that with_plugs()
  # replaces with a concrete subclass.
  auto_placeholder = True
class SubPlaceholderCapablePlug(PlaceholderCapablePlug):
  # Concrete subclass used to satisfy the auto-placeholder above.
  pass
@plugs.plug(placed=PlaceholderCapablePlug)
def placeholder_using_plug(placed):
  # Fixture phase whose only purpose is declaring a placeholder plug.
  del placed  # Unused.
@plugs.plug(subplaced=SubPlaceholderCapablePlug)
def sub_placeholder_using_plug(subplaced):
  # Fixture declaring a subclass placeholder; the tests assert with_plugs()
  # on this phase raises InvalidPlugError.
  del subplaced  # Unused.
class NonPlugBase(object):
  """A base class that is not a BasePlug."""
class PlugVersionOfNonPlug(NonPlugBase, base_plugs.BasePlug):
  """Plug implementation of a non-plug base."""
# Manually-built placeholder keyed on the non-plug base class.
custom_placeholder = base_plugs.PlugPlaceholder(NonPlugBase)


@plugs.plug(custom=custom_placeholder)
def custom_placeholder_phase(custom):
  # Fixture phase using the manual placeholder above.
  del custom  # Unused.
class TestPhaseDescriptor(unittest.TestCase):
  """Tests for PhaseDescriptor wrapping, naming, with_args and with_plugs."""

  def setUp(self):
    super(TestPhaseDescriptor, self).setUp()
    # Stand-in execution context: the phases under test only consult the
    # plug manager and execution uid.
    self._phase_data = mock.Mock(
        plug_manager=plugs.PlugManager(), execution_uid='01234567890')

  def test_basics(self):
    phase = openhtf.PhaseDescriptor.wrap_or_copy(plain_func)
    self.assertIs(phase.func, plain_func)  # pytype: disable=wrong-arg-types
    self.assertEqual(0, len(phase.plugs))
    self.assertEqual('plain_func', phase.name)
    self.assertEqual('Plain Docstring.', phase.doc)
    phase(self._phase_data)
    test_phase = openhtf.PhaseDescriptor.wrap_or_copy(normal_test_phase)
    self.assertEqual('normal_test_phase', test_phase.name)
    self.assertEqual('return value', test_phase(self._phase_data))

  def test_multiple_phases(self):
    # Copying a descriptor must yield distinct attr values for every field
    # except the wrapped function itself.
    phase = openhtf.PhaseDescriptor.wrap_or_copy(plain_func)
    second_phase = openhtf.PhaseDescriptor.wrap_or_copy(phase)
    for field in attr.fields(type(phase)):
      if field.name == 'func':
        continue
      self.assertIsNot(
          getattr(phase, field.name), getattr(second_phase, field.name))

  def test_callable_name_with_args(self):
    def namer(**kwargs):
      return 'renamed_{one}_{two}'.format(**kwargs)

    @openhtf.PhaseOptions(name=namer)
    def custom_phase(one=None, two=None):
      del one  # Unused.
      del two  # Unused.

    # A callable name only takes effect once args are bound via with_args().
    self.assertEqual('custom_phase', custom_phase.name)
    arged = custom_phase.with_args(one=1, two=2)
    self.assertEqual('renamed_1_2', arged.name)

  def test_with_args(self):
    phase = extra_arg_func.with_args(input_value='input arg')
    result = phase(self._phase_data)
    first_result = phase(self._phase_data)
    self.assertIs(phase.func, extra_arg_func.func)
    self.assertEqual('input arg', result)
    self.assertEqual('func-name(i)', phase.name)
    self.assertEqual('input arg', first_result)
    # Must do with_args() on the original phase, otherwise it has already been
    # formatted and the format-arg information is lost.
    second_phase = extra_arg_func.with_args(input_value='second input')
    second_result = second_phase(self._phase_data)
    self.assertEqual('second input', second_result)
    self.assertEqual('func-name(s)', second_phase.name)

  def test_with_args_argument_not_specified(self):
    # Unknown kwargs are silently dropped for phases without **kwargs.
    phase = extra_arg_func.with_args(arg_does_not_exist=1)
    self.assertNotIn('arg_does_not_exist', phase.extra_kwargs)

  def test_with_args_kwargs(self):

    @openhtf.PhaseOptions()
    def phase(test_api, **kwargs):
      del test_api  # Unused.
      del kwargs  # Unused.

    # With a **kwargs catch-all, unknown names are retained in extra_kwargs.
    updated = phase.with_args(arg_does_not_exist=1)
    self.assertEqual({'arg_does_not_exist': 1}, updated.extra_kwargs)

  def test_with_plugs(self):
    self._phase_data.plug_manager.initialize_plugs([ExtraPlug])
    phase = extra_plug_func.with_plugs(plug=ExtraPlug).with_args(phrase='hello')
    self.assertIs(phase.func, extra_plug_func.func)
    self.assertEqual(1, len(phase.plugs))
    self.assertEqual('extra_plug_func[extra_plug_0][hello]', phase.options.name)
    self.assertEqual('extra_plug_func[extra_plug_0][hello]', phase.name)
    result = phase(self._phase_data)
    self.assertEqual('extra_plug_0 says hello', result)

  def test_with_plugs_unknown_plug_name_ignored(self):
    # Binding a name the phase never declared returns the phase unchanged.
    phase = placeholder_using_plug.with_plugs(undefined_plug=ExtraPlug)
    self.assertIs(phase, placeholder_using_plug)

  def test_with_plugs_auto_placeholder(self):
    phase = placeholder_using_plug.with_plugs(placed=SubPlaceholderCapablePlug)
    self.assertIs(phase.func, placeholder_using_plug.func)
    self.assertEqual(1, len(phase.plugs))

  def test_with_plugs_subclass_auto_placeholder_error(self):
    with self.assertRaises(base_plugs.InvalidPlugError):
      sub_placeholder_using_plug.with_plugs(subplaced=SubPlaceholderCapablePlug)

  def test_with_plugs_auto_placeholder_non_subclass_error(self):
    with self.assertRaises(base_plugs.InvalidPlugError):
      placeholder_using_plug.with_plugs(placed=ExtraPlug)

  def test_with_plugs_custom_placeholder_is_base_plug(self):
    phase = custom_placeholder_phase.with_plugs(custom=PlugVersionOfNonPlug)
    self.assertIs(phase.func, custom_placeholder_phase.func)
    self.assertEqual([base_plugs.PhasePlug('custom', PlugVersionOfNonPlug)],
                     phase.plugs)
| 6,268 | 2,184 |
def convert_tt(element, text):
    """Wrap *text* in double backticks (inline-literal markup).

    *element* is accepted for interface compatibility but unused.
    Falsy text (None, empty string) is returned unchanged.
    """
    if not text:
        return text
    return "``{}``".format(text)
| 94 | 33 |
def fixipmapping(ipparams, posflux, etc=None, retbinflux=False, retbinstd=False):
    """
    This function returns the fixed best-fit intra-pixel mapping.

    Parameters
    ----------
    ipparams : tuple
        unused
    posflux : tuple
        (bestmip, binflux, binstd): best-fit ip mapping plus binned flux
        and binned standard deviation.
    etc : list, optional
        unused (default changed from a mutable [] to None; behavior is
        identical since the argument is never read)
    retbinflux : bool
        If True, also return the binned flux.
    retbinstd : bool
        If True, also return the binned standard deviation.

    Returns
    -------
    output : 1D array, size = # of measurements
        Intra-pixel-corrected flux multiplier, alone or in a list with the
        requested binned quantities.

    Revisions
    ---------
    2010-08-03 Kevin Stevenson, UCF
        kevin218@knights.ucf.edu
        Original version
    """
    bestmip, binflux, binstd = posflux
    # Return fit with or without binned flux / binned std.
    if retbinflux and retbinstd:
        return [bestmip, binflux, binstd]
    if retbinflux:
        return [bestmip, binflux]
    if retbinstd:
        return [bestmip, binstd]
    return bestmip
| 954 | 311 |
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Filipe Laíns <lains@riseup.net>
from typing import List, Set
import _testsuite
import pages
class Device(_testsuite.Device):
    """Test-suite device wrapper.

    Adapts a set of high-level ``pages.Function`` values into the firmware
    page array the ``_testsuite.Device`` base class expects.
    """

    def __init__(
        self,
        *,
        name: str,
        functions: Set[pages.Function],
    ) -> None:
        super().__init__(
            name=name,
            # Convert the function set to the raw firmware page array.
            functions=pages.functions_to_fw_page_array(functions),
        )

    def hid_send(self, data: List[int]):
        '''``hid_send`` callback for the HID HAL'''
import gfapy
import unittest
class TestApiCustomRecords(unittest.TestCase):
    """Behaviour of custom (non-standard record_type) GFA lines."""

    def test_from_string(self):
        # Unknown record types parse into CustomRecord with positional fields.
        str1 = "X\tthis is a\tcustom line"
        l1 = gfapy.Line(str1)
        self.assertEqual(gfapy.line.CustomRecord, l1.__class__)
        self.assertEqual("X", l1.record_type)
        self.assertEqual("this is a", l1.field1)
        self.assertEqual("custom line", l1.field2)

    def test_from_string_with_tags(self):
        # Only the trailing tag-shaped token becomes a tag; an earlier
        # tag-looking token remains a positional field (field1 below).
        str2 = "XX\txx:i:2\txxxxxx\txx:i:1"
        l2 = gfapy.Line(str2)
        self.assertEqual(gfapy.line.CustomRecord, l2.__class__)
        self.assertEqual("XX", l2.record_type)
        self.assertEqual("xx:i:2", l2.field1)
        self.assertEqual("xxxxxx", l2.field2)
        with self.assertRaises(AttributeError): l2.field3
        self.assertEqual(1, l2.xx)
        l2.xx = 3
        self.assertEqual(3, l2.xx)
        l2.field1 = "blabla"
        self.assertEqual("blabla", l2.field1)

    def test_to_s(self):
        # Round-trip: str() reproduces the original line text.
        str1 = "X\tthis is a\tcustom line"
        self.assertEqual(str1, str(gfapy.Line(str1)))
        str2 = "XX\txx:i:2\txxxxxx\txx:i:1"
        self.assertEqual(str2, str(gfapy.Line(str2)))

    def test_add_custom_records(self):
        gfa = gfapy.Gfa(version="gfa2")
        x1 = "X\tthis is a custom record"
        gfa.append(x1) # nothing raised
        self.assertEqual(["X"], gfa.custom_record_keys)
        self.assertEqual([x1], [str(x) for x in gfa.custom_records_of_type("X")])

    def test_delete_custom_records(self):
        gfa = gfapy.Gfa(version="gfa2")
        c = "X\tThis is a custom_record"
        gfa.append(c)
        self.assertEqual([c], [str(x) for x in gfa.custom_records_of_type("X")])
        # disconnect() detaches a line from the containing Gfa object.
        for x in gfa.custom_records_of_type("X"): x.disconnect()
        self.assertEqual([], gfa.custom_records_of_type("X"))

    def test_custom_records(self):
        x = ["X\tVN:Z:1.0", "Y\ttesttesttest"]
        self.assertEqual(x[0], str(gfapy.Gfa(x).custom_records_of_type("X")[0]))
        self.assertEqual(x[1], str(gfapy.Gfa(x).custom_records_of_type("Y")[0]))
| 1,899 | 810 |
from django.db import models
from auditlog.registry import auditlog
from apps.core.models import (
TypePet,
PetSize,
PetColor,
)
from apps.partner.models import Partner
class Pet(models.Model):
    """An adoptable pet, its classification and adoption status."""

    MAN = 1
    WOMAN = 2
    SEX_CHOICES = (
        (MAN, 'Male'),
        (WOMAN, 'Female'),
    )

    name = models.CharField(max_length=50, verbose_name='Name')
    sex = models.PositiveIntegerField(choices=SEX_CHOICES, verbose_name='Sex')
    aproximated_age = models.CharField(max_length=50, verbose_name='Aproximated age')
    birthdate = models.DateField(blank=True, null=True, verbose_name='Birthdate')
    # Classification lookups
    typepet = models.ForeignKey(to=TypePet, on_delete=models.CASCADE, verbose_name='Type of pet')
    petsize = models.ForeignKey(to=PetSize, on_delete=models.CASCADE, verbose_name='Pet Size')
    petcolor = models.ForeignKey(to=PetColor, on_delete=models.CASCADE, verbose_name='Pet Color')
    # Adoption tracking
    adopted = models.BooleanField(default=False, verbose_name='Adopted')
    adopter = models.ForeignKey(to=Partner, on_delete=models.CASCADE, blank=True, null=True, verbose_name='Adopter', help_text='Adopter')
    adoption_date = models.DateField(blank=True, null=True, verbose_name='Adoption date')
    # Record state and audit timestamps
    status = models.BooleanField(default=True, verbose_name='Status')
    created = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Creation date')
    updated = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name='Modification date')

    class Meta:
        db_table = "cobija_pet"
        verbose_name = "Pet"
        verbose_name_plural = "Pets"
        # ordering = ["name"]

    def __str__(self):
        return self.name
class PetPhoto(models.Model):
    """A reference photograph attached to a Pet."""

    pet = models.ForeignKey(to=Pet, on_delete=models.CASCADE, verbose_name='Pet')
    photo = models.ImageField(upload_to='pets', verbose_name='Photo', help_text='Reference image')
    status = models.BooleanField(default=True, verbose_name='Status')
    created = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name='Creation date')
    updated = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name='Modification date')

    class Meta:
        db_table = "cobija_petphoto"
        verbose_name = "Pet photo"
        verbose_name_plural = "Pet photos"
        ordering = ["pk"]

    # def __str__(self):
    #     return self.photo
# Track create/update/delete history for both models via django-auditlog.
auditlog.register(Pet)
auditlog.register(PetPhoto)
| 2,562 | 843 |
from asphalt.core.cli import main

# Guard the entry point so importing this module does not launch the CLI.
if __name__ == '__main__':
    main()
| 42 | 16 |
from quart import Blueprint
# Blueprint for the main section of the site; its static files are served
# under /main.
main = Blueprint(
    "main", __name__, template_folder="templates", static_folder="static", static_url_path="/main"
)
# Imported at the bottom so the route handlers can reference `main` from
# this module without a circular import (standard blueprint pattern).
from .routes import *
| 171 | 52 |
from DatabaseConnection import DatabaseConnection
import string
import random
class Instalation:
    def __init__(self, dbhost, dbname, dbusername, dbpass):
        """Open a connection to the schema whose tables will be synchronised.

        Args:
            dbhost: database server host.
            dbname: schema name (also used for information_schema lookups).
            dbusername: login user.
            dbpass: login password.
        """
        self.dbName = dbname
        # self.dbName = env.DB_NAME
        self.db = DatabaseConnection(
            dbhost, dbusername, dbpass, dbname)
    def setUniqueId(self, id):
        """Set the node id used as a prefix when minting sync tokens.

        NOTE(review): this also (re)initialises ``ignoreColums``, so it must
        be called before any trigger-generation method.
        """
        self.uniqueId = id
        # Bookkeeping columns excluded from change detection in the
        # update trigger.
        self.ignoreColums = ['sync_token', 'last_action_at']
    def getTables(self):
        """Return all user tables of the schema, excluding tb_sync_* tables.

        NOTE(review): the schema name is interpolated directly into the SQL
        string — assumes dbName comes from trusted configuration; confirm.
        """
        query = """
        select * from information_schema.TABLES
        where TABLE_SCHEMA = '{}' and TABLE_NAME not like "tb_sync_%"
        """.format(self.dbName)
        return self.db.executeFetchAll(query)
def getColums(self, tableName):
query = """
select COLUMN_NAME, COLUMN_TYPE, COLUMN_KEY from information_schema.COLUMNS
where TABLE_SCHEMA = '{}' and TABLE_NAME = '{}'
"""
return self.db.executeFetchAll(query.format(self.dbName, tableName))
def randomString(self, stringLength=16):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(stringLength))
    def _createAfterInsertTrigger(self, tablename, columns=[]):
        """Create the AFTER INSERT trigger that logs inserts to the changelog.

        Builds an INSERT statement template (stored in tb_sync_changelog.query)
        by concatenating the non-primary-key column names and a CONCAT() of
        their NEW values.

        NOTE(review): the generated SQL looks unbalanced — the CONCAT( in the
        body has no visible closing quote/paren before the semicolon; confirm
        MySQL actually accepts the assembled trigger.
        NOTE(review): mutable default ``columns=[]`` — callers always pass a
        list, and an empty default would crash at columns[0] anyway.
        """
        triggername = f"after_insert_{tablename}"
        print(f"Creating `{triggername}`", end="...")
        header = f"""CREATE TRIGGER `{triggername}` AFTER INSERT ON `{tablename}`
        FOR EACH ROW BEGIN
        """
        declaration = """
        DECLARE qry TEXT;
        DECLARE tb VARCHAR(100);
        """
        # All data columns: skip the primary key.
        colWoPk = [col['COLUMN_NAME']
                   for col in columns if col['COLUMN_KEY'] != "PRI"]
        # creating fields string (comma-separated column list)
        fields = ""
        lencol = len(colWoPk)
        i = 1
        for item in colWoPk:
            fields += item
            if(i < lencol):
                fields += ','
            i += 1
        #
        # creating value string: quote/escape fragments that delimit the
        # NEW.<col> references inside the generated CONCAT().
        values = ""
        prefix = "\",\"\'\","
        firstColDivider = ",\"\',\'\","
        secondColDivider = ",\"\',\'\","
        middle = ",\"\',\'\","
        sufix = ",\"\')\""
        values += prefix
        i = 1
        for col in colWoPk:
            values += f"new.{col}"
            if (i == 1):
                values += firstColDivider
            elif(i == 2):
                values += secondColDivider
            elif(i < lencol):
                values += middle
            i += 1
        values += sufix
        # Assumes the first column is the primary key — TODO confirm.
        pk = columns[0]['COLUMN_NAME']
        #
        body = f"""
        SET qry := CONCAT("insert into {tablename}({fields}) values({values});
        SET tb := "{tablename}";
        INSERT INTO `tb_sync_changelog`(`query`, `table`, `type`, row_id, occur_at, first_time_occur_at, sync_token) VALUES(qry, tb, 'INS', new.{pk}, UNIX_TIMESTAMP(now(3)), new.last_action_at, new.sync_token);
        """
        footer = "END;"
        # print(header + declaration + body + footer)
        inserted = self.db.executeCommit(header + declaration + body + footer)
        return inserted
    def _createBeforeInsertTrigger(self, tablename):
        """Create the BEFORE INSERT trigger stamping sync bookkeeping columns.

        Mints sync_token (node uniqueId + next changelog id) and
        last_action_at for rows inserted without an explicit token.
        """
        # (translated) The BEFORE INSERT trigger generates the token and
        # last_action_at on every synchronised table.
        triggername = f"before_insert_{tablename}"
        print(f"Creating `{triggername}`", end="...")
        header = f"""CREATE TRIGGER `{triggername}` BEFORE INSERT ON `{tablename}`
        FOR EACH ROW BEGIN
        """
        declaration = """
        DECLARE auto_id BIGINT DEFAULT 0;
        #DECLARE sync_id_temp BIGINT DEFAULT 0;
        """
        body = f"""
        SELECT IFNULL(MAX(log_id), 0)+1 INTO auto_id
        FROM tb_sync_changelog;
        #SELECT IFNULL(MAX(sync_id), 0)+1 INTO sync_id_temp
        #FROM {tablename};
        #SET new.sync_id = sync_id_temp;
        IF new.sync_token IS NULL THEN
            SET new.sync_token = CAST(CONCAT('{self.uniqueId}', auto_id) AS UNSIGNED);
            SET new.last_action_at = UNIX_TIMESTAMP(now(6));
        END IF;
        """
        footer = "END;"
        created = self.db.executeCommit(header + declaration + body + footer)
        return created
    def _createAfterDeleteTrigger(self, tablename, pk):
        """Create the delete-side changelog trigger for *tablename*.

        NOTE(review): despite the ``after_delete_`` name, the SQL registers a
        BEFORE DELETE trigger — confirm which timing is intended.
        """
        # after delete
        triggername = f"after_delete_{tablename}"
        print(f"Creating `{triggername}`", end="...")
        header = f"""CREATE
        TRIGGER `{triggername}` BEFORE DELETE ON `{tablename}`
        FOR EACH ROW BEGIN
        """
        declaration = """
        DECLARE qry TEXT;
        DECLARE tb VARCHAR(100);
        DECLARE time_at DOUBLE DEFAULT 0;
        """
        # For deletes the "query" column stores just the primary-key value.
        body = f"""
        SET qry := old.{pk};
        SET tb := "{tablename}";
        SET time_at := UNIX_TIMESTAMP(NOW(3));
        INSERT INTO `tb_sync_changelog`(`query`, `table`, `type`, row_id, occur_at, first_time_occur_at, sync_token)
        VALUES(qry, tb, 'DEL', old.{pk}, time_at, time_at, old.sync_token);
        """
        footer = "END;"
        created = self.db.executeCommit(header + declaration + body + footer)
        return created
    def _createAfterUpdateTrigger(self, tablename, columns):
        """Create the AFTER UPDATE trigger logging per-column changes.

        The trigger assembles an UPDATE statement containing only the columns
        whose values actually changed, and logs it to tb_sync_changelog —
        unless a newer inbound sync message for the same row exists, in which
        case that inbox entry is re-queued instead.
        """
        triggername = f"after_update_{tablename}"
        print(f"Creating `{triggername}`", end="...")
        header = f"""CREATE TRIGGER `{triggername}` AFTER UPDATE ON `{tablename}`
        FOR EACH ROW BEGIN
        """
        declaration = """
        DECLARE update_query TEXT;
        DECLARE tb VARCHAR(100);
        DECLARE front_update TINYINT DEFAULT 0;
        DECLARE update_count TINYINT DEFAULT 0;
        DECLARE latest_update_id BIGINT DEFAULT 0;
        """
        # Data columns only: skip the primary key and the sync bookkeeping
        # columns listed in self.ignoreColums.
        col = [col['COLUMN_NAME']
               for col in columns if col['COLUMN_KEY'] != "PRI" and col['COLUMN_NAME'] not in self.ignoreColums]
        # Assumes the first column is the primary key — TODO confirm.
        pk = columns[0]['COLUMN_NAME']
        body = f"""
        SELECT inbox_id INTO latest_update_id FROM tb_sync_inbox
        WHERE msg_type = 'UPD' AND table_name = '{tablename}' AND row_id = new.{pk}
        AND first_time_occur_at > new.last_action_at ORDER BY first_time_occur_at DESC LIMIT 1;
        IF(latest_update_id != 0) THEN
            UPDATE tb_sync_inbox SET STATUS = 'waiting' WHERE inbox_id = latest_update_id;
        ELSE
            SET update_query := "update {tablename} set ";"""
        # Emit one IF per data column so only changed columns (NULL-safe
        # comparison via <=>) end up in the generated UPDATE statement.
        for c in col:
            body += f"""
            IF !(new.{c} <=> old.{c}) THEN
                SET front_update = front_update + 1;
                SET update_count = update_count + 1;
                IF(front_update > 1) THEN
                    SET update_query = CONCAT(update_query, ",");
                END IF;
                SET update_query = CONCAT(update_query, '{c}=', "'", new.{c}, "'");
            END IF;
            """
        body += f"""
            SET tb := '{tablename}';
            IF update_count > 0 THEN
                SET update_query := CONCAT(update_query, ",last_action_at='", new.last_action_at, "',", "sync_token='", new.sync_token, "'");
                SET update_query := CONCAT(update_query, " where {pk}=", new.{pk});
                INSERT INTO `tb_sync_changelog`(`query`, `table`, `type`, row_id, occur_at, first_time_occur_at, sync_token) VALUES(update_query, tb, 'UPD', new.{pk}, UNIX_TIMESTAMP(NOW(3)), new.last_action_at, new.sync_token);
            END IF;
        END IF;
        """
        footer = "END;"
        # print(header + declaration + body + footer)
        created = self.db.executeCommit(header + declaration + body + footer)
        if not created:
            print(self.db.getLastCommitError())
        return created
def _createBeforeUpdateTrigger(self, tablename, pk):
    """Create the BEFORE UPDATE trigger that stamps sync metadata.

    Locally-originated updates receive a fresh ``sync_token`` (derived from
    this node's unique id plus the next changelog id) and a fresh
    ``last_action_at`` timestamp; replayed remote updates — detected by the
    caller having already changed ``sync_token`` — keep theirs.
    """
    trigger_name = f"before_update_{tablename}"
    print(f"Creating `{trigger_name}`", end="...")
    sql_parts = [
        f"""CREATE TRIGGER `{trigger_name}` BEFORE UPDATE ON `{tablename}`
    FOR EACH ROW BEGIN
    """,
        """
    DECLARE auto_id BIGINT DEFAULT 0;
    DECLARE pri_change TINYINT DEFAULT 0;
    """,
        f"""
    SELECT IFNULL(MAX(log_id), 0)+1 INTO auto_id
    FROM tb_sync_changelog;
    IF old.{pk} != new.{pk} THEN
        SET pri_change := 1;
    END IF;
    IF pri_change = 1 OR (pri_change = 0 AND not (new.sync_token <=> old.sync_token)) THEN
        SET new.sync_token = new.sync_token;
        SET new.last_action_at = new.last_action_at;
    ELSE
        SET new.sync_token = CAST(CONCAT('{self.uniqueId}', auto_id) AS UNSIGNED);
        SET new.last_action_at = UNIX_TIMESTAMP(NOW(6));
    END IF;
    """,
        "END;",
    ]
    return self.db.executeCommit("".join(sql_parts))
def generateSyncTrigger(self):
    """Create the five sync triggers for every user table.

    Procedure: list all tables from information_schema, fetch each table's
    columns, then create the after-insert, before-insert, after-delete,
    after-update and before-update triggers, printing OK/ERROR per trigger.
    """
    print('--------------')
    print("Generate sync trigger")
    tables = self.getTables()
    if not tables['execute_status']:
        return
    for tb in tables['data']:
        name = tb['TABLE_NAME']
        cols = self.getColums(name)['data']
        print('OK' if self._createAfterInsertTrigger(name, cols) else "ERROR")
        print('OK' if self._createBeforeInsertTrigger(name) else "ERROR")
        print('OK' if self._createAfterDeleteTrigger(name, cols[0]['COLUMN_NAME']) else "ERROR")
        print('OK' if self._createAfterUpdateTrigger(name, cols) else "ERROR")
        print('OK' if self._createBeforeUpdateTrigger(name, cols[0]['COLUMN_NAME']) else "ERROR")
def dropAllTrigger(self):
    """Drop every trigger currently defined in the connected schema."""
    print('--------------')
    print("Cleaning all trigger...")
    result = self.db.executeFetchAll("show triggers")
    if not result['execute_status']:
        return
    for row in result['data']:
        name = row['Trigger']
        print('Deleting trigger `{}`'.format(name), end="...")
        print("OK" if self._dropTriggerIfExist(name) else "ERROR")
def _dropTriggerIfExist(self, trigger_name):
    """Issue DROP TRIGGER IF EXISTS for `trigger_name`; return commit result."""
    return self.db.executeCommit(f'drop trigger if exists {trigger_name}')
def generateDefaultTrigger(self):
    """Create the two framework triggers.

    1. ``after_insert_changelog`` — fans every changelog row out to one
       ``tb_sync_outbox`` row per registered peer (client_mode = 2).
    2. ``before_insert_outbox`` — denormalizes the target client's
       connection details (ip/port/key/iv) into the outbox row.
    """
    # generating after insert changelog
    print('--------------')
    # creating triiger
    print('Creating default trigger `{}`...'.format(
        'after_insert_changelog'), end=" ")
    header = """
    CREATE TRIGGER `{}` AFTER INSERT ON `tb_sync_changelog` FOR EACH ROW BEGIN
    """.format('after_insert_changelog')
    # Cursor over peer clients; CONTINUE HANDLER flags cursor exhaustion.
    declaration = """
    DECLARE finished INTEGER DEFAULT 0;
    DECLARE id INTEGER(11);
    DECLARE curClient CURSOR FOR
    SELECT client_unique_id FROM tb_sync_client where client_mode = 2;
    DECLARE CONTINUE HANDLER FOR NOT FOUND SET finished = 1;
    """
    body = """
    OPEN curClient;
    getClient: LOOP
    FETCH curClient INTO id;
    IF finished = 1 THEN
    LEAVE getClient;
    END IF;
    INSERT INTO tb_sync_outbox(row_id, table_name, `query`, msg_type, `client_unique_id`, created_at, occur_at, first_time_occur_at, sync_token)
    VALUES(new.row_id, new.table, new.query, new.type, id, new.created_at,
    new.occur_at, new.first_time_occur_at, new.sync_token);
    END LOOP getClient;
    CLOSE curClient;
    """
    footer = """
    END;
    """
    create = self.db.executeCommit(
        header + ' ' + declaration + ' ' + body + ' ' + footer)
    if (not create):
        print("ERROR")
    else:
        print('OK')
    print('Creating default trigger `{}`...'.format(
        'before_insert_outbox'), end=" ")
    header = """
    CREATE
    TRIGGER `before_insert_outbox` BEFORE INSERT ON `tb_sync_outbox`
    FOR EACH ROW BEGIN
    """
    declaration = """
    DECLARE ip VARCHAR(100);
    DECLARE ports INT(11);
    DECLARE skey VARCHAR(16);
    DECLARE iv VARCHAR(16);
    """
    # client_unique_id = 0 rows are broadcast placeholders and are skipped.
    body = """
    IF (new.client_unique_id <> 0) THEN
    SELECT client_ip, client_port, client_key, client_iv INTO ip, ports, skey, iv
    FROM tb_sync_client WHERE client_unique_id = new.client_unique_id;
    SET new.client_ip = ip;
    SET new.client_port = ports;
    SET new.client_key = skey;
    SET new.client_iv = iv;
    END IF;
    """
    footer = """
    END;
    """
    create = self.db.executeCommit(
        header + ' ' + declaration + ' ' + body + ' ' + footer)
    if (not create):
        print("ERROR")
    else:
        print('OK')
def __createChanglogTable(self):
    """Create ``tb_sync_changelog``: one row per captured local mutation."""
    sql = """
    CREATE TABLE `tb_sync_changelog` (
    `log_id` bigint(20) NOT NULL AUTO_INCREMENT,
    `row_id` int(1) DEFAULT NULL COMMENT 'primary key of the table',
    `table` varchar(100) DEFAULT NULL,
    `query` text,
    `type` varchar(5) DEFAULT NULL,
    `is_proceed` tinyint(4) DEFAULT '0',
    `first_time_occur_at` double DEFAULT NULL,
    `occur_at` double DEFAULT NULL,
    `created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
    `sync_token` varchar(100) DEFAULT NULL,
    PRIMARY KEY (`log_id`)
    )
    """
    return self.db.executeCommit(sql)
def __createClientTable(self):
    """Create ``tb_sync_client``: registry of peer nodes and their credentials."""
    sql = """
    CREATE TABLE `tb_sync_client` (
    `client_id` int(11) NOT NULL AUTO_INCREMENT,
    `client_unique_id` int(11) DEFAULT NULL,
    `client_key` varchar(255) DEFAULT NULL,
    `client_iv` varchar(25) DEFAULT NULL,
    `client_ip` varchar(20) DEFAULT NULL,
    `client_port` int(11) DEFAULT NULL,
    `client_mode` tinyint(4) DEFAULT '2',
    PRIMARY KEY (`client_id`)
    )
    """
    return self.db.executeCommit(sql)
def __createInboxTable(self):
    """Create ``tb_sync_inbox``: queue of sync messages received from peers."""
    sql = """
    CREATE TABLE `tb_sync_inbox` (
    `inbox_id` bigint(20) NOT NULL AUTO_INCREMENT,
    `row_id` int(11) DEFAULT NULL,
    `table_name` varchar(255) DEFAULT NULL,
    `msg_type` enum('INS','UPD','DEL','ACK','PRI','REG','PROC','NEEDPK','DONE') DEFAULT NULL,
    `msg_id` int(11) DEFAULT NULL,
    `query` text,
    `client_unique_id` int(11) DEFAULT NULL,
    `master_status` tinyint(4) DEFAULT '0',
    `result_primary_key` int(11) DEFAULT '0' COMMENT 'primary key after process the query, due to differential PK between host',
    `status` enum('waiting','need_pk_update','done','error','processing') DEFAULT 'waiting',
    `priority` tinyint(4) DEFAULT '2',
    `sync_token` varchar(100) DEFAULT NULL,
    `first_time_occur_at` double DEFAULT NULL,
    `occur_at` double DEFAULT NULL,
    `created_at` datetime DEFAULT NULL,
    `updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (`inbox_id`)
    )
    """
    return self.db.executeCommit(sql)
def __createOutboxTable(self):
    """Create ``tb_sync_outbox``: queue of sync messages to deliver to peers."""
    sql = """
    CREATE TABLE `tb_sync_outbox` (
    `outbox_id` bigint(20) NOT NULL AUTO_INCREMENT,
    `row_id` int(11) DEFAULT NULL COMMENT 'primary key of table in local',
    `table_name` varchar(255) DEFAULT NULL,
    `msg_type` enum('INS','UPD','DEL','ACK','PRI','REG','PROC','NEEDPK','DONE') DEFAULT NULL,
    `msg_id` int(11) DEFAULT NULL COMMENT 'outbox_id from local',
    `query` text,
    `client_unique_id` int(11) DEFAULT NULL COMMENT 'client_unique_id',
    `status` enum('waiting','sent','arrived','canceled','retry','need_pk_update','done','processing') DEFAULT 'waiting',
    `priority` tinyint(4) DEFAULT '2',
    `sync_token` varchar(100) DEFAULT NULL,
    `retry_again_at` datetime DEFAULT NULL,
    `first_time_occur_at` double DEFAULT NULL,
    `occur_at` double DEFAULT NULL,
    `created_at` datetime DEFAULT NULL,
    `updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
    `client_ip` varchar(100) DEFAULT NULL,
    `client_port` int(11) DEFAULT NULL,
    `client_key` varchar(16) DEFAULT NULL,
    `client_iv` varchar(16) DEFAULT NULL,
    PRIMARY KEY (`outbox_id`)
    )
    """
    return self.db.executeCommit(sql)
def createSyncTable(self):
    """Create the four sync framework tables, printing OK/ERROR per table."""
    print('--------------')
    print("Creating sync table")
    steps = [
        ('changelog', self.__createChanglogTable),
        ('client', self.__createClientTable),
        ('inbox', self.__createInboxTable),
        ('outbox', self.__createOutboxTable),
    ]
    for label, create in steps:
        print(f'Creating {label} table', end="...")
        print('OK' if create() else 'ERROR')
def addUnixTimestampColumnToEveryTable(self):
    """Add the sync bookkeeping columns to every user table.

    Adds ``last_action_at`` (double, after the current last column) and
    ``sync_token`` (varchar(100), after ``last_action_at``), printing
    OK/ERROR per ALTER statement.
    """
    tables = self.getTables()
    print('--------------')
    print('Adding sync column')
    # Fix: siblings (e.g. generateSyncTrigger) check execute_status before
    # iterating; this method previously crashed on a failed table fetch.
    if not tables['execute_status']:
        print('ERROR: could not list tables')
        return
    for tb in tables['data']:
        name = tb['TABLE_NAME']
        print(f"Add `last_action_at` column to `{name}`", end="...")
        columns = self.getColums(name)['data']
        last_column = columns[-1]['COLUMN_NAME']
        alter_query = f"alter table {name} add last_action_at double after {last_column}"
        print("OK" if self.db.executeCommit(alter_query) else 'ERROR')
        print(f"Add `sync_token` column to `{name}`", end="...")
        token_query = f"alter table {name} add sync_token varchar(100) after last_action_at"
        print('OK' if self.db.executeCommit(token_query) else "ERROR")
# autotrigger = Instalation("localhost", "db_coba", 'rama', 'ramapradana24')
# autotrigger.dropAllTrigger()
# autotrigger.createSyncTable()
# autotrigger.generateDefaultTrigger()
# autotrigger.addUnixTimestampColumnToEveryTable()
# autotrigger.generateSyncTrigger()
| 19,172 | 5,796 |
# 3x3 board; '-' marks an empty cell.
rows = [['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']]
# Current marker; it is toggled *before* each move, so starting at 'o'
# means 'x' actually moves first.
turn = 'o'


def printboard():
    """Print the board, one row-list per line."""
    for row in rows:
        print(row)
def placexo(move1, move2, xoturn):
    """Place `xoturn`'s marker at (move1, move2), 1-based, if the cell is free."""
    r, c = move1 - 1, move2 - 1
    if rows[r][c] != '-':
        print("that tile is taken")
    else:
        rows[r][c] = xoturn
def wincheck(currentplayer, board=None):
    """Report whether `currentplayer` has three in a row.

    Fix: the original only tested that three cells were equal, so any line
    of blank '-' cells (i.e. the starting board) was declared a win.  Each
    line must now equal `currentplayer`'s own marker.

    :param currentplayer: 'x' or 'o' — the player who just moved
    :param board: optional 3x3 board; defaults to the module-level `rows`
        (new optional parameter, backward compatible)
    :returns: True and prints the game-over message on a win, else False
    """
    b = rows if board is None else board
    lines = [b[i] for i in range(3)]                      # rows
    lines += [[b[r][c] for r in range(3)] for c in range(3)]  # columns
    lines.append([b[i][i] for i in range(3)])             # main diagonal
    lines.append([b[2 - i][i] for i in range(3)])         # anti-diagonal
    for line in lines:
        if all(cell == currentplayer for cell in line):
            print("Game over ", currentplayer, "wins")
            return True
    return False
# Main game loop.  Fixes over the original:
#  - turncount was reset to 0 every iteration, so the 9-move draw check
#    could never fire; it is now initialized once before the loop.
#  - `movec < 1 | movec > 4` used bitwise `|` (wrong precedence) and the
#    wrong upper bound; replaced with `or` and the 1-3 range.
#  - the row re-prompt message said "column".
#  - placexo indexes rows[row][col], but the original passed the column
#    first, silently transposing the board; the row is now passed first.
win = False
turncount = 0
while not win:
    printboard()
    # Toggle whose turn it is (starts as 'o', so 'x' goes first).
    turn = 'o' if turn == 'x' else 'x'
    print("It is", turn, "turn")
    movec = int(input("Enter the column you want"))
    mover = int(input("Enter the row you want"))
    if movec < 1 or movec > 3:
        print("Enter a column from 1-3")
        movec = int(input("Enter the column you want"))
    if mover < 1 or mover > 3:
        print("Enter a row from 1-3")
        mover = int(input("Enter the row you want"))
    placexo(mover, movec, turn)
    wincheck(turn)
    turncount = turncount + 1
    if turncount == 9:
        win = True
        print("This game is a draw")
| 1,846 | 667 |
# Exercise: read three integers and report the smallest and largest.
# The triple-quoted blocks are earlier/alternative solutions kept as inert
# string literals (dead code) — they are never executed.
'''n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro: '))
n3 = int(input('Digite mais um número: '))
maior = n1
if n2 > n1 and n2 > n3:
maior = n2
if n3 > n1 and n3 > n2:
maior = n3
menor = n1
if n2 < n1 and n2 < 3:
menor = n2
if n3 < n1 and n3 < n2:
menor = n3
print(f'O maior é {maior}.')
print(f'O menor é {menor}.')
if n1 == n2 == n3:
print('Os números são iguais.')'''
# Active solution: sort the three values and take both ends.
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro: '))
n3 = int(input('Digite mais um número: '))
lista = [n1, n2, n3]
ordem = sorted(lista)  # ascending order
print(f'O menor é {ordem[0]}.')
print(f'O maior é {ordem[-1]}.')
'''primeiro = int(input('Digite o primeiro número:'))
segundo = int(input('Digite o segundo número:'))
terceiro = int(input('Digite o terceiro número:'))
numeros = [primeiro, segundo, terceiro]
print('O maior valor digitado foi {}'.format(max(numeros)))
print('O menor numero digitado foi {}'.format(min(numeros)))'''
'''n1 =int(input('digite o primeiro número: '))
n2 =int(input('Digite o segundo número: '))
n3 =int(input ('Digite o terceiro número: '))
lista =[n1,n2,n3]
lista_ordenada = sorted(lista)
print('O menor número é {}'.format(lista_ordenada[0]))
print ('O maior número é {}'.format(lista_ordenada[-1]))'''
| 1,267 | 514 |
import time
from datetime import datetime, timedelta, timezone
from typing import (
Any,
AsyncIterator,
Dict,
List,
Optional,
cast,
)
import aiohttp_client
import jwt
import pydantic
from cryptography.hazmat.backends import default_backend
from opencoverage.settings import Settings
from opencoverage.types import Pull
from .base import SCMClient
from .exceptions import (
APIException,
AuthorizationException,
InstallationException,
NotFoundException,
)
class GitlabUser(pydantic.BaseModel):
    """User payload subset.

    NOTE(review): field names follow the GitHub schema this client was
    adapted from — confirm they match actual GitLab responses.
    """
    name: str
    id: int
    avatar_url: str
    username: str
    web_url: str
    state: str
    # site_admin: bool


class GitlabRepo(pydantic.BaseModel):
    """Repository/project payload subset."""
    id: int
    name: str
    full_name: str
    private: bool
    owner: GitlabUser
    description: Optional[str]
    fork: bool
    url: str
    created_at: str
    updated_at: str


class GitlabRef(pydantic.BaseModel):
    """A branch/commit reference together with its owning repo."""
    label: str
    ref: str
    sha: str
    user: GitlabUser
    repo: GitlabRepo


class GitlabPull(pydantic.BaseModel):
    """Merge-request payload subset (a sample API response is kept in the
    comment block below for reference)."""
    web_url: str
    id: str
    # diff_url: str
    # patch_url: str
    # number: int
    state: str
    title: Optional[str]
    author: GitlabUser
    created_at: str
    updated_at: str
    closed_at: Optional[str]
    merged_at: Optional[str]
    merge_commit_sha: str
    assignee: Optional[GitlabUser]
    assignees: List[GitlabUser]
    reviewers: List[GitlabUser]
    # draft: bool
    commits_url: str
    target_branch: str
    source_branch: str
# {
# "id": 1,
# "iid": 1,
# "project_id": 3,
# "title": "test1",
# "description": "fixed login page css paddings",
# "state": "merged",
# "merged_by": {
# "id": 87854,
# "name": "Douwe Maan",
# "username": "DouweM",
# "state": "active",
# "avatar_url": "https://gitlab.example.com/uploads/-/system/user/avatar/87854/avatar.png",
# "web_url": "https://gitlab.com/DouweM"
# },
# "merged_at": "2018-09-07T11:16:17.520Z",
# "closed_by": null,
# "closed_at": null,
# "created_at": "2017-04-29T08:46:00Z",
# "updated_at": "2017-04-29T08:46:00Z",
# "target_branch": "master",
# "source_branch": "test1",
# "upvotes": 0,
# "downvotes": 0,
# "author": {
# "id": 1,
# "name": "Administrator",
# "username": "admin",
# "state": "active",
# "avatar_url": null,
# "web_url" : "https://gitlab.example.com/admin"
# },
# "assignee": {
# "id": 1,
# "name": "Administrator",
# "username": "admin",
# "state": "active",
# "avatar_url": null,
# "web_url" : "https://gitlab.example.com/admin"
# },
# "assignees": [{
# "name": "Miss Monserrate Beier",
# "username": "axel.block",
# "id": 12,
# "state": "active",
# "avatar_url": "http://www.gravatar.com/avatar/46f6f7dc858ada7be1853f7fb96e81da?s=80&d=identicon",
# "web_url": "https://gitlab.example.com/axel.block"
# }],
# "reviewers": [{
# "id": 2,
# "name": "Sam Bauch",
# "username": "kenyatta_oconnell",
# "state": "active",
# "avatar_url": "https://www.gravatar.com/avatar/956c92487c6f6f7616b536927e22c9a0?s=80&d=identicon",
# "web_url": "http://gitlab.example.com//kenyatta_oconnell"
# }],
# "source_project_id": 2,
# "target_project_id": 3,
# "labels": [
# "Community contribution",
# "Manage"
# ],
# "work_in_progress": false,
# "milestone": {
# "id": 5,
# "iid": 1,
# "project_id": 3,
# "title": "v2.0",
# "description": "Assumenda aut placeat expedita exercitationem labore sunt enim earum.",
# "state": "closed",
# "created_at": "2015-02-02T19:49:26.013Z",
# "updated_at": "2015-02-02T19:49:26.013Z",
# "due_date": "2018-09-22",
# "start_date": "2018-08-08",
# "web_url": "https://gitlab.example.com/my-group/my-project/milestones/1"
# },
# "merge_when_pipeline_succeeds": true,
# "merge_status": "can_be_merged",
# "sha": "8888888888888888888888888888888888888888",
# "merge_commit_sha": null,
# "squash_commit_sha": null,
# "user_notes_count": 1,
# "discussion_locked": null,
# "should_remove_source_branch": true,
# "force_remove_source_branch": false,
# "allow_collaboration": false,
# "allow_maintainer_to_push": false,
# "web_url": "http://gitlab.example.com/my-group/my-project/merge_requests/1",
# "references": {
# "short": "!1",
# "relative": "my-group/my-project!1",
# "full": "my-group/my-project!1"
# },
# "time_stats": {
# "time_estimate": 0,
# "total_time_spent": 0,
# "human_time_estimate": null,
# "human_total_time_spent": null
# },
# "squash": false,
# "task_completion_status":{
# "count":0,
# "completed_count":0
# }
# }
class GitlabCheckOutput(pydantic.BaseModel):
    """Output section of a check run (GitHub-schema legacy)."""
    title: Optional[str]
    summary: Optional[str]
    text: Optional[str]
    annotations_count: int
    annotations_url: Optional[str]


class GitlabApp(pydantic.BaseModel):
    """Application metadata attached to a check run."""
    created_at: datetime
    description: Optional[str]
    external_url: str
    id: int
    name: str


class GitlabCheck(pydantic.BaseModel):
    """A single check run (GitHub-schema legacy)."""
    id: int
    head_sha: str
    node_id: Optional[str]
    external_id: Optional[str]
    url: Optional[str]
    html_url: Optional[str]
    details_url: Optional[str]
    status: str
    conclusion: Optional[str]
    started_at: datetime
    completed_at: Optional[datetime]
    name: str
    app: Optional[GitlabApp]


class GitlabChecks(pydantic.BaseModel):
    """Paged list of check runs."""
    check_runs: List[GitlabCheck]
    total_count: int


class GitlabAccessData(pydantic.BaseModel):
    """Installation access-token response (token plus expiry/permissions)."""
    token: str
    expires_at: datetime
    permissions: Dict[str, str]
    repository_selection: str


class GitlabComment(pydantic.BaseModel):
    """A comment on an issue/merge request."""
    id: int
    body: str
    user: Optional[GitlabUser]
# class GitlabInstallation(pydantic.BaseModel):
# account: Optional[GitlabUser]
# app_id: int
# app_slug: str
# created_at: str
# id: int
# permissions: Dict[str, str]
# suspended_at: Optional[str]
# suspended_by: Optional[str]
# target_id: Optional[int]
# target_type: Optional[str]
# updated_at: Optional[str]
# API root.  The constant kept its GitHub name when this client was adapted,
# but it points at the GitLab API.
GITHUB_API_URL = "https://api.gitlab.com"
# Fix: several methods referenced GITLAB_API_URL, which was never defined
# and raised NameError at call time.  Define it as an alias; GITHUB_API_URL
# is retained for backward compatibility.
GITLAB_API_URL = GITHUB_API_URL


class Token(pydantic.BaseModel):
    """Cached authentication state for one installation."""
    jwt_token: str
    jwt_expiration: int
    access_data: Optional[GitlabAccessData]


class Permissions:
    """Permission level names used in _required_permissions."""
    WRITE = "write"
    READ = "read"


# Module-level caches shared by all client instances, keyed by
# installation id / pem path respectively.
_token_cache: Dict[str, Token] = {}
_private_key_cache = {}
class Gitlab(SCMClient):
    """SCM client for the GitLab REST API.

    Adapted from a GitHub App client; several models and the module
    constant still carry GitHub naming.  Authentication currently uses a
    static private token (see ``get_access_token``).  The dead commented-out
    GitHub-era methods (JWT exchange, checks, comments) were removed.
    """

    # App permissions the original GitHub validation flow required; kept
    # for reference until a GitLab-side validation is written.
    _required_permissions = {
        "checks": Permissions.WRITE,
        "contents": Permissions.WRITE,
        "issues": Permissions.WRITE,
        "metadata": Permissions.READ,
        "pull_requests": Permissions.WRITE,
        "statuses": Permissions.READ,
    }

    def __init__(self, settings: Settings, installation_id: Optional[str]):
        """Bind to one installation, falling back to the configured default."""
        super().__init__(settings, installation_id)
        self.installation_id = cast(
            str, installation_id or settings.gitlab_default_installation_id
        )
        if settings.gitlab_app_pem_file is None:
            raise TypeError("Must configure gitlab_app_pem_file")
        # PEM-based JWT auth is disabled while the static token is in use;
        # the attribute is kept so dependent code still finds it.
        self._private_key = ""

    async def get_access_token(self) -> str:
        """Return the token used for PRIVATE-TOKEN authentication.

        FIXME(security): the token is hard-coded in source.  Move it to
        Settings / secret storage.  The unreachable JWT installation-token
        exchange that followed this return was removed (its helper method
        was already commented out and would have raised NameError).
        """
        return "TxeACPEtQAeHP-1vsUw7"

    async def _prepare_request(
        self,
        *,
        url: str,
        method: str,
        headers: Optional[Dict[str, str]] = None,
        params: Optional[Dict[str, str]] = None,
        json: Optional[Dict[str, Any]] = None,
    ):
        """Build an aiohttp request context manager with auth headers set.

        :returns: the (un-entered) aiohttp request context manager
        """
        func = getattr(aiohttp_client, method.lower())
        headers = headers or {}
        token = await self.get_access_token()
        headers["Content-Type"] = "application/json"
        headers["PRIVATE-TOKEN"] = token
        return func(url, headers=headers, params=params or {}, json=json)

    async def get_pulls(self, project_id: int, repo: str, commit_hash: str) -> List[Pull]:
        """List merge requests associated with a commit.

        :raises AuthorizationException: on HTTP 401
        """
        # Fix: original referenced undefined GITLAB_API_URL and doubled the
        # path slash ("//projects").
        url = (
            f"{GITHUB_API_URL}/projects/{project_id}/repository/commits/"
            f"{commit_hash}/merge_requests"
        )
        async with await self._prepare_request(url=url, method="get") as resp:
            if resp.status == 422:
                # no pulls found
                return []
            if resp.status == 401:
                text = await resp.json()
                raise AuthorizationException(f"API Unauthorized: {text}")
            data = await resp.json()
            pulls = []
            for item in data:
                gpull = GitlabPull.parse_obj(item)
                # NOTE(review): base=source_branch / head=target_branch looks
                # inverted relative to GitLab's source/target semantics —
                # confirm against the Pull consumers before changing.
                pulls.append(
                    Pull(base=gpull.source_branch, head=gpull.target_branch, id=gpull.id)
                )
            return pulls

    async def get_pull_diff(self, project_id: int, repo: str, id: int) -> str:
        """Fetch the raw changes payload for a merge request.

        :raises AuthorizationException: on HTTP 401
        """
        url = f"{GITHUB_API_URL}/projects/{project_id}/merge_requests/{id}/changes"
        async with await self._prepare_request(url=url, method="get") as resp:
            if resp.status == 401:
                text = await resp.json()
                raise AuthorizationException(f"API Unauthorized: {text}")
            return await resp.text(encoding="latin-1")

    async def getCompareInfosFromGitlab(self, project_id, forward_commit, previos):
        """Compare two commits; returns the open response or None."""
        path = "projects/%s/repository/compare?from=%s&to=%s" % (
            project_id, forward_commit, previos)
        # Fix: requestToGitlab is a coroutine and was not awaited.
        return await self.requestToGitlab(path)

    async def getCommitInfoFromGitlab(self, project_id, commit):
        """Fetch a single commit; returns the open response or None."""
        path = "projects/%s/repository/commits/%s" % (project_id, commit)
        return await self.requestToGitlab(path)

    async def getAllCommitsFromGitlab(self, project_id):
        """Page through all commits of a project and return them as a list."""
        commits = []
        page_id = 1
        while page_id:
            path = "projects/%s/repository/commits?all=true&per_page=50&page=%s" % (
                project_id, page_id)
            resp = await self.requestToGitlab(path)
            # Fixes: aiohttp exposes .status (not requests' .status_code),
            # and the body is read with await resp.json() (json module was
            # never imported).
            if resp is not None and resp.status == 200:
                commits.extend(await resp.json())
                # A missing/empty X-Next-Page header is falsy and ends the loop.
                page_id = resp.headers.get("X-Next-Page")
            else:
                break
        return commits

    async def requestToGitlab(self, path, params=None):
        """GET an API path relative to the API root.

        Rewritten from a syntactically invalid requests-style implementation
        (unbalanced parens, undefined ``requests``/``Authorization``/
        ``self.request``); the broken logging calls were dropped.

        :param params: optional query parameters (default was a shared
            mutable ``{}``; now a None sentinel)
        :returns: the *open* aiohttp response for status 200/201/404 — the
            caller reads and releases it — or None for any other status
        """
        url = f"{GITHUB_API_URL}/{path}"
        ctx = await self._prepare_request(url=url, method="get", params=params or {})
        resp = await ctx
        if resp.status in (200, 201, 404):
            return resp
        resp.release()
        return None

    async def file_exists(self, org: str, repo: str, commit: str, filename: str) -> bool:
        """Return True if `filename` exists at `commit` (404 -> False).

        :raises AuthorizationException: on HTTP 401
        """
        # Fix: the URL had lost its {filename} placeholder (the parameter
        # was otherwise unused).
        url = f"{GITHUB_API_URL}/repos/{org}/{repo}/contents/{filename}"
        async with await self._prepare_request(
            url=url,
            method="get",
            params={"ref": commit},
        ) as resp:
            if resp.status == 401:
                text = await resp.json()
                raise AuthorizationException(f"API Unauthorized: {text}")
            if resp.status == 404:
                return False
            return True

    async def download_file(
        self, org: str, repo: str, commit: str, filename: str
    ) -> AsyncIterator[bytes]:
        """Stream `filename` at `commit` in 1 KiB chunks.

        :raises AuthorizationException: on HTTP 401
        :raises NotFoundException: on HTTP 404
        """
        # Fix: the URL had lost its {filename} placeholder.
        url = f"{GITHUB_API_URL}/repos/{org}/{repo}/contents/{filename}"
        async with await self._prepare_request(
            url=url,
            method="get",
            params={"ref": commit},
            headers={"Accept": "application/vnd.gitlab.v3.raw"},
        ) as resp:
            if resp.status == 401:
                text = await resp.json()
                raise AuthorizationException(f"API Unauthorized: {text}")
            if resp.status == 404:
                text = await resp.json()
                raise NotFoundException(f"File not found: {text}")
            while chunk := await resp.content.read(1024):
                yield chunk
| 20,289 | 6,620 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Main dialog. """
from botbuilder.dialogs import (
ComponentDialog,
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
)
from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
from botbuilder.core import MessageFactory
from github_summary_bot import MySummaryBot
from bots import DialogAndWelcomeBot
from .summarize_dialog import SummarizeDialog
class MainDialog(ComponentDialog):
    """Main dialog: routes each user utterance through a three-step
    waterfall (intro -> summarize -> final)."""

    def __init__(self, configuration: dict, dialog_id: str = None):
        """Register the prompt, the SummarizeDialog and the waterfall.

        :param configuration: app configuration dict (stored, not read here)
        :param dialog_id: optional id; defaults to the class name
        """
        super(MainDialog, self).__init__(dialog_id or MainDialog.__name__)
        self._configuration = configuration
        self.add_dialog(TextPrompt(TextPrompt.__name__))
        self.add_dialog(SummarizeDialog())
        self.add_dialog(
            WaterfallDialog(
                "WFDialog", [self.intro_step, self.act_step, self.final_step]
            )
        )
        # The waterfall runs first when this component dialog starts.
        self.initial_dialog_id = "WFDialog"
        # NOTE(review): one summarizer instance is shared across all turns
        # and users of this dialog — confirm MySummaryBot is safe to share.
        self.sum_bot = MySummaryBot()

    async def intro_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Initial prompt."""
        # Feed the incoming utterance to the summarizer's state machine;
        # an empty reply means it had nothing to ask.
        result = self.sum_bot.update_state_reply(step_context.context.activity.text)
        if (result == ''):
            # NOTE(review): this returns the send_activity result rather
            # than a DialogTurnResult — verify against botbuilder's
            # waterfall semantics.
            return await step_context.context.send_activity(DialogAndWelcomeBot.create_welcome_response(step_context.context.activity))
        else:
            return await step_context.prompt(
                TextPrompt.__name__,
                PromptOptions(
                    prompt=MessageFactory.text(result)
                ),
            )

    async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        # Run the SummarizeDialog, dialog will prompt to find out the remaining details.
        return await step_context.begin_dialog(SummarizeDialog.__name__, self.sum_bot)

    async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Complete dialog.

        At this step, display the summary for each comment and summary of all comments
        """
        # If the child dialog ("SummarizeDialog") was cancelled or the user failed
        # to confirm, the Result here will be null.
        if step_context.result is not None:
            result = step_context.result
            await step_context.context.send_activity(MessageFactory.text(result))
        else:
            await step_context.context.send_activity(MessageFactory.text("Thank you."))
        return await step_context.end_dialog()
| 2,578 | 690 |
# Copyright (c) 2014, Matt Layman
import filecmp
import os
import shutil
import warnings
from pkg_resources import iter_entry_points
from handroll import logger
from handroll.i18n import _
class Composer(object):
    """Abstract base class for output composers.

    A composer turns one source file into whatever output it produces in
    the destination directory.
    """

    def compose(self, catalog, source_file, out_dir):
        """Produce this composer's output for a single source file.

        :param catalog: the ``TemplateCatalog``
        :param source_file: the filename of the source
        :param out_dir: the directory to store output
        """
        raise NotImplementedError
class Composers(object):
    """Registry of composers, keyed by file extension and loaded lazily."""

    def __init__(self):
        self._available_composers = {}
        self._composers = {}
        self.default_composer = CopyComposer()
        # pkg_resources emits an annoying UserWarning about ~/.python-eggs
        # being group/other-writable when loading entry points.  An average
        # user cannot act on it, so filter it out; handroll assumes a level
        # of trust in whatever registers under ``handroll.composers``.
        warnings.filterwarnings('ignore', '.*get_resource_filename.*')
        for entry_point in iter_entry_points('handroll.composers'):
            self._available_composers[entry_point.name] = entry_point.load()

    def select_composer_for(self, filename):
        """Return the composer responsible for ``filename``'s extension."""
        extension = os.path.splitext(filename)[1]
        return self._get_composer(extension)

    def _get_composer(self, ext):
        """Instantiate (at most once) and return the composer for ``ext``.

        Unknown extensions fall back to the default copying composer, so
        an individual composer only initializes when a file of its type
        is actually encountered.
        """
        if ext not in self._composers:
            composer_cls = self._available_composers.get(ext)
            self._composers[ext] = (
                composer_cls() if composer_cls is not None else self.default_composer
            )
        return self._composers[ext]
class CopyComposer(Composer):
    """Copy a source file to the destination.

    ``CopyComposer`` is the default composer for any unrecognized file type.
    The source file will be copied to the output directory unless there is a
    file with an identical name and content already at the destination.
    """

    def compose(self, catalog, source_file, out_dir):
        """Copy a file to the destination if the file does not exist or was
        modified.

        :param catalog: unused; present to satisfy the Composer interface
        :param source_file: path of the file to copy
        :param out_dir: destination directory
        """
        filename = os.path.basename(source_file)
        # Do not copy files that are already there unless different.
        destination = os.path.join(out_dir, filename)
        if os.path.exists(destination):
            if filecmp.cmp(source_file, destination):
                # Files are equal. Do nothing.
                # Fix: the log messages had lost their {filename} format
                # placeholders, so the .format(filename=...) calls were
                # no-ops and logs never named the file.
                logger.debug(_('Skipping {filename} ... It is the same as '
                               '{destination}.').format(
                    filename=filename, destination=destination))
                return
            else:
                logger.info(
                    _('{filename} differs from {destination} ...').format(
                        filename=filename, destination=destination))
        logger.info(_('Copying {filename} to {out_dir} ...').format(
            filename=filename, out_dir=out_dir))
        shutil.copy(source_file, out_dir)
| 3,642 | 964 |
from django.test import TestCase, RequestFactory
from volunteer.views import *
from django.contrib.auth.models import User, Group
from django.urls import reverse
from rest_framework import status
"""
class BaseTestCase(TestCase):
def create_authenticated_volunteer_user(self):
user = User.objects.create_user(
username="testuser",
email="test@email.com",
first_name="testfirst",
last_name="testlast",
password="beautifulbutterfly125",
)
Group.objects.get_or_create(name="volunteer")
vol_group = Group.objects.get(name="volunteer")
vol_group.user_set.add(user)
return user
def create_authenticated_nonvolunteer_user(self):
user = User.objects.create_user(
username="otherstafftestuser",
email="othertest@email.com",
first_name="othertestfirst",
last_name="othertestlast",
password="beautifulbutterfly125",
)
Group.objects.get_or_create(name="staff")
staff_group = Group.objects.get(name="staff")
staff_group.user_set.add(user)
return user
class VolunteerViewsTest(BaseTestCase):
def test_volunteer(self):
request = RequestFactory().get(reverse("home-home"))
request.user = self.create_authenticated_nonvolunteer_user()
response = volunteer(request)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
request.user = self.create_authenticated_volunteer_user()
response = volunteer(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
| 1,656 | 489 |
#!/usr/bin/env python3
#
# Copyright (C) 2017 Olaf Lessenich
import argparse
import csv
import os
import sys
import tempfile
import time
import signal
import statistics
import psutil
from plumbum import colors
from plumbum import local
from plumbum.cmd import grep
from plumbum.commands.processes import ProcessExecutionError
from xml.etree import ElementTree as ET
from subprocess import TimeoutExpired
# Bound plumbum command object for invoking git.
GIT = local['git']
# Placeholder substituted with the concrete merge strategy in job commands.
STRATEGY = '$$STRATEGY$$'
# Field names of the semicolon-separated job records emitted by
# ``git preparemerge`` (see get_jobs()).
COLS = ['project', 'timestamp', 'merge', 'left', 'right', 'file', 'mergetype',
        'strategies', 'target', 'cmd', 'loc_in']
def kill(proc_pid):
    """Terminate the process with PID *proc_pid* and all of its children."""
    parent = psutil.Process(proc_pid)
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()
def get_merge_commits(before):
    """List all merge commit hashes, oldest first.

    :param before: optional date string; when given, only commits before
        that date are listed
    :return: list of commit hashes
    """
    rev_args = ['rev-list', '--all', '--merges', '--reverse']
    if before:
        rev_args += ['--before', before]
    return GIT[tuple(rev_args)]().splitlines()
def get_jobs(target, strategies=None, jdimeopts=None, noop=False, statedir=None, commits=None):
    """Prepare merge jobs via ``git preparemerge`` and return them as dicts.

    :param target: output directory handed to the helper
    :param strategies: merge strategies; joined with commas when given
    :param jdimeopts: additional options forwarded to jdime
    :param noop: request a dry run from the helper
    :param statedir: directory with per-project state files, if any
    :param commits: commits to prepare; defaults to an empty list
    :return: ``csv.DictReader`` over the helper's semicolon-separated
        output, keyed by COLS
    """
    # BUG FIX: the original signature used a mutable default (commits=[]),
    # which is shared between calls; use None and create a fresh list.
    if commits is None:
        commits = []
    options = ["-o", target]
    if strategies:
        options.append("-m")
        options.append(','.join(strategies))
    if noop:
        options.append("-n")
    if jdimeopts:
        options.append("-j")
        options.append(jdimeopts)
    if statedir:
        options.append("-s")
        options.append(statedir)
    return csv.DictReader(iter(GIT['preparemerge', options, commits]()
                               .splitlines()), delimiter=';', fieldnames=COLS)
def count_conflicts(merged_file):
    """Count conflict hunks in *merged_file* via its conflict marker lines.

    Returns the minimum of the three marker counts ('<<<<<<<', '=======',
    '>>>>>>>') so unbalanced markers do not inflate the result; returns 0
    when grep finds no markers (grep exits non-zero in that case).
    """
    patterns = ('^<<<<<<<', '^=======', '^>>>>>>>')
    try:
        counts = [int(grep['-c', '-e', pattern, merged_file]().strip())
                  for pattern in patterns]
        return min(counts)
    except ProcessExecutionError:
        return 0
def run(job, prune, writer, runs=1, srcfile=None, noop=False):
    """Execute one merge job and report its outcome.

    :param job: job record (dict keyed by COLS) as produced by get_jobs()
    :param prune: remove the scenario's files after a fully successful run
    :param writer: csv.writer for result rows, or None for human-readable
        console output
    :param runs: repetitions per strategy; the median runtime is reported
    :param srcfile: when set, only process jobs for this source file
    :param noop: when True, just echo the job record to stdout and return

    Reads the module-global ``jdimeversion`` (set in main()) to tag rows.
    """
    if noop:
        writer = csv.DictWriter(sys.stdout, delimiter=';', fieldnames=COLS)
        writer.writerow(job)
        return
    project = job['project']
    timestamp = job['timestamp']
    mergecommit = job['merge'][0:7]
    left = job['left'][0:7]
    right = job['right'][0:7]
    file = job['file']
    target = job['target']
    mergetype = job['mergetype']
    timeout = 1800  # seconds before a merge run is killed
    fail = False
    if mergetype == "skipped":
        # Skipped scenarios get a mostly-empty result row and no merge run.
        writer.writerow([project,
                         timestamp,
                         mergecommit,
                         left,
                         right,
                         file,
                         mergetype,
                         job["cmd"],
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         job["loc_in"],
                         0,
                         jdimeversion])
        return
    if not srcfile or srcfile == file:
        errorlog = os.path.join(target, 'error.log')
        strategies = job['strategies'].split(',')
        for strategy in strategies:
            # '+' encodes strategy combinations; jdime expects commas.
            strategy = strategy.replace('+', ',')
            scenario = '%s %s %s %s %s %s %s %s' % (project, timestamp,
                                                    mergecommit, left, right,
                                                    file, mergetype, strategy)
            cmd = job['cmd'].replace(STRATEGY, strategy).split(' ')
            exe = cmd[0]
            args = cmd[1:]
            outfile = args[7]
            runtimes = []
            for i in range(runs):
                if os.path.exists(outfile):
                    os.remove(outfile)
                t0 = time.time()
                # ret, stdout, stderr = local[exe][args].run(retcode=None)
                p = local[exe][args].popen()
                try:
                    stdout, stderr = p.communicate(timeout=timeout)
                    ret = p.returncode
                    t1 = time.time()
                    runtimes.append(t1 - t0)
                except TimeoutExpired:
                    # Kill the whole process tree and record ret = -5.
                    kill(p.pid)
                    stdout = ''
                    stderr = ('Timeouted after %d seconds.\r\n' % (timeout)).encode("utf-8")
                    ret = -5
                    t1 = time.time()
                    runtimes.append(t1 - t0)
                    break
            runtime = statistics.median(runtimes)
            if ret >= 0 and ret <= 127:
                # Normal termination: parse jdime's XML statistics output.
                tree = ET.fromstring(stdout)
                conflicts = int(tree.find("./mergescenariostatistics/conflicts").text)
                clines = int(tree.find('./mergescenariostatistics/lineStatistics').attrib['numOccurInConflict'])
                ctokens = int(tree.find('./mergescenariostatistics/tokenStatistics').attrib['numOccurInConflict'])
                parsed_conflicts = count_conflicts(outfile)
                loc_out = int(local['wc']['-l', outfile]().split(' ')[0])
                xmlruntimes={'merge': None,
                             'parse': None,
                             'semistructure': None,
                             'LinebasedStrategy': None,
                             'SemiStructuredStrategy': None,
                             'StructuredStrategy': None}
                for e in tree.findall("./mergescenariostatistics/runtime"):
                    for label in xmlruntimes:
                        if label == e.attrib['label']:
                            xmlruntimes[label] = int(e.attrib['timeMS'])
                if not writer:
                    print('%s: ' % scenario, end='')
                    if conflicts > 0:
                        print(colors.cyan | ('OK (%d conflicts)' % conflicts))
                    else:
                        print(colors.green | 'OK')
                else:
                    writer.writerow([project,
                                     timestamp,
                                     mergecommit,
                                     left,
                                     right,
                                     file,
                                     mergetype,
                                     strategy,
                                     conflicts,
                                     clines,
                                     ctokens,
                                     parsed_conflicts,
                                     runtime,
                                     xmlruntimes['merge'],
                                     xmlruntimes['parse'],
                                     xmlruntimes['semistructure'],
                                     xmlruntimes['LinebasedStrategy'],
                                     xmlruntimes['SemiStructuredStrategy'],
                                     xmlruntimes['StructuredStrategy'],
                                     job["loc_in"],
                                     loc_out,
                                     jdimeversion])
            else:
                # Crash or timeout: record the failure and append stderr to
                # the scenario's error log.
                fail = True
                if not writer:
                    print('%s: ' % scenario, end='', file=sys.stderr)
                    print(colors.red | ('FAILED (%d)' % ret), file=sys.stderr)
                else:
                    writer.writerow([project,
                                     timestamp,
                                     mergecommit,
                                     left,
                                     right,
                                     file,
                                     'FAILED (' + str(ret) + ')',
                                     strategy,
                                     '',
                                     '',
                                     '',
                                     '',
                                     runtime,
                                     '',
                                     '',
                                     '',
                                     '',
                                     '',
                                     '',
                                     job["loc_in"],
                                     '',
                                     jdimeversion])
                with open(errorlog, 'a') as err:
                    err.write(80 * '=' + '\r\n')
                    err.write(scenario + '\r\n')
                    err.write('> %s\r\n' % ' '.join(cmd))
                    err.write(80 * '-' + '\r\n')
                    err.writelines(stderr.decode("utf-8"))
                    err.write(80 * '-' + '\r\n')
    if prune and not fail:
        # All strategies succeeded: delete this scenario's files and any
        # directories that become empty as a result.
        for root, dirs, files in os.walk(target, topdown=False):
            for f in files:
                path = os.path.join(root, f)
                if path.endswith(file):
                    os.remove(path)
            if not os.listdir(root):
                os.rmdir(root)
def write_state(project, commit, strategies, statedir):
    """Record (project, commit, strategy) tuples as completed in *statedir*.

    Strategies already present in the per-project state file are removed
    from *strategies* in place; any remaining ones are appended to the
    file. No-op when *statedir* is falsy.
    """
    if not statedir:
        return
    statefile = os.path.join(statedir, project)
    if os.path.exists(statefile):
        with open(statefile, 'r') as f:
            reader = csv.DictReader(
                f, delimiter=';',
                fieldnames=['project', 'commit', 'strategy'])
            for done in reader:
                same_task = (project == done['project']
                             and commit == done['commit'])
                if same_task and done['strategy'] in strategies:
                    strategies.remove(done['strategy'])
                    if len(strategies) == 0:
                        # Everything already recorded; leave the file alone.
                        return
    with open(statefile, 'a') as f:
        statewriter = csv.writer(f, delimiter=';')
        for strategy in strategies:
            statewriter.writerow([project, commit, strategy])
def main():
    """Parse CLI options, prepare merge jobs, and run them.

    Results go to stdout as CSV (``-c``) or as human-readable lines.
    Sets the module-global ``jdimeversion`` so run() can tag each row.
    """
    global jdimeversion
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output',
                        help='Store output in this directory',
                        type=str)
    parser.add_argument('-m', '--modes',
                        help='Strategies to be prepared, separated by comma',
                        type=str,
                        default='structured,linebased')
    parser.add_argument('-j', '--jdimeopts',
                        help='Additional options to pass to jdime',
                        type=str)
    parser.add_argument('-f', '--file',
                        help='Merge only specified file',
                        type=str)
    parser.add_argument('-p', '--prune',
                        help='Prune successfully merged scenarios',
                        action="store_true")
    parser.add_argument('-c', '--csv',
                        help='Print in csv format',
                        action="store_true")
    parser.add_argument('-H', '--header',
                        help='Include csv header',
                        action="store_true")
    parser.add_argument('-n', '--noop',
                        help='Do not actually run',
                        action="store_true")
    parser.add_argument('-s', '--statedir',
                        help='Use state files to skip completed tasks',
                        type=str)
    parser.add_argument('-b', '--before',
                        help='Use only commits before <date>',
                        type=str)
    parser.add_argument('-r', '--runs',
                        help='Run task this many times (e.g., for benchmarks)',
                        type=int,
                        default=1)
    parser.add_argument('-t', '--tag',
                        help='Append this tag to each line',
                        type=str)
    parser.add_argument('commits', default=[], nargs='+')
    args = parser.parse_args()
    strategies = args.modes.split(',')
    writer = None
    if args.csv:
        writer = csv.writer(sys.stdout, delimiter=';')
        if args.header:
            outputcols = ['project',
                          'timestamp',
                          'mergecommit',
                          'left',
                          'right',
                          'file',
                          'mergetype',
                          'strategy',
                          'conflicts',
                          'clines',
                          'ctokens',
                          'parsed_conflicts',
                          'runtime',
                          't_merge',
                          't_parse',
                          't_semistructure',
                          't_LinebasedStrategy',
                          't_SemiStructuredStrategy',
                          't_StructuredStrategy',
                          'loc_in',
                          'loc_out',
                          'jdimeversion']
            writer.writerow(outputcols)
    if args.output:
        target = args.output
    else:
        target = tempfile.mkdtemp(prefix="jdime.")
    if args.statedir:
        if not os.path.exists(args.statedir):
            os.makedirs(args.statedir)
    if args.tag:
        jdimeversion = args.tag
    else:
        jdimeversion = local['jdime']['-v']().strip()
    if args.runs > 1:
        jdimeversion += " runs:" + str(args.runs)
    # make sure this doesn't interfere with our csv delimiter
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so semicolons were never actually removed from the version tag.
    jdimeversion = jdimeversion.replace(';', ',')
    project = os.path.basename(os.getcwd())
    commits = args.commits
    if len(commits) == 1 and commits[0] == 'all':
        # Benchmark every merge commit of the repository, one at a time.
        for commit in get_merge_commits(args.before):
            for job in get_jobs(target, strategies, args.jdimeopts, args.noop, args.statedir, [commit, ]):
                run(job, args.prune, writer, args.runs, args.file, args.noop)
            write_state(project, commit, strategies.copy(), args.statedir)
    else:
        for job in get_jobs(target, strategies, args.jdimeopts, args.noop, args.statedir, commits):
            run(job, args.prune, writer, args.runs, args.file, args.noop)
        for commit in commits:
            write_state(project, commit, strategies.copy(), args.statedir)
    if args.prune and os.path.exists(target) and not os.listdir(target):
        os.rmdir(target)
    elif not args.csv:
        print()
        if args.prune:
            stored = 'Erroneous'
        else:
            stored = 'All'
        print('%s merge scenarios have been stored to %s' % (stored, target))
if __name__ == "__main__":
main()
| 14,791 | 3,826 |
from wagtail import VERSION as WAGTAIL_VERSION
if WAGTAIL_VERSION >= (2, 0):
from wagtail.core.signals import page_published
from wagtail.contrib.redirects.models import Redirect
if WAGTAIL_VERSION >= (2, 10):
from wagtail.core.signals import post_page_move
else:
post_page_move = None
else:
from wagtail.wagtailcore.signals import page_published
from wagtail.wagtailredirects.models import Redirect
# Create redirect from old slug to new if slug changed in published page.
# Redirect will be created for Page and all it's children.
# It will not work when page moved in the site tree.
def create_redirect_object_if_slug_changed(sender, **kwargs):
    """page_published receiver: redirect from the last published URL.

    Walks the page's revisions newest-first to find its most recent
    published state; if that URL differs from the current one, creates or
    updates a Redirect and recurses into the page's children.
    """
    instance = kwargs['instance']
    # The main part is getting the old URL from which the redirect is required.
    # Wagtail keeps the record of every page change in terms of revisions.
    # This will help to keep track of every change made to page including
    # page slug. The next part is determining the revision is for draft or
    # published page. For example, an admin user start editing the page
    # (with slug /original) change Url (/original-changed) and save as draft.
    # On next edit, user again change the URL to something else
    # (/original-desired) and then publish the page. So, in this case, redirect
    # should be created from /original to /original-desired. Page object that
    # has has_unpublished_changes value True, is draft revision. Interestingly
    # when admin user edit a page, user is editing the page object created from
    # JSON and value is stored as JSON in revision.
    page_revisions = instance.revisions.order_by('-created_at', '-id')
    for revision in page_revisions:
        page_obj = revision.page.specific_class.from_json(
            revision.content_json)
        # The first revision's page object that has has_published_changes
        # value False is the last published Page.
        if not page_obj.has_unpublished_changes:
            # Only create redirect if slug change
            if instance.url != page_obj.url:
                old_path = Redirect.normalise_path(page_obj.url)
                Redirect.objects.update_or_create(
                    old_path=old_path,
                    defaults={
                        'redirect_page': instance
                    }
                )
                # Also create redirect objects for children of this Page
                create_redirect_objects_for_children(old_path, page_obj)
            break
def create_redirect_object_after_page_move(sender, **kwargs):
    """post_page_move receiver: redirect from the page's pre-move URL.

    Does nothing when the URL path did not change. Otherwise creates or
    updates a Redirect for the page itself and then for all its children.
    """
    if kwargs['url_path_before'] == kwargs['url_path_after']:
        return
    moved_page = kwargs['instance']
    old_parent_url = kwargs['parent_page_before'].get_url()
    old_url = Redirect.normalise_path(old_parent_url + moved_page.slug)
    Redirect.objects.update_or_create(
        old_path=old_url,
        defaults={'redirect_page': moved_page}
    )
    create_redirect_objects_for_children(old_url, moved_page)
# Register receivers
def register_signal_handlers():
    """Connect the redirect-creating receivers to Wagtail's page signals.

    ``post_page_move`` only exists on Wagtail >= 2.10 (it is ``None``
    otherwise, see the conditional imports above), so it is connected
    conditionally.
    """
    page_published.connect(create_redirect_object_if_slug_changed)
    if post_page_move is not None:
        post_page_move.connect(create_redirect_object_after_page_move)
def create_redirect_objects_for_children(parent_old_slug, parent):
    """Recursively create redirects for all children of *parent*.

    Each child gets a redirect from its URL under the parent's old slug to
    the child page itself, then the child's own children are processed.

    :param parent_old_slug: the parent's previous (normalised) URL path
    :param parent: the page whose descendants need redirects
    """
    # IMPROVEMENT: the original called parent.get_children() twice (once
    # for an emptiness check, once to iterate). Iterating directly is
    # equivalent — a loop over an empty queryset does nothing — and avoids
    # the duplicate database query.
    for child_page in parent.get_children():
        old_path = Redirect.normalise_path(
            parent_old_slug + '/' + child_page.slug)
        Redirect.objects.update_or_create(
            old_path=old_path,
            defaults={
                'redirect_page': child_page
            }
        )
        create_redirect_objects_for_children(old_path, child_page)
| 3,863 | 1,085 |
import os
import glob
import pprint
from pathlib import Path
# Setup
pp = pprint.PrettyPrinter(indent=4)  # pretty-printer used for all output
# All variable
# NOTE(review): os.path.join() with a single argument is a no-op; the
# "add trailing slash" intent relies on the default (or the env var value)
# already ending in '/'. Confirm DOWNLOAD_FOLDER_PATH ends with a slash.
DOWNLOAD_FOLDER_PATH = os.path.join(  # add trailing slash
    os.getenv('DOWNLOAD_FOLDER_PATH', f'{Path.home()}/Downloads/')  # get from env
)
def extract_file_data(filepath):
    """Build a small metadata record for *filepath*.

    :return: dict with the extension as returned by splitext (including
        the leading dot), the modification time, and the path itself
    """
    extension = os.path.splitext(filepath)[1]
    return {
        'ext': extension,
        'last_modified': os.path.getmtime(filepath),
        'filepath': filepath,
    }
def ext_mapping(list_of_files):
    """Return the set of distinct, lower-cased extensions of the records."""
    return {record.get('ext').lower() for record in list_of_files}
def file_grouping(list_of_ext, list_of_files):
    """Group file records by extension and count each group.

    :param list_of_ext: iterable of extensions to group by; extensions not
        present among the files still get an empty group and a count of 0
    :param list_of_files: iterable of file records (see extract_file_data)
    :return: (ext_counter, ext_group) where ext_counter maps extension ->
        number of matching files and ext_group maps extension -> tuple of
        the matching records, in input order

    NOTE(review): matching is case-sensitive while ext_mapping lower-cases,
    so mixed-case extensions never match; behavior preserved as-is.
    """
    # IMPROVEMENT: single pass over the files instead of one filter() scan
    # per extension (the original was O(len(ext) * len(files))).
    buckets = {ext: [] for ext in list_of_ext}
    for file_object in list_of_files:
        ext = file_object.get('ext')
        if ext in buckets:
            buckets[ext].append(file_object)
    ext_group = {ext: tuple(items) for ext, items in buckets.items()}
    ext_counter = {ext: len(items) for ext, items in buckets.items()}
    return ext_counter, ext_group
if __name__ == "__main__":
filepaths = glob.glob(DOWNLOAD_FOLDER_PATH + '*')
filepaths.sort(key=os.path.getmtime) # sort by time modified, unix time, asc (last is newest)
files = tuple(map(extract_file_data, filepaths))
extensions = ext_mapping(files)
extension_counter, file_group = file_grouping(extensions, files)
pp.pprint(extension_counter)
# Sample, get all .zip files
pp.pprint(file_group.get('.zip'))
| 1,377 | 481 |
# Copyright 2020-present, Netherlands Institute for Sound and Vision (Nanne van Noord)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pika
import sys
import json
import threading
from time import sleep
import functools
import logging
MAX_RETRY = 8  # give up reconnecting after this many consecutive attempts
RETRY_INTERVAL = 2  # seconds; base of the exponential back-off (2 ** attempt)
logger = logging.getLogger('DANE')
class RabbitMQHandler():
    """Base class managing a RabbitMQ connection for DANE components.

    Handles (re)connecting with exponential back-off, declares the topic
    exchange and response queue, and publishes task messages.
    ``run``/``stop``/``assign_callback`` are abstract and must be provided
    by the server-side subclass.
    """

    def __init__(self, config):
        # config provides the RABBITMQ.* connection settings used below.
        self.config = config
        self.callback = None
        self.retry = 0  # consecutive failed connection attempts so far
        self.connect()

    def connect(self):
        """Open the connection (if absent or closed) and set up channels.

        Retries recursively with exponential back-off
        (RETRY_INTERVAL ** attempt seconds), giving up after MAX_RETRY
        attempts.
        """
        if not hasattr(self, 'connection') or \
                not self.connection or self.connection.is_closed:
            credentials = pika.PlainCredentials(
                self.config.RABBITMQ.USER,
                self.config.RABBITMQ.PASSWORD)
            try:
                self.connection = pika.BlockingConnection(
                    pika.ConnectionParameters(
                        credentials=credentials,
                        host=self.config.RABBITMQ.HOST,
                        port=self.config.RABBITMQ.PORT))
            except (pika.exceptions.AMQPConnectionError,
                    pika.exceptions.ConnectionClosedByBroker) as e:
                self.retry += 1
                if self.retry <= MAX_RETRY:
                    nap_time = RETRY_INTERVAL ** self.retry
                    logger.warning('RabbitMQ Connection Failed. '
                                   'RETRYING in {} seconds'.format(nap_time))
                    sleep(nap_time)
                    self.connect()
                else:
                    logger.critical(
                        'RabbitMQ connection failed, no retries left')
                    raise e from None
            else:
                self.retry = 0  # connected successfully; reset the counter
                self.channel = self.connection.channel()
                # Separate channel with delivery confirmation for publishing.
                self.pub_channel = self.connection.channel()
                self.pub_channel.confirm_delivery()
                self.channel.exchange_declare(
                    exchange=self.config.RABBITMQ.EXCHANGE,
                    exchange_type='topic')
                self.channel.queue_declare(
                    queue=self.config.RABBITMQ.RESPONSE_QUEUE,
                    durable=True)

    def run(self):
        # Abstract: the consume loop lives in the server-side subclass.
        raise NotImplementedError('Run should be implemented server-side')

    def stop(self):
        # Abstract: shutdown lives in the server-side subclass.
        raise NotImplementedError('Stop should be implemented server-side')

    def assign_callback(self, callback):
        # Abstract: message-handler registration lives in the subclass.
        raise NotImplementedError('assign_callback should be implemented server-side')

    def publish(self, routing_key, task, document, retry=False):
        """Publish a persistent task+document message.

        On a stale channel (ChannelWrongStateError), reconnects and retries
        exactly once; any further failure is re-raised.
        """
        try:
            self.pub_channel.basic_publish(
                exchange=self.config.RABBITMQ.EXCHANGE,
                routing_key=routing_key,
                properties=pika.BasicProperties(
                    reply_to=self.config.RABBITMQ.RESPONSE_QUEUE,
                    correlation_id=str(task._id),
                    priority=int(task.priority),
                    delivery_mode=2
                ),
                mandatory=True,
                body=json.dumps({
                    # flipflop between json and object is intentional
                    # but maybe not most elegant way..
                    'task': json.loads(task.to_json()),
                    'document': json.loads(document.to_json())
                }))
        except pika.exceptions.ChannelWrongStateError as e:
            if not retry:  # retry once
                logger.exception('Publish error')
                self.connect()
                self.publish(routing_key, task, document, retry=True)
            else:
                raise e
        except Exception as e:
            raise e
| 4,295 | 1,124 |
""""
The goal of this module is to implement all the visualization
tools needed to graph the data and results of the computations
for the Task 3 from the coding homeworks in the Machine Learning
course on coursera.com.
"""
import numpy as np
import matplotlib.pyplot as plt
def display_random_grid(x: np.ndarray, n: int = 20, indices: np.ndarray = None) -> None:
    """
    Display a grid with n digits on it. If no indices are specified,
    a grid of n random digits is displayed.

    Args:
        x:
            An array containing 5000 images. Each image is a row. Each image contains 400 pixels (20x20).
        n:
            Number of digits to be displayed.
        indices:
            The indices of the digits in matrix x.

    Returns:
        None
    """
    if indices is None:
        indices = np.random.choice(x.shape[0], n)
    plt.figure(figsize=(6, 6))
    # NOTE(review): the ``1:`` slice drops the first column — presumably a
    # bias/intercept column prepended to x — but that leaves 399 values per
    # row, which does not tile into 20x20 digits. Confirm x's layout
    # against the caller before relying on this rendering.
    image = x[indices, 1:].reshape(-1, n).T
    plt.imshow(image)
    plt.axis("off")
| 947 | 300 |
from datetime import datetime
from pydantic import BaseModel
class BaseObject(BaseModel):
    """Common base schema; orm_mode allows building instances from ORM objects."""
    class Config:
        orm_mode = True
class Profile(BaseObject):
    """Profile schema; exposes only the name."""
    name: str
class Ship(BaseObject):
    """Ship schema; exposes only the name."""
    name: str
class Cruise(BaseObject):
    """Cruise schema; exposes only the name."""
    name: str
class IdentifiedObject(BaseObject):
    """Base for schemas carrying an integer primary key."""
    id: int
class Project(IdentifiedObject):
    """A named project with an id."""
    name: str
class EcoPartProject(Project):
    """EcoPart project; identical shape to Project, kept distinct for typing."""
    ...
class EcoTaxaProject(Project):
    """EcoTaxa project; identical shape to Project, kept distinct for typing."""
    ...
class Sample(IdentifiedObject):
    """A located, timestamped sample."""
    lat: float
    lng: float
    # NOTE(review): this field name shadows the imported ``datetime`` class
    # inside the class body; it works, but a rename would be clearer.
    datetime: datetime
class SampleDetails(Sample):
    """Sample plus its related entities, for detail views."""
    visibility: str
    ecopart_project: EcoPartProject
    ecotaxa_project: EcoTaxaProject
    profile: Profile
    ship: Ship
    cruise: Cruise
| 708 | 231 |
import argparse
import os
import cv2
import tqdm
def convert(fn):
    """Binarize the image at *fn* in place.

    Reads the file, adaptive-thresholds its grayscale version, converts
    the result back to 3-channel BGR, and overwrites the original file.
    """
    source = cv2.imread(fn)
    gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 45, 11)
    cv2.imwrite(fn, cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR))
if __name__ == '__main__':
    """
    Now only feasible for trackA_XX
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default="../datasets/icdar2019/at_trackA_archival")
    args = parser.parse_args()
    # Walk each subdirectory of root_dir (skipping JSON annotation files)
    # and binarize every image inside (skipping XML annotation files).
    for fdname in os.listdir(args.root_dir):
        if fdname.endswith(".json"):
            continue
        ffdname = os.path.join(args.root_dir, fdname)
        for file in tqdm.tqdm(os.listdir(ffdname)):
            if file.endswith(".xml"):
                continue
            ffile = os.path.join(ffdname, file)
            convert(ffile)
| 986 | 386 |
message = """\n\nThere are 3 types of numbers in python-3
Now we will see python-3 numbers,
namely: int, float & complex. (there is no need for long)
ints are whole numbers and floats are real numbers.
where complex has combination of real & imaginary numbers.
print() requires parenthesis '()'
"""
print(message)
a = 10
print(a)
print ("\nYou can also use type(var) to know the type of variable")
print(type(a))
b = 43.12
print(b)
print(type(b))
another_comment = """\nNotice that int and flot here are defined as classes.
There are no size limits in python 3,
Provided that your computer is able to handle that kind of huge number..
Complex numbers are usually represented as z = 46 + 5i
But python uses letter 'j' instead as z = 46 + 5j
"""
print(another_comment)
z = 46 + 5j
print(z)
print(type(z))
print("\nPrint real part only via z.real:\n")
print(z.real)
print("\nPrint imaginary part only via z.imag:\n")
print(z.imag)
| 1,233 | 355 |
import angr
class setvbuf(angr.SimProcedure):
    """SimProcedure stub for libc ``setvbuf``.

    Ignores its arguments and reports success so symbolic execution can
    proceed without modeling stdio buffering.
    """
    def run(self, stream, buf, type_, size):
        # setvbuf returns 0 on success; the buffering mode is irrelevant here.
        return 0
| 109 | 39 |
import random
from collections import deque
import pygame
import constants
import shared
from monster import Monster
from .modeintroduction0 import ModeIntroduction0
from .modeopening import ModeOpening
class ModeOpening3(ModeOpening):
    """Title-screen mode: shows the logo and 'press any key' text while a
    looping parade of monsters walks to the screen center and off again."""

    GROUND_LEVEL = constants.SCREEN_SIZE[1] - 8
    CENTER_TIME = 2500  # ms a monster pauses at screen center
    TRANSITION_TIME = 750  # ms for each walk-on / walk-off lerp
    EMPTY_TIME = 250  # ms the screen stays empty between monsters
    FULL_MONSTER_WAIT_TIME = EMPTY_TIME + TRANSITION_TIME + CENTER_TIME + TRANSITION_TIME
    __slots__ = (
        'monsters',
        'wait_time',
        'last_level',
        'background',
        'initial_wait_time',
    )

    def __init__(self):
        super().__init__()
        # static elements setup
        self.background = pygame.Surface(constants.SCREEN_SIZE).convert(shared.display.screen)
        self.background.fill(constants.WHITE)
        shared.font_wrap.renderToCentered(
            self.background,
            (constants.SCREEN_SIZE[0] // 2, constants.SCREEN_SIZE[1] // 2 + 4),
            "press any key to start",
            False,
            constants.BLACK
        )
        logo = pygame.image.load(constants.CHIKKAI_LOGO).convert(shared.display.screen)
        self.background.blit(
            logo,
            (
                constants.SCREEN_SIZE[0] // 2 - logo.get_width() // 2,
                constants.SCREEN_SIZE[1] // 4 - logo.get_height() // 2,
            )
        )
        # monster loop setup
        self.last_level = 3
        self.monsters = deque((), 3)  # rolling window of the 3 queued monsters
        monster = self._getMonster(0, 3)
        # start the first one in the center
        monster.rect.midbottom = (constants.SCREEN_SIZE[0] // 2, self.GROUND_LEVEL)
        monster.anims.popleft()
        monster.anims.popleft()
        self.monsters.append(monster)
        self.wait_time = self.CENTER_TIME + self.TRANSITION_TIME
        self.monsters.append(self._getMonster(self.wait_time))
        self.wait_time += self.FULL_MONSTER_WAIT_TIME
        self.monsters.append(self._getMonster(self.wait_time))
        self.wait_time += self.FULL_MONSTER_WAIT_TIME
        self.initial_wait_time = self.wait_time

    def _getMonster(self, wait_time, level=None):
        """Spawn a monster queued to walk on after *wait_time* ms.

        When *level* is None, a random level differing from the previous
        monster's is chosen so the same monster never appears twice in a row.
        """
        # wait_time is how much time until the previous mon is off the screen
        if level is None:
            level = random.choice(
                [i for i in range(1, 4) if i != self.last_level]
            )
        monster = Monster.atLevel(level)
        self.last_level = level
        self.all_sprites.add(monster)
        # Start just off the right edge of the screen, on the ground.
        monster.rect.midbottom = (
            constants.SCREEN_SIZE[0] + monster.rect.width // 2,
            self.GROUND_LEVEL
        )
        monster.addWait(wait_time + self.EMPTY_TIME)
        # Walk to center, pause, then walk off the left edge.
        monster.addPosAbs(
            Monster.Lerp,
            self.TRANSITION_TIME,
            constants.SCREEN_SIZE[0] // 2,
            self.GROUND_LEVEL - monster.rect.height // 2
        )
        monster.addWait(self.CENTER_TIME)
        monster.addPosAbs(
            Monster.Lerp,
            self.TRANSITION_TIME,
            monster.rect.width // -2,
            self.GROUND_LEVEL - monster.rect.height // 2
        )
        return monster

    def _switchMode(self):
        """Advance to the first introduction mode on any key press."""
        self.next_mode = ModeIntroduction0()

    def _update(self, dt):
        """Advance the countdown and keep the monster queue topped up."""
        self.wait_time -= dt
        # every so often, set up additional looping monsters here, so we don't run out
        if self.wait_time < self.initial_wait_time - self.FULL_MONSTER_WAIT_TIME:
            monster = self._getMonster(self.wait_time)
            self.monsters[0].kill()
            self.monsters.append(monster)
            self.wait_time += self.FULL_MONSTER_WAIT_TIME

    def _drawScreen(self, screen):
        """Blit the pre-rendered static background."""
        screen.blit(self.background, (0, 0))
| 3,685 | 1,223 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
# coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
import json
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy
from onadata.apps.logger.models import Instance
class Command(BaseCommand):
    help = ugettext_lazy("Fixes deleted instances by syncing "
                         "deleted items from mongo.")

    def handle(self, *args, **kwargs):
        """Re-sync instance deletion state from MongoDB.

        First clears ``deleted_at`` on all SQL instances of downloadable
        forms, then re-applies every deletion recorded in the Mongo
        ``instances`` collection.
        """
        # Reset all sql deletes to None
        Instance.objects.exclude(
            deleted_at=None, xform__downloadable=True).update(deleted_at=None)
        # Get all mongo deletes
        query = '{"$and": [{"_deleted_at": {"$exists": true}}, ' \
                '{"_deleted_at": {"$ne": null}}]}'
        query = json.loads(query)
        xform_instances = settings.MONGO_DB.instances
        cursor = xform_instances.find(query)
        for record in cursor:
            # update sql instance with deleted_at datetime from mongo
            try:
                i = Instance.objects.get(
                    uuid=record["_uuid"], xform__downloadable=True)
            except Instance.DoesNotExist:
                continue
            else:
                deleted_at = parse_datetime(record["_deleted_at"])
                # Make naive datetimes timezone-aware (UTC) before saving.
                if not timezone.is_aware(deleted_at):
                    deleted_at = timezone.make_aware(
                        deleted_at, timezone.utc)
                i.set_deleted(deleted_at)
| 1,651 | 467 |
from douyinspider.structures.hot import *
from douyinspider.structures.base import Base
from douyinspider.structures.music import Music
from douyinspider.structures.user import User
from douyinspider.structures.video import Video
from douyinspider.structures.address import Address
from douyinspider.structures.topic import Topic
from douyinspider.structures.word import Word
from douyinspider.structures.music_collection import MusicCollection
| 445 | 131 |
def convertBack(x, y, w, h):
    """Convert a center-based box (x, y, w, h) to integer corner coordinates.

    :return: (xmin, ymin, xmax, ymax), each rounded to the nearest int
    """
    half_w = w / 2
    half_h = h / 2
    xmin = int(round(x - half_w))
    ymin = int(round(y - half_h))
    xmax = int(round(x + half_w))
    ymax = int(round(y + half_h))
    return xmin, ymin, xmax, ymax
def cvDrawBoxes(detections, img):
    """Draw labeled bounding boxes on *img* for known detection classes.

    :param detections: iterable of (label, confidence, bbox) where bbox is
        a center-based (x, y, w, h) box and confidence is a string
    :param img: BGR image (numpy array), drawn on in place
    :return: the same image with rectangles and labels drawn
    """
    # Colored labels dictionary
    color_dict = {
        'Tin can': [0, 255, 255], 'Bottle': [238, 123, 158]
    }
    for label, confidence, bbox in detections:
        # IMPROVEMENT: direct dict lookup replaces the original O(n) scan
        # over the color table; detections whose label has no configured
        # color are still skipped, matching the original behavior.
        color = color_dict.get(label)
        if color is None:
            continue
        x, y, w, h = (bbox[0],
                      bbox[1],
                      bbox[2],
                      bbox[3])
        xmin, ymin, xmax, ymax = convertBack(
            float(x), float(y), float(w), float(h))
        pt1 = (xmin, ymin)
        pt2 = (xmax, ymax)
        cv2.rectangle(img, pt1, pt2, color, 1)
        cv2.putText(img,
                    label +
                    " [" + confidence + "]",
                    (pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    color, 2)
    return img
import argparse
import os
from datetime import datetime
from attngan import attngan
from cyclegan import cyclegan
def parse_args():
    """Parse command-line arguments for the text-to-art generator."""
    parser = argparse.ArgumentParser(description="Generate art from text")
    parser.add_argument("caption", type=str, metavar="\"caption\"",
                        help="text to generate from")
    parser.add_argument("style", type=str,
                        choices=["abstract_expressionism", "impressionism"],
                        help="the style of the artwork")
    parser.add_argument("-d", "--dataset", dest="dataset", type=str,
                        choices=["birds", "coco"], default="birds",
                        help="dataset to generate from (default birds)")
    parser.add_argument("-n", "--number", dest="number", type=int, default=2,
                        help="the number of artworks to generate (default 2)")
    parser.add_argument("-i", "--identity", dest="identity",
                        action="store_true",
                        help="set lambda_idt = 5 instead of lambda_idt = 0.5")
    parser.add_argument("-c", "--cpu", dest="cpu", action="store_true",
                        help="use cpu")
    parser.add_argument("-v", "--verbose", dest="verb", action="store_true",
                        help="print more details")
    return parser.parse_args()
args = parse_args()
# Time-stamped output directory so repeated runs never collide.
savepath = os.path.join("../results/", datetime.today().strftime("%Y-%m-%d_%H-%M-%S/"))
print("--------------- Generating images ---------------")
attngan(args.caption, args.dataset, args.number, savepath, args.cpu, args.verb)
print("---------------------- End ----------------------\n")
print("----------------- Applying style -----------------")
cyclegan(savepath, args.style, args.dataset, args.identity, args.cpu, args.verb)
print("---------------------- End ----------------------")
# Record the inputs alongside the generated artwork.
with open(savepath + "text", "w") as f:
    f.write(args.caption + "\n")
    f.write(args.style)
| 1,717 | 524 |
#
# config.py
#
SERVER_HOSTNAME = "eecs325.local"  # hostname the server listens on
SERVER_PORT = 80
SERVER_MAX_CONN_QUEUE = 5  # Number of connection requests to queue.
BUFFER_SIZE = 4096  # bytes per socket send/recv
MESSAGE_ENCODING = "utf8"  # text encoding for all wire messages
from flask import Flask
app = Flask(__name__)
app.config['DEBUG'] = True  # development setting; disable in production
# Imported at the bottom on purpose: models and views need ``app``, which
# must exist before they load (standard Flask circular-import pattern).
from app import models, views
| 115 | 43 |
from django.db import models
class Thumbs(models.Model):
    """
    Class for handling thumbs up/thumbs down voting
    """
    # Who is voting? Null for anonymous users.
    person = models.ForeignKey('scipy_central.Person', null=True, blank=True)
    # submission: if voting for a submission, otherwise Null.
    submission = models.ForeignKey('scipy_central.Submission', null=True,
                                   blank=True)
    # Which comment is being voted on. Can be null.
    comment = models.ForeignKey('scipy_central.Comment', null=True,
                                blank=True)
    # When the vote was cast
    date_time = models.DateField(auto_now=True)
    # IP_address: for abuse prevention
    # NOTE(review): IPAddressField was deprecated and removed in Django 1.9
    # in favour of GenericIPAddressField — confirm the Django version in use.
    ip_address = models.IPAddressField()
    # user_agent: web browser's user agent: for abuse prevention
    user_agent = models.CharField(max_length=255)
    # vote: ``True`` is thumbs up and ``False`` is thumbs down
    vote = models.BooleanField()
| 973 | 281 |
# This file is part of ssh-import-id. See LICENSE file for more info.
_LAST_RELEASE = "5.10"
_PACKAGED_VERSION = '5.10-1'
VERSION = _LAST_RELEASE
if not _PACKAGED_VERSION.startswith("@@"):
VERSION = _PACKAGED_VERSION
# vi: ts=4 expandtab syntax=python
| 260 | 107 |
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Clamp range for the Gaussian policy's log-std outputs.
SCALE_DIAG_MIN_MAX = (-20, 2)
# Numerical-stability epsilon used inside log() during squashing.
EPS = 1e-6
def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    :return: (tf.Tensor, tf.Tensor) the tanh-squashed action and its
        corrected log probability
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    # Change-of-variable correction for tanh. CONSISTENCY FIX: use the
    # module-level EPS constant instead of a duplicated hard-coded 1e-6.
    squashed_action_logp = logp - tf.reduce_sum(
        tf.log(1 - squashed_action ** 2 + EPS), axis=1)
    # incurred by change of variable
    return squashed_action, squashed_action_logp
class OnlineReplayBuffer:
    """Fixed-capacity circular buffer of (obs, action, reward, next_obs,
    done) transitions, with uniform random sampling."""

    def __init__(self, state_dim, action_dim, buffer_size):
        self.buffer_size = buffer_size
        self.obs = np.zeros([buffer_size, state_dim])
        self.action = np.zeros([buffer_size, action_dim])
        self.reward = np.zeros([buffer_size, 1])
        self.next_obs = np.zeros([buffer_size, state_dim])
        self.done = np.zeros([buffer_size, 1])
        self._pointer = 0  # next write slot
        self.size = 0  # number of valid transitions stored
        self.buffer = [self.obs, self.action, self.reward, self.next_obs, self.done]

    def add_samples(self, *samples):
        """Insert a batch of transitions, overwriting oldest when full."""
        batch = len(samples[0])
        slots = np.arange(self._pointer, self._pointer + batch) % self.buffer_size
        for storage, incoming in zip(self.buffer, samples):
            assert len(incoming) == batch
            storage[slots] = incoming
        self._pointer = (self._pointer + batch) % self.buffer_size
        self.size = min(self.size + batch, self.buffer_size)

    def add_sample(self, *sample):
        """Insert a single transition (each field is given a batch axis)."""
        self.add_samples(*(np.array(field)[None] for field in sample))

    def can_sample(self, batch_size):
        """True when at least *batch_size* transitions are stored."""
        return self.size >= batch_size

    def sample(self, batch_size):
        """Uniformly sample a batch as [obs, action, reward, next_obs, done]."""
        picks = np.random.randint(0, self.size, size=batch_size)
        return [storage[picks] for storage in self.buffer]

    def sample_obs(self, batch_size):
        """Uniformly sample a batch of observations only."""
        picks = np.random.randint(0, self.size, size=batch_size)
        return self.obs[picks]

    def format_for_model_training(self):
        """Return (inputs, targets) for dynamics-model training:
        inputs = [obs, action], targets = [reward, next_obs - obs]."""
        valid = slice(0, self.size)
        obs, action = self.obs[valid], self.action[valid]
        next_obs, reward = self.next_obs[valid], self.reward[valid]
        inputs = np.concatenate([obs, action], axis=-1)
        targets = np.concatenate([reward, next_obs - obs], axis=-1)
        return inputs, targets
# Simple replay buffer
class OfflineReplayBuffer:
    """Replay buffer over a fixed offline dataset with observation standardization."""

    def __init__(self, obs, action, reward, next_obs, done):
        self.obs, self.action, self.reward, self.next_obs, self.done \
            = obs, action, reward, next_obs, done
        # Per-dimension statistics; the small epsilon keeps std away from zero.
        self.obs_mean = np.mean(self.obs, axis=0, keepdims=True)
        self.obs_std = np.std(self.obs, axis=0, keepdims=True) + 1e-3
        self.stan_obs = self.standardizer(np.array(self.obs))
        self.stan_next_obs = self.standardizer(np.array(self.next_obs))

    def standardizer(self, obs):
        """Map raw observations to zero-mean, unit-ish variance."""
        return (obs - self.obs_mean) / self.obs_std

    def unstandardizer(self, obs):
        """Inverse of standardizer()."""
        return obs * self.obs_std + self.obs_mean

    def format_for_model_training(self):
        """Return (inputs, targets, terminals) for dynamics-model training."""
        inputs = np.concatenate([self.stan_obs, self.action], axis=-1)
        targets = np.concatenate(
            [np.array(self.reward)[:, None], self.stan_next_obs - self.stan_obs],
            axis=-1)
        terminals = np.reshape(np.array(self.done), [-1, 1])
        return inputs, targets, terminals

    def sample(self, batch_size):
        """Sample a batch with replacement; observations come back standardized."""
        indices = np.random.randint(0, len(self.obs), size=batch_size)
        obs = np.array([self.obs[i] for i in indices])
        action = np.array([self.action[i] for i in indices])
        reward = np.array([self.reward[i] for i in indices])
        next_obs = np.array([self.next_obs[i] for i in indices])
        done = np.array([self.done[i] for i in indices])
        return (self.standardizer(obs), action, reward[:, None],
                self.standardizer(next_obs), done[:, None])

    def sample_obs(self, batch_size):
        """Sample a standardized batch of observations with replacement."""
        indices = np.random.randint(0, len(self.obs), size=batch_size)
        return self.standardizer(np.array([self.obs[i] for i in indices]))
class SquahedGaussianActor(tf.keras.layers.Layer):
    """Tanh-squashed Gaussian policy head (SAC-style actor).

    Produces a diagonal Gaussian over actions from a 2-layer MLP and squashes
    samples into (-1, 1) via apply_squashing_func.
    """
    def __init__(self, action_dim, hidden_dim=256):
        super(SquahedGaussianActor, self).__init__()
        self.action_dim = action_dim
        # Actor parameters
        self.a_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f0')
        self.a_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f1')
        self.a_l2_mu = tf.keras.layers.Dense(action_dim, name='a/f2_mu')
        self.a_l2_log_std = tf.keras.layers.Dense(action_dim, name='a/f2_log_std')
    def feedforward(self, obs):
        """Run the MLP trunk; returns (mean, std) of the pre-squash Gaussian."""
        h = self.a_l0(obs)
        h = self.a_l1(h)
        mean = self.a_l2_mu(h)
        log_std = self.a_l2_log_std(h)
        # Clamp log-std to SCALE_DIAG_MIN_MAX for numerical stability.
        std = tf.exp(tf.clip_by_value(log_std, *SCALE_DIAG_MIN_MAX))
        return mean, std
    def call(self, inputs, **_):
        # inputs is a 1-tuple containing the observation batch.
        obs, = inputs
        mean, std = self.feedforward(obs)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        # NOTE(review): attaches an ad-hoc .shape attribute onto the
        # distribution object -- presumably read downstream; confirm consumers.
        dist.shape = mean.shape
        sampled_action = dist.sample()
        sampled_action_logp = dist.log_prob(sampled_action)
        squahsed_action, squahsed_action_logp = \
            apply_squashing_func(sampled_action, sampled_action_logp)
        # Deterministic action = squashed mean (log-prob discarded).
        deterministic_action, _ = \
            apply_squashing_func(mean, dist.log_prob(mean))
        return deterministic_action, squahsed_action, squahsed_action_logp, dist
    def nlogp(self, dist, action):
        ''' negative logp of unnormalized action '''
        # Invert the tanh squash (clipped away from +/-1 to keep atanh finite),
        # then apply the change-of-variables correction.
        before_squahed_action = tf.atanh(
            tf.clip_by_value(action, -1 + EPS, 1 - EPS))
        log_likelihood = dist.log_prob(before_squahed_action)
        log_likelihood -= tf.reduce_sum(
            tf.log(1 - action ** 2 + EPS), axis=1)
        return -tf.reduce_mean(log_likelihood)
class VNetwork(tf.keras.layers.Layer):
    """Two-hidden-layer MLP state-value network V(s)."""

    def __init__(self, output_dim=1, hidden_dim=64):
        super(VNetwork, self).__init__()
        self.v_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f0')
        self.v_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f1')
        self.v_l2 = tf.keras.layers.Dense(output_dim, name='v/f2')

    def call(self, inputs, **_):
        # inputs is a 1-tuple containing the observation batch.
        obs, = inputs
        hidden = self.v_l1(self.v_l0(obs))
        return self.v_l2(hidden)
class QNetwork(tf.keras.layers.Layer):
    """Ensemble of `num_critics` independent Q(s, a) MLP critics."""

    def __init__(self, output_dim=1, num_critics=2, hidden_dim=64):
        super(QNetwork, self).__init__()
        self.num_critics = num_critics
        self.qs_l0, self.qs_l1, self.qs_l2 = [], [], []
        for i in range(self.num_critics):
            self.qs_l0.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f0'))
            self.qs_l1.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f1'))
            self.qs_l2.append(tf.keras.layers.Dense(output_dim, name=f'q{i}/f2'))

    def call(self, inputs, **_):
        obs, action = inputs
        joined = tf.concat([obs, action], axis=1)
        # One forward pass per critic; outputs stay separate so callers can
        # take e.g. the element-wise minimum.
        return [l2(l1(l0(joined)))
                for l0, l1, l2 in zip(self.qs_l0, self.qs_l1, self.qs_l2)]
| 7,590 | 2,673 |
'''
Import data saved with XIO-sensors
'''
'''
Author: Thomas Haslwanter
Version: 0.2
Date: May-2016
'''
import os
import pandas as pd
def read_ratefile(reg_file):
    '''Read send-rates from an XIO sensor "Registers"-file.

    "Disabled" channels have the "rate" set to "None".

    Parameters
    ----------
    reg_file : string
        Has to be the "Registers"-file.

    Returns
    -------
    rates : dict
        Contains the send-rates for the different "params".
    '''
    params = ['Sensor',
              'DateTime',
              'BatteryAndThermometer',
              'InertialAndMagnetic',
              'Quaternion'
              ]
    rates = {}
    # Read the file content
    with open(reg_file, 'r') as in_file:
        lines = in_file.readlines()
    # Get the send rates; the flag is the third comma-separated field.
    # NOTE: keeps the original "find(...) > 0" test, i.e. a parameter name at
    # the very start of a line is not matched -- confirm against file format.
    for param in params:
        for line in lines:
            if line.find(param) > 0:
                rate_flag = int(line.split(',')[2])
                if rate_flag:
                    # Flag n maps to 2**(n-1) Hz (e.g. flag 10 -> 512 Hz);
                    # a flag of 0 means the channel is disabled.
                    rates[param] = 2 ** (rate_flag - 1)
                else:
                    # Disabled
                    rates[param] = None
    return rates
def read_datafile(in_file):
    '''Read data from an XIO "CalInertialAndMag"-file.

    Parameters
    ----------
    in_file : string
        Has to be the name of the "CalInertialAndMag"-file.

    Returns
    -------
    out_list : list
        Contains the following parameters:

        - acceleration
        - angular_velocity
        - mag_field_direction
        - packet_nr
    '''
    data = pd.read_csv(in_file)
    # Select column groups by name pattern, in a fixed output order.
    patterns = ['Acc', 'Gyr', 'Mag', 'Packet']
    return [data.filter(regex=pattern).values for pattern in patterns]
def get_data(in_selection):
    '''Get the sampling rates, as well as the recorded data.

    Parameters
    ----------
    in_selection : string
        Directory containing all the data-files, or
        filename of one file in that directory

    Returns
    -------
    out_list: list
        Contains the following parameters:

        - rate
        - acceleration
        - angular_velocity
        - mag_field_direction
        - packet_nr
    '''
    # Accept either the data directory itself or any file inside it.
    if os.path.isdir(in_selection):
        in_dir = in_selection
    else:
        in_dir = os.path.split(in_selection)[0]
    # Locate the two XIO files by their conventional name fragments.
    files = {}
    for candidate in os.listdir(in_dir):
        if candidate.find('Registers') > 0:
            files['register'] = os.path.join(in_dir, candidate)
        if candidate.find('CalInertialAndMag') > 0:
            files['data'] = os.path.join(in_dir, candidate)
    # Sampling rates from the registers-file, samples from the data-file.
    rates = read_ratefile(files['register'])
    data = read_datafile(files['data'])
    return [rates['InertialAndMagnetic']] + data
if __name__=='__main__':
    # Smoke test: read the bundled XIO sample data, show rate + acceleration.
    test_dir = r'../../tests/data/data_xio'
    assert os.path.exists(test_dir)
    data = get_data(test_dir)
    print('Rate: {0} [Hz]'.format(data[0]))
    print('Acceleration [m/s^2]:\n {0}'.format(data[1]))
| 3,565 | 1,092 |
def test_load(fake_application_context_class, fake_service_class):
    """Instantiating the context registers one repository and one service
    and exposes the service under its snake_case attribute name."""
    # Before instantiation the singleton holds no registrations.
    assert fake_application_context_class.instance.repositories == []
    assert fake_application_context_class.instance.services == []
    assert not hasattr(
        fake_application_context_class.instance, fake_service_class.__class__.__name__
    )
    context = fake_application_context_class()
    # Singleton: calling the class returns the shared instance.
    assert context == fake_application_context_class.instance
    assert len(context.repositories) == 1
    assert len(context.services) == 1
    from fractal.core.utils.string import camel_to_snake
    assert hasattr(context, camel_to_snake(fake_service_class.__name__))
def test_reload(
    fake_application_context_class,
    fake_service_class,
    another_fake_service_class,
    settings,
):
    """reload() re-resolves services from the (changed) settings without
    creating a new context instance."""
    context = fake_application_context_class()
    assert type(context.fake_service) == fake_service_class
    # Switch the configured service implementation, then reload the context.
    settings.reload(
        {
            "FAKE_SERVICE": "another",
        }
    )
    context.reload()
    assert context == fake_application_context_class.instance
    assert len(context.repositories) == 1
    assert len(context.services) == 1
    from fractal.core.utils.string import camel_to_snake
    # Same attribute name, but now bound to the other implementation.
    assert hasattr(context, camel_to_snake(fake_service_class.__name__))
    assert type(context.fake_service) == another_fake_service_class
def test_adapters(
    fake_application_context_class, inmemory_repository, fake_service_class, settings
):
    """adapters() yields the registered repository followed by the service."""
    settings.reload(
        {
            "FAKE_SERVICE": "",
        }
    )
    context = fake_application_context_class()
    assert len(list(context.adapters())) == 2
    # Order is deterministic: repositories first, then services.
    assert list(context.adapters())[0] == inmemory_repository
    assert list(context.adapters())[1].__class__ == fake_service_class
| 1,746 | 539 |
# -*- coding: utf-8 -*-
"""
@date: 2021/7/26 下午10:10
@file: cutmix.py
@author: zj
@description:
refer to [ clovaai/CutMix-PyTorch](https://github.com/clovaai/CutMix-PyTorch)
"""
import torch
import numpy as np
from zcls.config.key_word import KEY_LOSS
def rand_bbox(size, lam):
    """Sample a random box covering roughly a (1 - lam) fraction of the image.

    :param size: input tensor shape; spatial extents are read from size[2]
        and size[3] (assumes NCHW with square-ish images -- TODO confirm axes)
    :param lam: mixing coefficient, typically drawn from a Beta distribution
    :return: (bbx1, bby1, bbx2, bby2) box corners, clipped to the image
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # Use the builtin int() -- the np.int alias was removed in NumPy 1.24.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform random box center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def cutmix_data(images, targets, alpha=1.0, device=torch.device('cpu')):
    '''
    Returns mixed inputs, pairs of targets, and lambda
    '''
    # lam ~ Beta(alpha, alpha); a non-positive alpha disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    batch_size = images.size()[0]
    rand_index = torch.randperm(batch_size).to(device)
    targets_a, targets_b = targets, targets[rand_index]
    # Paste a random patch from the shuffled batch into each image (in place).
    bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam)
    images[:, :, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbx1:bbx2, bby1:bby2]
    # adjust lambda to exactly match pixel ratio
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2]))
    return images, targets_a, targets_b, lam
def cutmix_criterion(criterion, output_dict: dict, targets_a: torch.Tensor, targets_b: torch.Tensor, lam):
    """Combine the losses for both target sets, weighted by the pixel ratio lam."""
    part_a = criterion(output_dict, targets_a)[KEY_LOSS]
    part_b = criterion(output_dict, targets_b)[KEY_LOSS]
    return {KEY_LOSS: lam * part_a + (1 - lam) * part_b}
def cutmix_evaluate(evaluator, output_dict, targets_a, targets_b, lam):
    """Blend per-metric training accuracies for the two target sets by lam."""
    acc_a = evaluator.evaluate_train(output_dict, targets_a)
    acc_b = evaluator.evaluate_train(output_dict, targets_b)
    blended = dict()
    for (key_a, value_a), (key_b, value_b) in zip(acc_a.items(), acc_b.items()):
        # Both evaluations must report their metrics in the same order.
        assert key_a == key_b
        blended[key_a] = lam * value_a + (1 - lam) * value_b
    return blended
| 2,166 | 949 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import yaml
import gzip
import cPickle as pickle
import cv2
from catkin import terminal_color
import rospy
from jsk_2015_05_baxter_apc.srv import ObjectMatch, ObjectMatchResponse
def get_data_dir():
    """Return the package data directory, creating its sub-directories if needed."""
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            '../data')
    for sub in ['siftdata', 'histogram_data', 'bof_data']:
        sub_dir = os.path.join(data_dir, sub)
        if not os.path.exists(sub_dir):
            os.mkdir(sub_dir)
    return data_dir
def load_img(imgpath):
    """Load an image with OpenCV; logs an error and returns None if unreadable."""
    img = cv2.imread(imgpath)
    if img is not None:
        return img
    rospy.logerr('not found {}'.format(imgpath))
    return img
def save_siftdata(obj_name, siftdata):
    """Save sift data to data/siftdata/{obj_name}.pkl.gz"""
    siftdata_dir = os.path.join(get_data_dir(), 'siftdata')
    if not os.path.exists(siftdata_dir):
        os.mkdir(siftdata_dir)
    rospy.loginfo('save siftdata: {o}'.format(o=obj_name))
    # gzip keeps the pickled descriptors reasonably small on disk.
    with gzip.open(os.path.join(siftdata_dir, obj_name + '.pkl.gz'), 'wb') as f:
        pickle.dump(siftdata, f)
def load_siftdata(obj_name, return_pos=True, dry_run=False, data_dir=None):
    """Load sift data from pkl file.

    dry_run only checks for existence; return_pos=False returns descriptors only.
    """
    if data_dir is None:
        data_dir = os.path.join(get_data_dir(), 'siftdata')
    datafile = os.path.join(data_dir, '{0}.pkl.gz'.format(obj_name))
    if dry_run:  # check if exists
        return datafile if os.path.exists(datafile) else None
    if not os.path.exists(datafile):
        print('not found siftdata: {0}'.format(obj_name))
        return  # does not exist
    print('load siftdata: {0}'.format(obj_name))
    with gzip.open(datafile, 'rb') as f:
        siftdata = pickle.load(f)
    return siftdata if return_pos else siftdata['descriptors']
def get_train_imgs(
        obj_name,
        data_dir=None,
        only_appropriate=True,
        with_mask=True,
        ):
    """Find train images in data/obj_name and yield them one by one.

    Parameters are unchanged:
    - only_appropriate: keep only images whose camera position / rotation
      falls inside the ranges listed in appropriate_images.yml
    - with_mask: add the corresponding mask image onto the train image

    Fixes vs. the original:
    - appropriate_images.yml is parsed once instead of once per image
    - no os.chdir() calls, so the process working directory is left untouched
    - yaml.safe_load instead of yaml.load (no arbitrary object construction)
    """
    if data_dir is None:
        data_dir = get_data_dir()
    obj_dir = os.path.join(data_dir, obj_name)
    if not os.path.exists(obj_dir):
        print(terminal_color.fmt(
            '@{yellow}[WARNING] not found object data: {0}'
            ).format(obj_name))
        return
    appropriate_data = None
    if only_appropriate:
        with open(os.path.join(data_dir, 'appropriate_images.yml')) as f:
            # e.g. {'N1': ['0-30']}: allowed rotation ranges per camera position
            appropriate_data = yaml.safe_load(f)[obj_name]
    for imgfile in os.listdir(obj_dir):
        if not imgfile.endswith('.jpg'):
            continue
        if only_appropriate:
            # N1_30.jpg -> camera position 'N1', rotation 30 [deg]
            basename, _ = os.path.splitext(imgfile)
            camera_pos, rotation_deg = basename.split('_')
            rotation_deg = int(rotation_deg)
            if (not appropriate_data) or (camera_pos not in appropriate_data):
                continue
            in_range = False
            for min_max in appropriate_data[camera_pos]:
                _min, _max = map(int, min_max.split('-'))
                if _min <= rotation_deg <= _max:
                    in_range = True
                    break
            if not in_range:
                continue
        train_img = cv2.imread(os.path.join(obj_dir, imgfile))
        if with_mask:
            maskfile = os.path.splitext(imgfile)[0] + '_mask.pbm'
            mask = cv2.imread(os.path.join(obj_dir, 'masks', maskfile))
            train_img = cv2.add(mask, train_img)
        yield train_img
class ObjectMatcher(object):
    """Base class for ROS services that score object matches; subclasses
    implement match()."""
    def __init__(self, service_name):
        # Advertise the matching service; requests go through _cb_matcher.
        rospy.Service(service_name, ObjectMatch, self._cb_matcher)
    def _cb_matcher(self, req):
        """Callback function for sift match request"""
        rospy.loginfo('received request: {}'.format(req.objects))
        probs = self.match(req.objects)
        return ObjectMatchResponse(probabilities=probs)
    def match(self, obj_names):
        """Get object match probabilities"""
        raise NotImplementedError('override this method')
def is_imgfile(filename):
    """Return True when filename has a known (lowercase) image extension."""
    ext = os.path.splitext(filename)[1]
    return ext in ('.jpg', '.jpeg', '.png', '.pgm')
def listdir_for_img(data_dir):
    """Lazily yield the entries of data_dir that look like image files."""
    for entry in os.listdir(data_dir):
        if is_imgfile(entry):
            yield entry
| 4,625 | 1,552 |
#!/usr/bin/env python3
import asab
class TimerApplication(asab.Application):
    """Demo asab application driven by a 1-second auto-restarting timer."""
    async def initialize(self):
        # The timer will trigger a message publishing at every second
        self.Timer = asab.Timer(self, self.on_tick, autorestart=True)
        self.Timer.start(1)
    async def on_tick(self):
        # Fires once per second while the application runs.
        print("Think!")
if __name__ == '__main__':
    app = TimerApplication()
    app.run()
| 372 | 134 |
import discord as dc
from dotenv import load_dotenv
from os import getenv
import datetime as dt
import json, string
load_dotenv()
#*#*#*# variables #*#*#*#
# Paths and token come from the .env file loaded above.
config_relative_path = getenv("CONFIG")
database_relative_path = getenv("DATABASE")
token = getenv("TOKEN")
#*#*#*#*#*#*#*#*#*#*#*#*#
# cfg holds static settings (prefix, perms, name templates);
# db holds mutable runtime state and is rewritten by BOT.saveDatabase().
with open(config_relative_path) as f:
    cfg = json.load(f)
with open(database_relative_path) as f:
    db = json.load(f)
class BOT(dc.Client):
    def __init__(self, intents=None, *args, **kwargs):
        # Static settings come from the module-level cfg dict, runtime state
        # (debug flag) from the db dict loaded at import time.
        super().__init__(*args, **kwargs, intents=intents)
        self.prefix = cfg['prefix']
        self.perms = cfg['perms']
        self.debugging = db['debugMode']
    async def on_ready(self):
        """Resolve the logs channel and announce the connected guilds."""
        await self.loadLogsChannel()
        for guild in self.guilds:
            print(f"{self.user} connected to {guild.name}, id: {guild.id}")
        print(f"{self.user.name} is alive!")
    async def on_message(self, message):
        """Route incoming messages: ignore self, then group registration,
        then prefixed commands, then the insult easter egg."""
        if message.author == self.user:
            return
        elif db["groupReg"]["active"] and message.channel.id == db["groupReg"]["channel_id"]:
            # While registration is open, its channel only accepts lab/mat picks.
            if "lab" in message.content.lower() or "mat" in message.content.lower():
                await self.groupReg(message)
        elif message.content.startswith(self.prefix):
            await self.command(message)
        elif (self.user.name + " ssie") in message.content or (self.user.name + " sucks") in message.content:
            await message.reply("૮( ᵒ̌▱๋ᵒ̌ )ა ?!")
async def command(self, message):
content = message.content[len(self.prefix):]
args = content.split()[1::] if len(content.split()) > 1 else [None]
command = content.split()[0]
# say command
if command == "say" and await self.checkPerms(message, "say"):
await message.delete()
if any(args):
await message.channel.send(" ".join([arg for arg in args]))
# message purge
elif command == "purge" and await self.checkPerms(message, "purge"):
try:
delRan = int(args[0])
except:
await message.reply("Please specify how many messages to purge.")
else:
if delRan in range(1,51):
await message.channel.purge(limit=delRan+1, bulk=True)
if self.logsActive: await self.log(message)
else:
await message.reply("Purge amount must be in range from `1` to `50`.")
# user info embed getter
elif command == "me" and await self.checkPerms(message, "me"):
if len(message.mentions) == 1:
await message.channel.send(embed=self.getMeEmbed(message, message.mentions[0]))
else:
await message.channel.send(embed=self.getMeEmbed(message))
# role/channel ID getter
elif command == "id" and await self.checkPerms(message, "id"):
if len(args) == 1:
if len(message.role_mentions) == 1:
await message.channel.send(f"id: `{message.role_mentions[0].id}`")
elif len(message.channel_mentions) == 1:
await message.channel.send(f"id: `{message.channel_mentions[0].id}`")
elif len(message.mentions) == 1:
await message.channel.send(f"id: `{message.mentions[0].id}`")
# avatar getter
elif command == "avatar" or command == "av" and await self.checkPerms(message, "avatar"):
if message.mentions:
avatar_url = self.getAvatarURL(message.mentions[0])
else:
avatar_url = self.getAvatarURL(message.author)
await message.reply(avatar_url)
# perms getter/setter
elif command == "perms" or command == "permissions" and await self.checkPerms(message, "permissions"):
if args[0] == "set" and len(args) == 3 and await self.checkPerms(message, "permissions_manage"):
try:
lvl = int(args[2])
if len(message.role_mentions) == 1:
role_id = message.raw_role_mentions[0]
else:
role_id = args[1]
except:
await message.reply(f"Please specify a permission level and role to assign the permission to.")
else:
if lvl not in range(1,3):
await message.reply("Perms level can only be 1 or 2")
else:
if self.managePerms("set", level=lvl, role=role_id):
await message.reply("Role permission changed successfully")
if self.logsActive: await self.log(message)
else:
await message.reply("Error occured while changing role permissions.")
elif (args[0] == "delete" or args[0] == "del") and await self.checkPerms(message, "permissions_manage"):
if len(args) == 2:
if len(message.role_mentions) == 1:
role_id = message.raw_role_mentions[0]
else:
role_id = args[1]
if self.managePerms("delete", role=role_id):
if self.logsActive: await self.log(message)
await message.reply("Role permission deleted successfully")
else:
await message.reply("Error occured while deleting role permissions.")
else:
await message.reply(f"Please specify a role to delete the permission from.")
elif not any(args):
perm_lvl = self.getUserPerms(message.author)
await message.reply(f"Your permission level: `{perm_lvl if perm_lvl < 3 else 'GOD'}`")
# bot prefix setter
elif command == "prefix" and await self.checkPerms(message, "prefix"):
if args[0]:
self.setPrefix(args[0])
await message.channel.send(f"Prefix successfully set to: `{args[0]}`")
if self.logsActive: await self.log(message)
# leaderboard getter
elif command == "leaderboard" and await self.checkPerms(message, "leaderboard"):
lb_len = 5
if args[0]:
try:
lb_len = int(args[0])
except:
await message.reply(f"Please specify the leaderboard lenght like: `{self.prefix}leaderboard 10`")
lb = self.getLeaderboard(message.guild, lb_len)
await message.channel.send(lb)
# debug mode
elif (command == "debug" or command == "debugging") and await self.checkPerms(message, "debugging"):
if args[0] == "on" or args[0] == "true" or args[0] == "1":
if self.debugging:
await message.reply("Debugging mode is already `on`")
else:
self.debugging = db['debugMode'] = True
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Debugging mode has been successfully turned `on`")
elif args[0] == "off" or args[0] == "false" or args[0] == "0":
if not self.debugging:
await message.reply("Debugging mode is already `off`")
else:
self.debugging = db['debugMode'] = False
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Debugging mode has been successfully turned `off`")
# logs management
elif command == "logs" and await self.checkPerms(message, "logs"):
if args[0] == "set":
if len(args) == 2 and len(message.channel_mentions) == 1:
await self.setLogsChannel(message.channel_mentions[0].id)
await message.reply(f"Logs channel successfully set to {message.channel_mentions[0].mention}")
else:
await message.reply(f"Please specify a log channel like: `{self.prefix}logs set #someLogsChannel`")
elif len(args) == 1 and (args[0] == "on" or args[0] == "true" or args[0] == "1"):
self.logsActive = True
db['logs']['active'] = True
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Logs are now turned `on`")
elif len(args) == 1 and (args[0] == "off" or args[0] == "false" or args[0] == "0"):
if self.logsActive: await self.log(message)
self.logsActive = False
db['logs']['active'] = False
self.saveDatabase()
await message.reply("Logs are now turned `off`")
# semester management
elif (command == "semester" or command == "sem") and await self.checkPerms(message, "semester_manage"):
if args[0] == "new" or args[0] == "start":
if not db["groupReg"]["active"]:
try:
group_count = int(args[1])
except:
await message.reply(f"Please specify the number of groups like: `{self.prefix}semester new 8`")
else:
if await self.openGroupReg(message, group_count):
await message.reply("New semester started successfully!")
if self.logsActive: await self.log(message)
else:
await message.reply("An error has occured while creating new semester.")
else:
await message.reply("Group registration is already open!")
elif args[0] == "close" or args[0] == "end":
if db["groupReg"]["active"]:
await self.closeGroupReg(message)
if self.logsActive: await self.log(message)
await message.reply("Group registration has successfully been closed.")
else:
await message.reply("There's no group registration currently ongoing to close!")
# *=*=*=*=*=*=*=*=* COMMANDS *=*=*=*=*=*=*=*=* #
    def saveDatabase(self):
        """Persist the in-memory db dict back to the database JSON file."""
        with open(database_relative_path, mode="w") as f:
            json.dump(db, f, indent=4)
    async def loadLogsChannel(self):
        """Resolve the configured logs channel; disable logging if it is gone."""
        channel = await self.fetch_channel(db['logs']['id'])
        if channel:
            self.logsChannel = channel
            self.logsActive = db['logs']['active']
        else:
            # Channel no longer exists: switch logging off and persist that.
            self.logsActive = db['logs']['active'] = False
            self.saveDatabase()
            print("Logs channel could not be found by id -- Logs were turned off.")
    async def setLogsChannel(self, channel_id):
        """Store a new logs channel id and re-resolve the channel object."""
        db['logs']['id'] = channel_id
        self.saveDatabase()
        await self.loadLogsChannel()
    def getUserPerms(self, user):
        """Return the highest configured permission level among the user's roles.

        Returns -1 (blocked) for level-0 users while debugging mode is on.
        """
        lvls = [0]
        for pLvl, pRoles in db['permRoles'].items():
            if any([role.id in pRoles for role in user.roles]):
                lvls.append(int(pLvl))
        permLevel = max(lvls)
        if permLevel == 0 and self.debugging: return -1
        return permLevel
    async def checkPerms(self, message, command):
        """Return True when the author may run `command`; otherwise reply and
        return False.

        Commands missing from the config perms table require level infinity,
        i.e. they are effectively disabled for everyone.
        """
        perm_lvl = self.getUserPerms(message.author)
        if self.debugging and perm_lvl == -1:
            await message.reply("Can't use commands while bot is in debugging mode.")
            return False
        try:
            required = cfg["perms"][command]
        except:
            required = float('infinity')
        if self.getUserPerms(message.author) >= required:
            return True
        else:
            await message.reply("You don't have the permission to use this command.")
            return False
def getAvatarURL(self, user):
base = "https://cdn.discordapp.com/avatars/"
return base + str(user.id) + "/" + str(user.avatar)
    def getMeEmbed(self, message, user = None):
        """Build a 'User info' embed (join date, tenure, roles) for `user`,
        defaulting to the message author."""
        embed = dc.Embed(title="User info")
        if not user:
            user = message.author
        embed.color = user.color
        embed.set_image(url=self.getAvatarURL(user))
        joined_info = f"Joined server on `{user.joined_at.strftime('%d/%m/%Y')}`"
        joined_info += f"\nBeen here for: `{str(dt.datetime.now() - user.joined_at).split(',')[0]}`"
        user_roles = [role.mention for role in user.roles if role.name != "@everyone"]
        if not any(user_roles):
            roles_info = "No roles to see here!"
        else:
            roles_info = ", ".join(user_roles)
        # ranking_info =
        embed.add_field(name="Join Date", value=joined_info, inline=False)
        embed.add_field(name="User Roles", value=roles_info, inline=False)
        # embed.add_field(name="Ranking", value=ranking_info, inline=False)
        return embed
    def setPrefix(self, new_prefix):
        """Change the command prefix and persist it to the config file."""
        cfg["prefix"] = new_prefix
        with open(config_relative_path, mode="w") as f:
            json.dump(cfg, f, indent=4)
        self.prefix = new_prefix
def getLeaderboard(self, guild, lenght = 5):
ranking = db["ranking"]
ranking.sort(key = lambda x: x["exp"], reverse = True)
lb = ""
r=1
for i in range(min(len(ranking), lenght, 15)):
user = ranking[i]
if not guild.get_member(user['id']):
lb+=f"#{r} {guild.get_member(user['id'])}: {user.get('exp')}\n"
r+=1
print(lb)
return lb
    def managePerms(self, command, **args):
        """Set or delete a role's permission level in db['permRoles'].

        command: "set" (needs level= and role=) or "delete" (needs role=).
        Returns True on success, False on missing arguments / unknown command.
        """
        if command == "set":
            try:
                level = args["level"]
                role = args["role"]
            except:
                return False
            else:
                # Remove the role from any previous level before re-adding it.
                for pLvl, pRoles in db["permRoles"].items():
                    if role in pRoles:
                        if int(pLvl) == level:
                            return True
                        db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
                        break
                db["permRoles"][str(level)].append(role)
                self.saveDatabase()
                return True
        elif command == "delete":
            try:
                role = args["role"]
            except:
                return False
            else:
                for pLvl, pRoles in db["permRoles"].items():
                    if role in pRoles:
                        db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
                        self.saveDatabase()
                        return True
        return False
    async def log(self, message, custom = False):
        """Send an audit entry to the logs channel.

        With custom=False, `message` is a discord message and gets wrapped in
        a numbered case embed; with custom=True it is sent as-is.
        """
        if not custom:
            # Allocate and persist the next case number.
            case = db['logs']['cases']
            db['logs']['cases'] = case+1
            self.saveDatabase()
            embed = dc.Embed(title=f"Log Case #{case}")
            embed.color = message.author.color
            embed.add_field(name="Author", value=message.author.mention, inline=True)
            embed.add_field(name="Channel", value=message.channel.mention, inline=True)
            embed.add_field(name="Date", value=dt.datetime.now().strftime("%d/%m/%Y %H:%M:%S"), inline=True)
            embed.add_field(name="Command", value=f"`{message.content}`", inline=True)
            await self.logsChannel.send(embed=embed)
        else:
            await self.logsChannel.send(message)
    async def resetGroupRoles(self, channel, group_count):
        """Reset lab/math group roles for a new semester.

        Clears existing group roles from all members (archiving who had
        what), deletes roles outside the new group range, creates any
        missing ones, and records role ids in db['groupReg'].
        Returns False on a malformed role template, True otherwise.
        """
        role_template = cfg["nameSpace"]["labRoleTemplate"].split('#')
        math_role_template = cfg["nameSpace"]["mathRoleTemplate"].split('#')
        if len(role_template) != 2:
            print("config group role template invalid: missing '#'?")
            return False
        elif len(math_role_template) != 2:
            print("config math group role template invalid: missing '#'?")
            return False
        # initialize flags to see which roles exist and create the nonexistent ones later
        lab_flags = [0 for _ in range(group_count)]
        # math groups: one per two lab groups, rounded up.
        mat_flags = [0 for _ in range((group_count-1)//2 + 1)]
        records = {} # keep record of removed data to save and log it later
        for role in await channel.guild.fetch_roles():
            if (role.name.startswith(role_template[0]) and role.name.endswith(role_template[1])) or (role.name.startswith(math_role_template[0]) and role.name.endswith(math_role_template[1])):
                role_type = "LAB" if role.name.startswith(role_template[0]) else "MAT"
                records[str(role.name)] = []
                members = role.members
                # g_id determines the current group's number
                if role_type == "LAB":
                    g_id = int(role.name[len(role_template[0]):-len(role_template[1])])
                elif role_type == "MAT":
                    g_id = int(role.name[len(math_role_template[0]):-len(math_role_template[1])])
                # clear role from every user and store the changes in records
                await channel.send(f"Clearing `{role.name}` from `{len(members)}` users..")
                for member in members:
                    records[role.name].append(str(member.name + '#' + member.discriminator))
                    await member.remove_roles(role)
                # remove the role entirely if it's not in range of new semester's group length
                if g_id not in range(1,group_count+1):
                    await channel.send(f"Removing `{role.name}`..")
                    await role.delete()
                elif role_type == "MAT" and g_id not in range(1,len(mat_flags)+1):
                    await channel.send(f"Removing `{role.name}`..")
                    await role.delete()
                else:
                    # set flags for roles kept for next semester and save their id's in db for future registration management
                    if role_type == "LAB":
                        lab_flags[g_id-1] = 1
                        db["groupReg"]["role_ids"][str(g_id)] = role.id
                    elif role_type == "MAT":
                        mat_flags[g_id-1] = 1
                        db["groupReg"]["math_role_ids"][str(g_id)] = role.id
                    self.saveDatabase()
        # create nonexistent roles based on gaps in flags
        for ID, flag in enumerate(lab_flags):
            if not flag:
                name = f"{role_template[0]}{ID+1}{role_template[1]}"
                await channel.send(f"Creating `{name}`..")
                role = await channel.guild.create_role(name=name,mentionable=True,hoist=True,color=dc.Color.random())
                db["groupReg"]["role_ids"][str(ID+1)] = role.id
        for ID, flag in enumerate(mat_flags):
            if not flag:
                name = f"{math_role_template[0]}{ID+1}{math_role_template[1]}"
                await channel.send(f"Creating `{name}`..")
                role = await channel.guild.create_role(name=name,mentionable=True,color=dc.Color.random())
                db["groupReg"]["math_role_ids"][str(ID+1)] = role.id
        self.saveDatabase()
        # save records to file and log them to logs channel if active
        with open('archives.txt', 'a') as f:
            json.dump(records, f, indent=4)
        # if self.logsActive:
        #     await self.log(f'```json\n{json.dumps(records,indent=4)}\n```', custom=True)
        #     await channel.send(f'`Archive sent to logs channel and saved on machine.`')
        # else:
        await channel.send(f'`Archive saved on machine.`')
        return True
    async def openGroupReg(self, message, group_count):
        """Open group registration for a new semester.

        Resets group roles, rebuilds the registration category/channels,
        posts the (Polish) how-to notice and decorates group channels.
        Returns True on success, False if the role reset failed.
        """
        if await self.resetGroupRoles(message.channel, group_count):
            db["groupReg"]["active"] = True
            db["groupReg"]["groupCount"] = group_count # group_count determines the len of lab groups in new semester
            # rid of registration category and text channels if they exist
            for category in message.guild.categories:
                if category.name == cfg["nameSpace"]["groupsRegCategory"]:
                    for channel in category.channels:
                        await channel.delete()
                    await category.delete()
                    break
            # create new category with its text channels for registration
            GRC = await message.guild.create_category(name=cfg["nameSpace"]["groupsRegCategory"], position=2)
            GRIC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegInfoChannel"])
            # Info channel is read-only for @everyone (guild.roles[0]).
            await GRIC.set_permissions(message.guild.roles[0], send_messages = False, read_messages = True)
            GRC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegChannel"])
            # save the channel id used for registration for command management purposes
            db["groupReg"]["channel_id"] = GRC.id
            self.saveDatabase()
            # send registration opening notification to GRIC
            await message.channel.send(f'`Group registration channel created.`')
            info = f''':warning: @everyone Rejestracja do grup w nowym semestrze została otwarta! :warning: \n
**Aby poprawnie zarejestrować się do grupy LAB oraz MAT wyślij** `lab #numerGrupy` **oraz** `mat #numerGrupy` **na kanale** {GRC.mention}, np. `lab 4`; `mat 2` lub `lab 4 mat 2`.
Dla osób będących w kilku grupach laboratoryjnych jednocześnie - proszę kontaktować się z administracją serwera.'''
            await GRIC.send(info)
            # send new semester decorator on all group channels
            for channel in message.guild.channels:
                if channel.name.endswith(cfg["nameSpace"]["generalChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
                elif channel.name.endswith(cfg["nameSpace"]["datesChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
                elif channel.name.startswith(cfg["nameSpace"]["mathChannelTemplate"]):
                    await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
            return True
        return False
async def groupReg(self, message):
user = message.author
content = message.content.lower()
l_id = content.find('lab')
m_id = content.find('mat')
digits = string.digits
lab_gr = mat_gr = None
# do some string magic to extract lab group number from message if it inclues "lab" keyword
if l_id >= 0:
if m_id > l_id: # dont include the "mat" keyword if it appears after "lab"
cntnt = content[l_id+3:m_id].lstrip()
else: cntnt = content[l_id+3:].lstrip()
lab_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
# return with an exception if the number is not in current lab groups range
if lab_gr not in range(1,db["groupReg"]["groupCount"]+1):
await message.reply(f"Lab group needs to be between `1` and `{db['groupReg']['groupCount']}`.")
return
# same string magic for mat group number
if m_id >= 0:
if l_id > m_id: # dont include the "lab" keyword if it appears after "mat"
cntnt = content[m_id+3:l_id].lstrip()
else: cntnt = content[m_id+3:].lstrip()
mat_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
# return with an exception if the number is not in current mat groups range
if mat_gr not in range(1,(db["groupReg"]["groupCount"]-1)//2 + 2):
await message.reply(f"Mat group needs to be between `1` and `{(db['groupReg']['groupCount']-1)//2 + 1}`.")
return
# assign group roles to user and catch the output
out = await self.regToGroups(user, lab_gr, mat_gr)
if out:
await message.reply(f"Successfully registered to: `{'`, `'.join(out)}`")
else:
await message.reply("An error occured while registering to group, please try again.")
async def regToGroups(self, user, labGroup=None, matGroup=None):
if not (labGroup or matGroup): return False
for role in user.roles:
if labGroup and role.id in tuple(db["groupReg"]["role_ids"].values()):
await user.remove_roles(role)
elif matGroup and role.id in tuple(db["groupReg"]["math_role_ids"].values()):
await user.remove_roles(role)
output = [] # store successfully applied roles in output
if labGroup:
lab_id = db["groupReg"]["role_ids"][str(labGroup)]
role = user.guild.get_role(lab_id)
output.append(role.name)
await user.add_roles(role)
if matGroup:
mat_id = db["groupReg"]["math_role_ids"][str(matGroup)]
role = user.guild.get_role(mat_id)
output.append(role.name)
await user.add_roles(role)
return output
async def closeGroupReg(self, message):
# reset group registration database
db["groupReg"]["active"] = False
db["groupReg"]["channel_id"] = None
db["groupReg"]["groupCount"] = 0
db["groupReg"]["role_ids"] = {}
db["groupReg"]["math_role_ids"] = {}
self.saveDatabase()
# rid of registration category and text channels if they exist
for category in message.guild.categories:
if category.name == cfg["nameSpace"]["groupsRegCategory"]:
for channel in category.channels:
await channel.delete()
await category.delete()
break
intents = dc.Intents.all()
bot_client = BOT(intents=intents)
bot_client.run(token) | 26,708 | 7,885 |
#!/usr/bin/env python -u
# Filter out emission from a THOR HI cube. Will 2D Fourier transform each plane of the cube and zero out the centre
# of the Fourier image and then inverse Fouroer transform back to the image domain. This produces a cube without the
# large scale emission
# Author James Dempsey
# Date 26 Nov 2017
from __future__ import print_function, division
import argparse
import sys
import time
from astropy.io import fits
import numpy as np
import pyfftw
def parseargs():
    """
    Build and run the command line parser for the cube filter.

    :return: the parsed argparse namespace (input, output, radius, threads)
    """
    arg_parser = argparse.ArgumentParser(
        description="Filter the large scale emission from an imag cube using Fourier transforms",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional arguments: source cube and destination file.
    arg_parser.add_argument("input", help="The name of the file to be filtered.")
    arg_parser.add_argument("output", help="The name of the filtered file to be produced.")
    # Tuning knobs with sensible defaults.
    arg_parser.add_argument("-r", "--radius", help="The radius of the filter to apply to the centre of the Fourier image.",
                            default=20, type=int)
    arg_parser.add_argument("-t", "--threads", help="The number of threads to be used for the Fourier transform.",
                            default=4, type=int)
    return arg_parser.parse_args()
def do_fftw(image, threads=2):
    """
    Forward 2-D Fourier transform of an image via the pyFFTW library.

    :param image: The square float64 image to be transformed.
    :param threads: The number of threads to be used by pyFFTW.
    :return: The fourier transform.
    """
    # pyFFTW needs its input in aligned memory, so copy the image in first.
    aligned = pyfftw.empty_aligned(image.shape, dtype='float64')
    aligned[:] = image
    transform = pyfftw.builders.fft2(aligned, axes=(0, 1), threads=threads)
    return transform()
def do_ifftw(image, threads=2):
    """
    Inverse 2-D Fourier transform of a Fourier image via the pyFFTW library.

    :param image: The square complex128 image to be transformed.
    :param threads: The number of threads to be used by pyFFTW.
    :return: The inverse fourier transform.
    """
    # Copy into pyFFTW-aligned memory before building the plan.
    aligned = pyfftw.empty_aligned(image.shape, dtype='complex128')
    aligned[:] = image
    transform = pyfftw.builders.ifft2(aligned, axes=(0, 1), threads=threads)
    return transform()
def fft_image(image, threads=4):
    """
    Forward-transform an image and centre its spectrum.

    The image must be square, real and of type float64. The result is
    shifted so the zero-frequency component sits at the centre.

    :param image: The square image to be transformed.
    :param threads: The number of threads to be used by pyFFTW.
    :return: The centred complex Fourier transform.
    """
    return np.fft.fftshift(do_fftw(image, threads))
def ifft_image(ft_shift, threads=4):
    """
    Invert a centred Fourier transform back to the image domain.

    The zero-frequency component is first moved back away from the centre,
    then the inverse transform is applied. The real part of the returned
    complex128 array is the filtered image.

    :param ft_shift: The centred complex Fourier transform.
    :param threads: The number of threads to be used by pyFFTW.
    :return: The complex inverse Fourier transformed image.
    """
    return do_ifftw(np.fft.ifftshift(ft_shift), threads=threads)
def filter_plane(plane, radius=20, threads=4):
    """
    Remove the large-scale emission from one spatial plane of the cube.

    The plane is mirrored in both axes (to make the data periodic and
    suppress FFT edge artefacts), zero-padded to a square, transformed,
    the central (low spatial frequency) region of the spectrum is zeroed,
    and the result is transformed back. Only the quadrant corresponding
    to the original plane is returned.

    :param plane: 2D float array, one spatial slice of the cube
    :param radius: half-width in pixels of the central square to zero out
    :param threads: number of pyFFTW threads
    :return: the filtered plane as float32, same shape as the input
    """
    # Prepare the spatial slice for fft
    start = time.time()
    # Mirror right then down: the plane becomes the top-left quadrant.
    flipped = np.concatenate((plane, np.fliplr(plane)), axis=1)
    mirrored = np.concatenate((flipped, np.flipud(flipped)), axis=0)
    # NOTE(review): x_pad assumes the mirrored array has at least as many
    # rows as columns (plane height >= width); otherwise x_pad goes
    # negative and the pad call fails - confirm against input cube shapes.
    x_pad = (mirrored.shape[0] - mirrored.shape[1]) // 2
    # np.lib.pad is a legacy alias of np.pad; pads zeros on left/right only.
    padded = np.lib.pad(mirrored, ((0, 0), (x_pad, x_pad)), 'constant')
    prep_end = time.time()
    print('  Prep for plane took %.02f s' % (prep_end - start))
    sys.stdout.flush()
    # Do the fft
    ft_img = fft_image(padded, threads)
    ft_end = time.time()
    print('  FFT for plane took %.02f s' % (ft_end - prep_end))
    sys.stdout.flush()
    # Filter out the large scale emission: zero a (2*radius)^2 square at the
    # centre of the shifted spectrum, i.e. the lowest spatial frequencies.
    centre_y = ft_img.shape[0] // 2
    centre_x = ft_img.shape[1] // 2
    ft_img[centre_y - radius:centre_y + radius, centre_x - radius:centre_x + radius] = 0
    # Invert the fft to get back the image
    inverted = ifft_image(ft_img, threads)
    ift_end = time.time()
    print('  iFFT for plane took %.02f s' % (ift_end - ft_end))
    sys.stdout.flush()
    # Keep only the real part, then cut the original (un-mirrored,
    # un-padded) top-left quadrant back out.
    post_psd_2d = inverted.real
    centre_y = post_psd_2d.shape[0] // 2
    centre_x = post_psd_2d.shape[1] // 2
    post_plane = post_psd_2d[:centre_y, x_pad:centre_x].astype(np.float32)
    return post_plane
def filter_image(image, radius=40, threads=4):
    """
    Filter every spatial plane of a cube, returning a new float32 cube.

    :param image: 3D array, planes along the first axis
    :param radius: Fourier filter radius passed to filter_plane
    :param threads: number of pyFFTW threads
    :return: the filtered cube, same shape as the input
    """
    #pyfftw.interfaces.cache.enable()
    result = np.zeros(image.shape, dtype=np.float32)
    for plane_idx, plane in enumerate(image):
        print("Processing plane", plane_idx)
        sys.stdout.flush()
        result[plane_idx] = filter_plane(plane, radius, threads)
    return result
def load_image(filename):
    """
    Open a FITS file (memory-mapped) and return its primary data and header.

    :param filename: path to the FITS cube
    :return: (data array, FITS header) tuple
    """
    hdulist = fits.open(filename, memmap=True)
    primary = hdulist[0]
    print("Image shape is", primary.data.shape)
    return primary.data, primary.header
def save_image(filename, image, header, radius):
    """
    Write the filtered cube to a FITS file, recording the filter in history.

    :param filename: output FITS path (overwritten if present)
    :param image: the filtered data array
    :param header: FITS header to carry over (history card appended)
    :param radius: the filter radius, recorded in the history card
    """
    note = "Emission filtered with radius {} Fourier filter.".format(radius)
    header['history'] = note
    fits.PrimaryHDU(image, header).writeto(filename, overwrite=True)
def main():
    """
    Main script for filter_cube: parse arguments, filter the cube,
    save the result and report the elapsed time.

    :return: The exit code
    """
    args = parseargs()
    start = time.time()
    start_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))
    print("#### Started filtering of cube {} at {} ####".format(args.input, start_stamp))
    # Filter the image
    orig_image, header = load_image(args.input)
    filtered = filter_image(orig_image, radius=args.radius, threads=args.threads)
    save_image(args.output, filtered, header, args.radius)
    # Report
    end = time.time()
    print('#### Filtering completed at %s ####' %
          time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
    print('Filtering took %.02f s' %
          (end - start))
    return 0
# Run the script if it is called from the command line
if __name__ == "__main__":
    # Use sys.exit rather than the bare exit() builtin: exit() is injected by
    # site.py and is not guaranteed to exist (e.g. under `python -S` or in a
    # frozen interpreter); sys is already imported at the top of this file.
    sys.exit(main())
| 6,635 | 2,156 |
import numpy as np
import scipy.stats as st
from niscv_v2.basics.exp import Exp
from niscv_v2.basics import utils
import multiprocessing
import os
from functools import partial
from datetime import datetime as dt
import pickle
def experiment(dim, fun, size_est, sn, show, size_kn, ratio, bootstrap):
    """
    Run one full NISCV estimation pipeline for a standard-normal target.

    The target is a dim-dimensional standard normal; the proposal is a
    normal shifted by 0.5 in every coordinate with covariance 4. The Exp
    object is driven through initial estimation, resampling, density and
    nonparametric estimation, control calculation and regression estimation.

    :param dim: dimension of the target/proposal distributions
    :param fun: integrand passed to Exp (e.g. utils.integrand(setting))
    :param size_est: estimation sample size for Exp
    :param sn: self-normalisation flag forwarded to Exp -- TODO confirm semantics
    :param show: if truthy, Exp draws diagnostic plots at each stage
    :param size_kn: kernel number used for resampling
    :param ratio: resampling ratio forwarded to Exp.resampling
    :param bootstrap: bootstrap mode forwarded to Exp.resampling
    :return: (exp.result, exp.params) as produced by the Exp instance
    """
    mean = np.zeros(dim)
    target = lambda x: st.multivariate_normal(mean=mean).pdf(x)
    proposal = st.multivariate_normal(mean=mean + 0.5, cov=4)
    # 1-D grid only used for the optional diagnostic plots.
    grid_x = np.linspace(-5, 5, 200)
    exp = Exp(dim, target, fun, proposal, size_est, sn=sn, adjust=False, show=show)
    exp.initial_estimation()
    exp.resampling(size_kn, ratio, bootstrap=bootstrap)
    if exp.show:
        exp.draw(grid_x, name='initial')
    exp.density_estimation(mode=1, local=False, gamma=0.3, bdwth=1.0, alpha0=0.1)
    # Run all three nonparametric estimation modes in sequence.
    exp.nonparametric_estimation(mode=0)
    exp.nonparametric_estimation(mode=1)
    exp.nonparametric_estimation(mode=2)
    if exp.show:
        exp.draw(grid_x, name='nonparametric')
    exp.control_calculation()
    exp.regression_estimation()
    if exp.show:
        exp.draw(grid_x, name='regression')
    return exp.result, exp.params
def run(it, dim, bootstrap):
    """
    Run replication *it*: every integrand setting crossed with every ratio.

    :param it: replication index, also used to derive the RNG seed
    :param dim: problem dimension forwarded to experiment()
    :param bootstrap: bootstrap mode forwarded to experiment()
    :return: [results, params] -- nested lists indexed [setting][ratio]
    """
    settings = [1, 2, 3, 4, -1, -2]
    ratios = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
    all_results = []
    all_params = []
    for setting in settings:
        row_results = []
        row_params = []
        for ratio in ratios:
            # Re-seed before every cell so each (it, setting, ratio) run is
            # reproducible independently of execution order.
            np.random.seed(19971107 + it)
            print(dim, bootstrap, it, setting, ratio)
            res, par = experiment(dim=dim, fun=utils.integrand(setting), size_est=10000, sn=True,
                                  show=False, size_kn=300, ratio=ratio, bootstrap=bootstrap)
            row_results.append(res)
            row_params.append(par)
        all_results.append(row_results)
        all_params.append(row_params)
    return [all_results, all_params]
def main(dim, bootstrap):
    """
    Fan 1000 replications of run() over a 60-process pool and pickle the result.

    :param dim: problem dimension, also part of the output file name
    :param bootstrap: bootstrap mode, also part of the output file name
    """
    # Keep each worker single-threaded so the 60 processes do not oversubscribe.
    os.environ['OMP_NUM_THREADS'] = '1'
    with multiprocessing.Pool(processes=60) as pool:
        begin = dt.now()
        replications = np.arange(1000)
        R = pool.map(partial(run, dim=dim, bootstrap=bootstrap), replications)
        print((dt.now() - begin).seconds)
    with open('../../data/simulation/resampling_ratio_{}D_{}'.format(dim, bootstrap), 'wb') as fh:
        pickle.dump(R, fh)
if __name__ == '__main__':
    # Run both dimensions for each bootstrap mode back to back.
    # NOTE(review): 'st'/'sp' are bootstrap mode tags consumed by
    # Exp.resampling -- confirm their meaning against the Exp class.
    main(4, 'st')
    main(6, 'st')
    main(4, 'sp')
    main(6, 'sp')
| 2,383 | 884 |
# -*- coding: utf-8 -*-
# 220. Contains Duplicate III
#
# Given an array of integers,
# find out whether there are two distinct indices i and j in the array
# such that the absolute difference between nums[i] and nums[j] is at most t
# and the absolute difference between i and j is at most k.
import collections
class Solution(object):
    def containsNearbyAlmostDuplicate(self, nums, k, t):
        """
        :type nums: List[int]
        :type k: int
        :type t: int
        :rtype: bool
        http://bookshadow.com/weblog/2015/06/03/leetcode-contains-duplicate-iii/
        Approach I: "sliding window" + dictionary of buckets.
        If:       | nums[i] - nums[j] | <= t                               (a)
        then:     | nums[i] / t - nums[j] / t | <= 1                       (b)
        implies:  | floor(nums[i] / t) - floor(nums[j] / t) | <= 1         (c)
        i.e.:     floor(nums[j] / t) is one of {floor(nums[i] / t) - 1,
                  floor(nums[i] / t), floor(nums[i] / t) + 1}              (d)
        (b) is sufficient but not necessary for (c); by the contrapositive,
        if floor(nums[j] / t) is NOT in that three-key set then
        | nums[i] - nums[j] | > t.
        So it suffices to maintain a size-k window (an ordered dict) numDict
        whose keys are nums[i] / t and values are nums[i], and for each new
        element only compare against the values under the three
        neighbouring keys.
        NOTE: this file targets Python 2 -- `/` on ints is floor division.
        """
        if k < 1 or t < 0:
            return False
        numDict = collections.OrderedDict()
        for i in range(len(nums)):
            key = nums[i] / max(1, t)  # in case t==0 (avoid dividing by zero)
            for m in (key, key - 1, key + 1):
                if m in numDict and abs(nums[i] - numDict[m]) <= t:
                    return True
            numDict[key] = nums[i]
            if i >= k:
                # Keep the window at size k: evict the oldest entry (FIFO).
                numDict.popitem(last=False)
        return False

    # https://www.hrwhisper.me/leetcode-contains-duplicate-i-ii-iii/
    # Bucket method, O(n).
    #
    # Idea: split the value range into buckets of width t + 1; a number goes
    # into bucket num / (t + 1). Two values within t of each other must land
    # in the same bucket or in adjacent buckets, so only those are checked.
    #
    # E.g. t = 4: 0..4 -> bucket 0, 5..9 -> bucket 1, 10..14 -> bucket 2, ...
    #
    # NOTE(review): this definition shadows the one above -- only this bucket
    # version is bound on the class at runtime.
    def containsNearbyAlmostDuplicate(self, nums, k, t):
        if t < 0:
            return False
        div = t + 1
        vis = {}
        for i, num in enumerate(nums):
            index = num / div  # Python 2 floor division -> bucket id
            if index in vis \
                    or index - 1 in vis and abs(vis[index - 1] - num) <= t \
                    or index + 1 in vis and abs(vis[index + 1] - num) <= t:
                return True
            vis[index] = num
            if i >= k:
                # Evict the element that just slid out of the size-k window.
                del vis[nums[i - k] / div]
        return False
if __name__ == '__main__':
    # Smoke checks (Python 2 print statements).
    print Solution().containsNearbyAlmostDuplicate([2, 5, 3, 9], 1, 1)
    print Solution().containsNearbyAlmostDuplicate([-1, -1], 1, 0)
| 2,641 | 1,202 |
from . import BaseAgent
from .. import constants
class FrozenAgent(BaseAgent):
    """An agent that never moves: it answers every turn with the Stop action."""

    def __init__(self, *args, **kwargs):
        super(FrozenAgent, self).__init__(*args, **kwargs)

    def act(self, obs, action_space):
        # The observation and action space are ignored; always stand still.
        return constants.Action.Stop.value
| 312 | 96 |
"""
Planner for Yahtzee
Simplifications: only allow discard and roll, only score against upper level
"""
# Used to increase the timeout, if necessary
import codeskulptor
codeskulptor.set_timeout(20)
def gen_all_sequences(outcomes, length):
    """
    Enumerate the set of all ordered sequences (tuples) of the given
    length whose items are drawn, with repetition, from outcomes.
    """
    sequences = set([()])
    for _ in range(length):
        # Extend every partial sequence by every possible outcome.
        sequences = set(seq + (item,) for seq in sequences for item in outcomes)
    return sequences
def score(hand):
    """
    Compute the maximal upper-section Yahtzee score for a hand:
    the best value of (count of die value) * (die value).

    hand: full yahtzee hand
    Returns an integer score (0 for an empty hand)
    """
    if not hand:
        return 0
    return max(hand.count(die) * die for die in set(hand))
def expected_value(held_dice, num_die_sides, num_free_dice):
    """
    Expected score when keeping held_dice and rolling num_free_dice
    fresh dice, each with num_die_sides sides.

    held_dice: dice that you will hold
    num_die_sides: number of sides on each die
    num_free_dice: number of dice to be rolled
    Returns a floating point expected value
    """
    outcomes = range(1, num_die_sides + 1)
    rolls = gen_all_sequences(outcomes, num_free_dice)
    # Average the score over every equally-likely roll of the free dice.
    total = sum(score(tuple(held_dice) + roll) for roll in rolls)
    return float(total) / len(rolls)
def gen_all_holds(hand):
    """
    Generate every possible choice of dice to hold from the hand.

    hand: full yahtzee hand
    Returns a set of tuples, where each tuple is dice to hold
    """
    holds = set([()])
    for die in hand:
        # Each die either joins every existing hold, or is left out.
        holds = holds | set(partial + (die,) for partial in holds)
    return holds
def strategy(hand, num_die_sides):
    """
    Find the hold that maximizes the expected value when the discarded
    dice are re-rolled.

    hand: full yahtzee hand
    num_die_sides: number of sides on each die
    Returns a tuple where the first element is the expected score and
    the second element is a tuple of the dice to hold
    """
    best = (0.0, ())
    for hold in gen_all_holds(hand):
        ev = expected_value(hold, num_die_sides, len(hand) - len(hold))
        if ev > best[0]:
            best = (ev, hold)
    return best
def run_example():
    """
    Compute the dice to hold and expected score for an example hand
    """
    num_die_sides = 6
    hand = (1, 1, 1, 5, 6)
    hand_score, hold = strategy(hand, num_die_sides)
    # Python 2 print statement -- this file targets Python 2 / CodeSkulptor.
    print "Best strategy for hand", hand, "is to hold", hold, "with expected score", hand_score
# Run the example at import/execution time (CodeSkulptor style, no main guard).
run_example()
#import poc_holds_testsuite
#poc_holds_testsuite.run_suite(gen_all_holds)
| 3,630 | 1,213 |
#
# PySNMP MIB module AVAM-SNMPv1 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AVAM-SNMPv1
# Produced by pysmi-0.3.4 at Mon Apr 29 17:16:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports resolved at load time through the pysnmp MIB builder.
# (Generated by pysmi; NotificationType is imported twice from SNMPv2-SMI --
# a harmless artefact of the generator.)
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
DateAndTime, = mibBuilder.importSymbols("HOST-RESOURCES-MIB", "DateAndTime")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, IpAddress, NotificationType, iso, MibIdentifier, Bits, Counter64, Gauge32, Counter32, ObjectIdentity, ModuleIdentity, enterprises, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "IpAddress", "NotificationType", "iso", "MibIdentifier", "Bits", "Counter64", "Gauge32", "Counter32", "ObjectIdentity", "ModuleIdentity", "enterprises", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# OID registry for enterprise 5910 (Availant): product subtree and the
# AVAM MIB's visible-object and notification branches.
availant = MibIdentifier((1, 3, 6, 1, 4, 1, 5910))
avProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 5910, 1))
avamMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 5910, 1, 3))
avamVisObj = MibIdentifier((1, 3, 6, 1, 4, 1, 5910, 1, 3, 1))
avamNotify = MibIdentifier((1, 3, 6, 1, 4, 1, 5910, 1, 3, 2))
# Read-only scalar objects; all marked 'mandatory' status (SNMPv1 MIB style).
avVersionString = MibScalar((1, 3, 6, 1, 4, 1, 5910, 1, 3, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avVersionString.setStatus('mandatory')
avEventDateTime = MibScalar((1, 3, 6, 1, 4, 1, 5910, 1, 3, 2, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avEventDateTime.setStatus('mandatory')
avEventAgent = MibScalar((1, 3, 6, 1, 4, 1, 5910, 1, 3, 2, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avEventAgent.setStatus('mandatory')
avHostURL = MibScalar((1, 3, 6, 1, 4, 1, 5910, 1, 3, 2, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: avHostURL.setStatus('mandatory')
# Trap definition binding the three event scalars above.
avEventNotify = NotificationType((1, 3, 6, 1, 4, 1, 5910, 1, 3) + (0,1)).setObjects(("AVAM-SNMPv1", "avEventDateTime"), ("AVAM-SNMPv1", "avEventAgent"), ("AVAM-SNMPv1", "avHostURL"))
mibBuilder.exportSymbols("AVAM-SNMPv1", avamNotify=avamNotify, availant=availant, avamMIB=avamMIB, avHostURL=avHostURL, avVersionString=avVersionString, avEventNotify=avEventNotify, avEventDateTime=avEventDateTime, avamVisObj=avamVisObj, avEventAgent=avEventAgent, avProducts=avProducts)
| 3,079 | 1,263 |
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2022 catt0
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
PoC for an engraving calculator.
For now you need to edit the data in this file directly.
Check the comments for what to change.
"""
# Disclaimer notes:
# the simple algo has its limits, it only does works 100% and impossible
# it also prefers +3 instead of going cheaper if possible, but you see the acc list, so you can tweak that on your own
# also it always grabs the books for the first 1 or 2 engravings even though it could be better to do it for the less prio ones, but I guess that is how it is usually done anyway
from enum import Enum, auto
from pprint import pprint
class Engraving(Enum):
    """Identifiers for the supported engravings."""

    # add new engravings below, names must not contain spaces
    Grudge = auto()
    CursedDoll = auto()
    KeenBlunt = auto()
    Firepower = auto()
    BlessedAura = auto()

    def __repr__(self):
        # Compact repr without the numeric value, e.g. <Engraving.Grudge>.
        return '<%s.%s>' % (self.__class__.__name__, self.name)
# your target points
# order determines the priority
# it will first use books to fulfill these
target = [
    (Engraving.Grudge, 15),
    (Engraving.CursedDoll, 15),
    (Engraving.KeenBlunt, 15),
    (Engraving.Firepower, 15),
]
# the points you get from books
books = {
    Engraving.Grudge: 12,
    Engraving.Firepower: 12,
}
# the points you get from your stone
stone = {
    Engraving.KeenBlunt: 8,
    Engraving.Grudge: 5,
}
# maximum number of accessories that can be equipped
MAX_ACCS = 5
def find_acc_slots(accs, engraving, needed_levels, max_accs=None):
    """Greedily reserve accessory engraving slots until needed_levels is met.

    Each accessory is a pair of slots ((engraving, level), (engraving, level))
    where an unused second slot is None and a slot grants at most +3.
    New accessories (up to max_accs) are created first; after that, free
    second slots of existing accessories are filled in order. accs is
    mutated in place.

    :param accs: list of accessories built so far (mutated)
    :param engraving: the engraving to place (any hashable tag)
    :param needed_levels: levels still required; <= 0 succeeds immediately
    :param max_accs: accessory cap; defaults to the module-level MAX_ACCS
    :return: True if the requirement was met, False if slots ran out

    Bug fix: the original placed ``return False`` after ``while True`` --
    unreachable -- so when every slot was already occupied the loop spun
    forever instead of reporting failure. (Its ``added_levels`` counter was
    also never incremented; it has been removed.)
    """
    if needed_levels <= 0:
        return True
    if max_accs is None:
        max_accs = MAX_ACCS
    # First, open fresh accessories, granting up to +3 per primary slot.
    while needed_levels > 0 and len(accs) < max_accs:
        level = min(needed_levels, 3)
        accs.append(((engraving, level), None))
        needed_levels -= level
    if needed_levels <= 0:
        return True
    # Then fill the free second slot of existing accessories, in order.
    for i, (first_slot, second_slot) in enumerate(accs):
        if second_slot is None:
            level = min(needed_levels, 3)
            accs[i] = (first_slot, (engraving, level))
            needed_levels -= level
            if needed_levels <= 0:
                return True
    # NOTE(review): like the original, an accessory may end up carrying the
    # same engraving in both slots -- confirm whether the game allows that.
    return False
# Greedy allocation: for each target (in priority order) consume the stone
# first, then up to two books, then accessory slots via find_acc_slots.
accs = []
books_equipped = []
success = True
for engraving, target_level in target:
    print("Trying to reach {} on {}".format(target_level, engraving))
    current_level = 0
    if engraving in stone:
        current_level += stone[engraving]
        # print('Used {} from stone'.format(engraving))
    if current_level >= target_level:
        print('{} reached target {}'.format(engraving, target_level))
        continue
    # At most two books can be equipped in total, across all engravings.
    if engraving in books and len(books_equipped) < 2:
        current_level += books[engraving]
        books_equipped.append(engraving)
        # print('Used {} from book'.format(engraving))
    if current_level >= target_level:
        print('{} reached target {}'.format(engraving, target_level))
        continue
    # print('{} at {} after books and stone'.format(engraving, current_level))
    # Cover the remainder with accessory slots; stop at the first failure.
    if not find_acc_slots(accs, engraving, target_level - current_level):
        print('Unable to reach target {} for {}'.format(target_level, engraving))
        success = False
        break
    print('{} reached target {}'.format(engraving, target_level))
if not success:
    print('Impossible')
else:
    print('Books:')
    pprint(books_equipped)
    print('Accessories:')
    pprint(accs)
| 4,649 | 1,511 |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import os
import glob
import tempfile
import matplotlib.pyplot as plt
from clawpack.geoclaw import topotools
import clawpack.geoclaw.topo as topo
import numpy as np
def test1():
    """
    Write two sample topo files (a coarse bowl and a finer hill over a
    smaller region), read them back and plot both on a single figure.
    """
    plot_specs = (('bowl.tt2', maketopo1a, True),
                  ('hill.tt2', maketopo1b, False))
    for fname, make_topo, with_colorbar in plot_specs:
        make_topo(fname)
        tpd = topotools.TopoPlotData(fname)
        tpd.imshow = True
        tpd.cmin = -1000.
        tpd.cmax = 2000.
        # Only the first plot draws the (shared) colorbar.
        tpd.addcolorbar = with_colorbar
        tpd.plot()
    plt.title('Bathymetry / topography')
    plt.savefig('topotest1.png')
    # print 'Created topotest1.png'
def topo1(x, y):
    """
    Sample topography: a parabolic bowl plus a Gaussian hill
    centred at (0.7, 0.8).
    """
    bowl = 1000. * (x ** 2 + y ** 2 - 1.)
    hill = 1000. * np.exp(-100 * ((x - 0.7) ** 2 + (y - 0.8) ** 2))
    return bowl + hill
def maketopo1a(path):
    """
    Write the coarse topo1 bathymetry covering the full domain to *path*.
    """
    xlower, xupper = -1.5, 2.5
    ylower, yupper = -1., 2.
    nxpoints, nypoints = 101, 76
    topotools.topo2writer(path, topo1, xlower, xupper, ylower, yupper,
                          nxpoints, nypoints)
def maketopo1b(path):
    """
    Write a finer topo1 grid over a smaller sub-region to *path*.
    """
    xlower, xupper = 0.0, 1.0
    ylower, yupper = 0.5, 1.2
    nxpoints, nypoints = 101, 71
    topotools.topo2writer(path, topo1, xlower, xupper, ylower, yupper,
                          nxpoints, nypoints)
def test_topography_object(plot=False):
    """
    Test the Topography object's functionality.

    Round-trips the bowl and hill bathymetries through every topo type in
    [2, 3, 1, 2]: each iteration reads the files written by the previous
    one, optionally plots them, then writes them out in the next format.
    Afterwards all read-back grids are checked for exact equality.

    :param plot: if True, save a figure per topo type as topotestN.png
    """
    try:
        # NOTE(review): base_path is assigned inside the try; if mkdtemp()
        # itself raised, the finally block would hit a NameError -- consider
        # moving the mkdtemp call before the try.
        base_path = tempfile.mkdtemp()
        # Create initial test bathymetry
        maketopo1a(os.path.join(base_path, 'bowl.tt2'))
        maketopo1b(os.path.join(base_path, 'hill.tt2'))
        hill_topo = []
        bowl_topo = []
        topo_types = [2,3,1,2]
        for (n, topo_type) in enumerate(topo_types):
            bowl_path = os.path.join(base_path, 'bowl.tt%s' % topo_type)
            hill_path = os.path.join(base_path, 'hill.tt%s' % topo_type)
            bowl_topo.append(topo.Topography(bowl_path))
            hill_topo.append(topo.Topography(hill_path))
            if plot:
                fig = plt.figure()
                axes = fig.add_subplot(1,1,1)
                hill_topo[-1].plot(axes=axes, limits=[-2000,0])
                bowl_topo[-1].plot(axes=axes, limits=[-2000,0])
                fig.suptitle('Bathymetry / topography, topo type = %s' % topo_type)
                plt.savefig('topotest%s.png' % (n + 2))
            print(n, topo_type)
            # Write the grids back out in the next format so the next
            # iteration exercises that reader.
            if n + 1 != len(topo_types):
                bowl_path = os.path.join(base_path, 'bowl.tt%s' % topo_types[n+1])
                hill_path = os.path.join(base_path, 'hill.tt%s' % topo_types[n+1])
                bowl_topo[-1].write(bowl_path)
                hill_topo[-1].write(hill_path)
        # Check data: every pair of read-back grids must match exactly.
        for (n,topo_type) in enumerate(topo_types):
            for (m,topo_type) in enumerate(topo_types):
                assert np.all(bowl_topo[n].X == bowl_topo[m].X), \
                    "bowl[%s].X != bowl[%s].X" % (n,m)
                assert np.all(bowl_topo[n].Y == bowl_topo[m].Y), \
                    "bowl[%s].Y != bowl[%s].Y" % (n,m)
                assert np.all(bowl_topo[n].Z == bowl_topo[m].Z), \
                    "bowl[%s].Z != bowl[%s].Z" % (n,m)
                assert np.all(hill_topo[n].X == hill_topo[m].X), \
                    "hill[%s].X != hill[%s].X" % (n,m)
                assert np.all(hill_topo[n].Y == hill_topo[m].Y), \
                    "hill[%s].Y != hill[%s].Y" % (n,m)
                assert np.all(hill_topo[n].Z == hill_topo[m].Z), \
                    "hill[%s].Z != hill[%s].Z" % (n,m)
    finally:
        # Clean up the temporary directory and everything in it.
        paths = glob.glob(os.path.join(base_path,"*"))
        for path in paths:
            os.remove(path)
        os.rmdir(base_path)
if __name__=='__main__':
    # Run the legacy procedural test first, then the Topography-object test.
    print("Starting procedural test...")
    test1()
    print("Done performing procedural test.")
    print("Starting object test...")
    test_topography_object(plot=True)
print("Done performing object test...") | 4,447 | 1,721 |
#!/usr/bin/env python
import argparse
import json
from collections import defaultdict
def read_data(fname):
    """
    Parse a master-events log (one JSON object per line) into a mapping
    bucket -> list of events.

    Only events belonging to the most recent rebalance are kept: every
    'rebalanceStart' event resets the accumulator. Events without a usable
    'bucket' field (missing or 'undefined') are dropped.
    """
    by_bucket = defaultdict(list)
    with open(fname) as fh:
        for line in fh:
            event = json.loads(line.strip())
            if event['type'] == 'rebalanceStart':  # only last rebalance events
                by_bucket = defaultdict(list)
            if event.get('bucket') not in (None, 'undefined'):
                by_bucket[event['bucket']].append(event)
    return by_bucket
def parse_events(data):
    """
    Regroup raw events into bucket -> vbucket -> time-ordered (type, ts) pairs.

    Events lacking a truthy 'vbucket' field are skipped.
    NOTE(review): a vbucket id of 0 is falsy and would also be skipped --
    confirm whether vbucket numbering starts at 0 in these logs.
    """
    parsed = defaultdict(dict)
    for bucket, events in data.items():
        per_vbucket = defaultdict(list)
        for event in sorted(events, key=lambda e: e['ts']):
            vbucket = event.get('vbucket')
            if vbucket:
                per_vbucket[vbucket].append((event['type'], event['ts']))
        parsed[bucket] = per_vbucket
    return parsed
def calc_total_time(events):
    """
    Elapsed time from the first 'dcpAddStream' to the last 'vbucketMoveDone'.

    Returns None when either endpoint is missing from the event list.
    NOTE(review): a timestamp of exactly 0 is treated as missing (falsy
    check, preserved from the original) -- confirm timestamps are epoch-like.
    """
    first_start = None
    last_done = None
    for event_type, ts in events:
        if event_type == 'vbucketMoveDone':
            last_done = ts
        elif event_type == 'dcpAddStream' and first_start is None:
            first_start = ts
    if last_done and first_start:
        return last_done - first_start
def find_hot_spots(events, total_time, threshold):
    """
    Yield (prev_event, event, pct) for every gap between consecutive events
    that exceeds *threshold* percent of *total_time*.

    'updateFastForwardMap' and 'vbucketStateChange' events are ignored
    entirely (they neither yield nor advance the previous-event marker).
    """
    prev_ts = None
    prev_type = None
    for event_type, ts in events:
        if event_type in ('updateFastForwardMap', 'vbucketStateChange'):
            continue
        if prev_ts:
            gap_pct = 100 * (ts - prev_ts) / total_time
            if gap_pct > threshold:
                yield (prev_type, event_type, gap_pct)
        prev_ts = ts
        prev_type = event_type
def analyze_events(data, threshold):
    """
    For each bucket, compute per-vbucket move durations and hotspot gaps,
    then print a report. vbuckets without a measurable duration are skipped.

    IOError from report (e.g. a broken pipe when piping stdout) is
    deliberately swallowed so the analysis of later buckets continues.
    """
    for bucket, vbuckets in data.items():
        durations = []
        spots = defaultdict(list)
        for vbucket, events in vbuckets.items():
            total = calc_total_time(events)
            if not total:
                continue
            durations.append((vbucket, total))
            spots[vbucket].extend(find_hot_spots(events, total, threshold))
        try:
            report(bucket, durations, spots)
        except IOError:
            pass
def report(bucket, timings, hotspots):
    """
    Print a per-bucket summary followed by one line per vbucket and an
    indented line per hotspot.

    :param bucket: bucket name (its length sets the label column width)
    :param timings: list of (vbucket, total_seconds) pairs; must be non-empty
    :param hotspots: mapping vbucket -> list of (prev_event, event, pct)
    """
    mean = sum(total for vbucket, total in timings) / len(timings)
    _max = max(total for vbucket, total in timings)
    _min = min(total for vbucket, total in timings)
    # The doubled braces survive the first .format() call, producing
    # templates whose alignment width equals len(bucket).
    summary = '{{:>{}}}: {{}} movements, ' \
              'mean: {{:.1f}}s, max: {{:.1f}}s, min: {{:.1f}}s'.format(len(bucket))
    vb_summary = '{{:>{}}}: {{:.1f}}s'.format(len(bucket))
    # Hotspot lines are indented two columns past the label width.
    hotspot = '{}{{}} -> {{}}: {{:.1f}}%'.format(''.rjust(len(bucket) + 2))
    print(summary.format(bucket, len(timings), mean, _max, _min))
    for vbucket, total_time in timings:
        print(vb_summary.format(vbucket, total_time))
        for prev_event, event, delta in hotspots[vbucket]:
            print(hotspot.format(prev_event, event, delta))
def main():
    """
    Entry point: parse CLI arguments, load the master-events log and
    print the per-bucket analysis.
    """
    parser = argparse.ArgumentParser(prog='moveit')
    parser.add_argument('-t', dest='threshold', type=float, default=0,
                        help='hotspot threshold in %%')
    parser.add_argument('filename', type=str, help='path to master events log')
    args = parser.parse_args()
    if not 0 <= args.threshold <= 100:
        parser.error('threshold must be in 0 to 100 range')
    parsed = parse_events(data=read_data(fname=args.filename))
    analyze_events(parsed, args.threshold)
main()
| 3,443 | 1,068 |
from typing import Union, Tuple
import pytest
import torch
import torch.nn as nn
from torch.testing import assert_allclose
from torch.autograd import gradcheck
import kornia
import kornia.testing as utils # test utils
from kornia.constants import pi
from kornia.augmentation import (
RandomDepthicalFlip3D,
RandomHorizontalFlip3D,
RandomVerticalFlip3D,
RandomAffine3D,
RandomRotation3D,
RandomCrop3D,
CenterCrop3D,
RandomEqualize3D
)
class TestRandomHorizontalFlip3D:
    """Tests for RandomHorizontalFlip3D: flips volumes along the width axis.

    Where return_transform=True, each test also checks the returned 4x4
    homogeneous transformation matrix against the expected reflection.
    """
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self):
        """The printed repr must reflect the constructor arguments."""
        f = RandomHorizontalFlip3D(0.5)
        repr = "RandomHorizontalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
        assert str(f) == repr
    def test_random_hflip(self, device):
        """p=1.0 always flips width-wise (with matching transform); p=0.0 is identity."""
        f = RandomHorizontalFlip3D(p=1.0, return_transform=True)
        f1 = RandomHorizontalFlip3D(p=0., return_transform=True)
        f2 = RandomHorizontalFlip3D(p=1.)
        f3 = RandomHorizontalFlip3D(p=0.)
        input = torch.tensor([[[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 1., 2.]],
                              [[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 1., 2.]]])  # 2 x 3 x 4
        input = input.to(device)
        expected = torch.tensor([[[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [2., 1., 0., 0.]],
                                 [[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [2., 1., 0., 0.]]])  # 2 x 3 x 4
        expected = expected.to(device)
        # Reflection about the vertical center line of a width-4 image.
        expected_transform = torch.tensor([[-1., 0., 0., 3.],
                                           [0., 1., 0., 0.],
                                           [0., 0., 1., 0.],
                                           [0., 0., 0., 1.]])  # 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[1., 0., 0., 0.],
                                 [0., 1., 0., 0.],
                                 [0., 0., 1., 0.],
                                 [0., 0., 0., 1.]])  # 4 x 4
        identity = identity.to(device)
        assert (f(input)[0] == expected).all()
        assert (f(input)[1] == expected_transform).all()
        assert (f1(input)[0] == input).all()
        assert (f1(input)[1] == identity).all()
        assert (f2(input) == expected).all()
        assert (f3(input) == input).all()
    def test_batch_random_hflip(self, device):
        """The flip applies per-sample on batched 5D input, with batched transforms."""
        f = RandomHorizontalFlip3D(p=1.0, return_transform=True)
        f1 = RandomHorizontalFlip3D(p=0.0, return_transform=True)
        input = torch.tensor([[[[[0., 0., 0.],
                                 [0., 0., 0.],
                                 [0., 1., 1.]]]]])  # 1 x 1 x 1 x 3 x 3
        input = input.to(device)
        expected = torch.tensor([[[[[0., 0., 0.],
                                    [0., 0., 0.],
                                    [1., 1., 0.]]]]])  # 1 x 1 x 1 x 3 x 3
        expected = expected.to(device)
        expected_transform = torch.tensor([[[-1., 0., 0., 2.],
                                            [0., 1., 0., 0.],
                                            [0., 0., 1., 0.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[[1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 1., 0.],
                                  [0., 0., 0., 1.]]])  # 1 x 4 x 4
        identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected_transform = expected_transform.repeat(5, 1, 1)  # 5 x 4 x 4
        identity = identity.repeat(5, 1, 1)  # 5 x 4 x 4
        assert (f(input)[0] == expected).all()
        assert (f(input)[1] == expected_transform).all()
        assert (f1(input)[0] == input).all()
        assert (f1(input)[1] == identity).all()
    def test_same_on_batch(self, device):
        """same_on_batch=True must apply the same random decision to every sample."""
        f = RandomHorizontalFlip3D(p=0.5, same_on_batch=True)
        input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 1, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    def test_sequential(self, device):
        """Two consecutive flips cancel out; returned transforms compose via matmul."""
        f = nn.Sequential(
            RandomHorizontalFlip3D(p=1.0, return_transform=True),
            RandomHorizontalFlip3D(p=1.0, return_transform=True),
        )
        f1 = nn.Sequential(
            RandomHorizontalFlip3D(p=1.0, return_transform=True),
            RandomHorizontalFlip3D(p=1.0),
        )
        input = torch.tensor([[[[0., 0., 0.],
                                [0., 0., 0.],
                                [0., 1., 1.]]]])  # 1 x 1 x 3 x 3
        input = input.to(device)
        expected_transform = torch.tensor([[[-1., 0., 0., 2.],
                                            [0., 1., 0., 0.],
                                            [0., 0., 1., 0.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        expected_transform_1 = expected_transform @ expected_transform
        expected_transform_1 = expected_transform_1.to(device)
        assert(f(input)[0] == input).all()
        assert(f(input)[1] == expected_transform_1).all()
        assert(f1(input)[0] == input).all()
        assert(f1(input)[1] == expected_transform).all()
    @pytest.mark.skip(reason="turn off all jit for a while")
    def test_jit(self, device):
        """JIT-trace kornia.random_hflip and evaluate the trace on new inputs."""
        @torch.jit.script
        def op_script(data: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
            return kornia.random_hflip(data)
        input = torch.tensor([[[0., 0., 0.],
                               [0., 0., 0.],
                               [0., 1., 1.]]])  # 1 x 3 x 3
        # Build jit trace
        op_trace = torch.jit.trace(op_script, (input, ))
        # Create new inputs
        input = torch.tensor([[0., 0., 0.],
                              [5., 5., 0.],
                              [0., 0., 0.]])  # 3 x 3
        input = input.repeat(2, 1, 1)  # 2 x 3 x 3
        expected = torch.tensor([[[0., 0., 0.],
                                  [0., 5., 5.],
                                  [0., 0., 0.]]])  # 1 x 3 x 3
        expected = expected.repeat(2, 1, 1)
        actual = op_trace(input)
        assert_allclose(actual, expected)
    def test_gradcheck(self, device):
        """Gradients must flow through the flip operation."""
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
        input = utils.tensor_to_gradcheck_var(input)  # to var
        assert gradcheck(RandomHorizontalFlip3D(p=1.), (input, ), raise_exception=True)
class TestRandomVerticalFlip3D:
    """Tests for RandomVerticalFlip3D: flips volumes along the height axis.

    Where return_transform=True, each test also checks the returned 4x4
    homogeneous transformation matrix against the expected reflection.
    """
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self):
        """The printed repr must reflect the constructor arguments."""
        f = RandomVerticalFlip3D(0.5)
        repr = "RandomVerticalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
        assert str(f) == repr
    def test_random_vflip(self, device):
        """p=1.0 always flips rows (with matching transform); p=0.0 is identity."""
        f = RandomVerticalFlip3D(p=1.0, return_transform=True)
        f1 = RandomVerticalFlip3D(p=0., return_transform=True)
        f2 = RandomVerticalFlip3D(p=1.)
        f3 = RandomVerticalFlip3D(p=0.)
        input = torch.tensor([[[0., 0., 0.],
                               [0., 0., 0.],
                               [0., 1., 1.]],
                              [[0., 0., 0.],
                               [0., 0., 0.],
                               [0., 1., 1.]]])  # 2 x 3 x 3
        input = input.to(device)
        expected = torch.tensor([[[0., 1., 1.],
                                  [0., 0., 0.],
                                  [0., 0., 0.]],
                                 [[0., 1., 1.],
                                  [0., 0., 0.],
                                  [0., 0., 0.]]])  # 2 x 3 x 3
        expected = expected.to(device)
        # Reflection about the horizontal center line of a height-3 image.
        expected_transform = torch.tensor([[1., 0., 0., 0.],
                                           [0., -1., 0., 2.],
                                           [0., 0., 1., 0.],
                                           [0., 0., 0., 1.]])  # 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[1., 0., 0., 0.],
                                 [0., 1., 0., 0.],
                                 [0., 0., 1., 0.],
                                 [0., 0., 0., 1.]])  # 4 x 4
        identity = identity.to(device)
        assert_allclose(f(input)[0], expected)
        assert_allclose(f(input)[1], expected_transform)
        assert_allclose(f1(input)[0], input)
        assert_allclose(f1(input)[1], identity)
        assert_allclose(f2(input), expected)
        assert_allclose(f3(input), input)
    def test_batch_random_vflip(self, device):
        """The flip applies per-sample on batched 5D input, with batched transforms."""
        f = RandomVerticalFlip3D(p=1.0, return_transform=True)
        f1 = RandomVerticalFlip3D(p=0.0, return_transform=True)
        input = torch.tensor([[[[[0., 0., 0.],
                                 [0., 0., 0.],
                                 [0., 1., 1.]]]]])  # 1 x 1 x 1 x 3 x 3
        input = input.to(device)
        expected = torch.tensor([[[[[0., 1., 1.],
                                    [0., 0., 0.],
                                    [0., 0., 0.]]]]])  # 1 x 1 x 1 x 3 x 3
        expected = expected.to(device)
        expected_transform = torch.tensor([[[1., 0., 0., 0.],
                                            [0., -1., 0., 2.],
                                            [0., 0., 1., 0.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[[1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 1., 0.],
                                  [0., 0., 0., 1.]]])  # 1 x 4 x 4
        identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 1 x 3 x 3
        expected_transform = expected_transform.repeat(5, 1, 1)  # 5 x 4 x 4
        identity = identity.repeat(5, 1, 1)  # 5 x 4 x 4
        assert_allclose(f(input)[0], expected)
        assert_allclose(f(input)[1], expected_transform)
        assert_allclose(f1(input)[0], input)
        assert_allclose(f1(input)[1], identity)
    def test_same_on_batch(self, device):
        """same_on_batch=True must apply the same random decision to every sample."""
        f = RandomVerticalFlip3D(p=0.5, same_on_batch=True)
        input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 1, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    def test_sequential(self, device):
        """Two consecutive flips cancel out; returned transforms compose via matmul."""
        f = nn.Sequential(
            RandomVerticalFlip3D(p=1.0, return_transform=True),
            RandomVerticalFlip3D(p=1.0, return_transform=True),
        )
        f1 = nn.Sequential(
            RandomVerticalFlip3D(p=1.0, return_transform=True),
            RandomVerticalFlip3D(p=1.0),
        )
        input = torch.tensor([[[[[0., 0., 0.],
                                 [0., 0., 0.],
                                 [0., 1., 1.]]]]])  # 1 x 1 x 1 x 3 x 3
        input = input.to(device)
        expected_transform = torch.tensor([[[1., 0., 0., 0.],
                                            [0., -1., 0., 2.],
                                            [0., 0., 1., 0.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        expected_transform_1 = expected_transform @ expected_transform
        assert_allclose(f(input)[0], input.squeeze())
        assert_allclose(f(input)[1], expected_transform_1)
        assert_allclose(f1(input)[0], input.squeeze())
        assert_allclose(f1(input)[1], expected_transform)
    @pytest.mark.skip(reason="turn off all jit for a while")
    def test_jit(self, device):
        """JIT-trace kornia.random_vflip and evaluate the trace on new inputs."""
        @torch.jit.script
        def op_script(data: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
            return kornia.random_vflip(data)
        input = torch.tensor([[[0., 0., 0.],
                               [0., 0., 0.],
                               [0., 1., 1.]]])  # 1 x 3 x 3
        # Build jit trace
        op_trace = torch.jit.trace(op_script, (input, ))
        # Create new inputs
        input = torch.tensor([[[0., 0., 0.],
                               [5., 5., 0.],
                               [0., 0., 0.]]])  # 1 x 3 x 3
        input = input.repeat(2, 1, 1)  # 2 x 3 x 3
        expected = torch.tensor([[[[0., 0., 0.],
                                   [5., 5., 0.],
                                   [0., 0., 0.]]]])  # 1 x 1 x 3 x 3
        expected = expected.repeat(2, 1, 1)
        actual = op_trace(input)
        assert_allclose(actual, expected)
    def test_gradcheck(self, device):
        """Gradients must flow through the flip operation."""
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
        input = utils.tensor_to_gradcheck_var(input)  # to var
        assert gradcheck(RandomVerticalFlip3D(p=1.), (input, ), raise_exception=True)
class TestRandomDepthicalFlip3D:
    """Tests for RandomDepthicalFlip3D: flips volumes along the depth axis.

    Where return_transform=True, each test also checks the returned 4x4
    homogeneous transformation matrix against the expected reflection.
    """
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self):
        """The printed repr must reflect the constructor arguments."""
        f = RandomDepthicalFlip3D(0.5)
        repr = "RandomDepthicalFlip3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=0.5)"
        assert str(f) == repr
    def test_random_dflip(self, device):
        """p=1.0 always swaps the two depth slices; p=0.0 is identity."""
        f = RandomDepthicalFlip3D(p=1.0, return_transform=True)
        f1 = RandomDepthicalFlip3D(p=0., return_transform=True)
        f2 = RandomDepthicalFlip3D(p=1.)
        f3 = RandomDepthicalFlip3D(p=0.)
        input = torch.tensor([[[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 1.]],
                              [[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 2.]]])  # 2 x 3 x 4
        input = input.to(device)
        expected = torch.tensor([[[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [0., 0., 0., 2.]],
                                 [[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [0., 0., 0., 1.]]])  # 2 x 3 x 4
        expected = expected.to(device)
        # Reflection along the depth axis of a depth-2 volume.
        expected_transform = torch.tensor([[1., 0., 0., 0.],
                                           [0., 1., 0., 0.],
                                           [0., 0., -1., 1.],
                                           [0., 0., 0., 1.]])  # 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[1., 0., 0., 0.],
                                 [0., 1., 0., 0.],
                                 [0., 0., 1., 0.],
                                 [0., 0., 0., 1.]])  # 4 x 4
        identity = identity.to(device)
        assert_allclose(f(input)[0], expected)
        assert_allclose(f(input)[1], expected_transform)
        assert_allclose(f1(input)[0], input)
        assert_allclose(f1(input)[1], identity)
        assert_allclose(f2(input), expected)
        assert_allclose(f3(input), input)
    def test_batch_random_dflip(self, device):
        """The flip applies per-sample on batched input, with batched transforms."""
        f = RandomDepthicalFlip3D(p=1.0, return_transform=True)
        f1 = RandomDepthicalFlip3D(p=0.0, return_transform=True)
        input = torch.tensor([[[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 1.]],
                              [[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 2.]]])  # 2 x 3 x 4
        input = input.to(device)
        expected = torch.tensor([[[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [0., 0., 0., 2.]],
                                 [[0., 0., 0., 0.],
                                  [0., 0., 0., 0.],
                                  [0., 0., 0., 1.]]])  # 2 x 3 x 4
        expected = expected.to(device)
        expected_transform = torch.tensor([[[1., 0., 0., 0.],
                                            [0., 1., 0., 0.],
                                            [0., 0., -1., 1.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        identity = torch.tensor([[[1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 1., 0.],
                                  [0., 0., 0., 1.]]])  # 1 x 4 x 4
        identity = identity.to(device)
        input = input.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 2 x 3 x 4
        expected = expected.repeat(5, 3, 1, 1, 1)  # 5 x 3 x 2 x 3 x 4
        expected_transform = expected_transform.repeat(5, 1, 1)  # 5 x 4 x 4
        identity = identity.repeat(5, 1, 1)  # 5 x 4 x 4
        assert_allclose(f(input)[0], expected)
        assert_allclose(f(input)[1], expected_transform)
        assert_allclose(f1(input)[0], input)
        assert_allclose(f1(input)[1], identity)
    def test_same_on_batch(self, device):
        """same_on_batch=True must apply the same random decision to every sample."""
        f = RandomDepthicalFlip3D(p=0.5, same_on_batch=True)
        input = torch.eye(3).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 2, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    def test_sequential(self, device):
        """Two consecutive flips cancel out; returned transforms compose via matmul."""
        f = nn.Sequential(
            RandomDepthicalFlip3D(p=1.0, return_transform=True),
            RandomDepthicalFlip3D(p=1.0, return_transform=True),
        )
        f1 = nn.Sequential(
            RandomDepthicalFlip3D(p=1.0, return_transform=True),
            RandomDepthicalFlip3D(p=1.0),
        )
        input = torch.tensor([[[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 1.]],
                              [[0., 0., 0., 0.],
                               [0., 0., 0., 0.],
                               [0., 0., 0., 2.]]])  # 2 x 3 x 4
        input = input.to(device)
        expected_transform = torch.tensor([[[1., 0., 0., 0.],
                                            [0., 1., 0., 0.],
                                            [0., 0., -1., 1.],
                                            [0., 0., 0., 1.]]])  # 1 x 4 x 4
        expected_transform = expected_transform.to(device)
        expected_transform_1 = expected_transform @ expected_transform
        assert_allclose(f(input)[0], input.squeeze())
        assert_allclose(f(input)[1], expected_transform_1)
        assert_allclose(f1(input)[0], input.squeeze())
        assert_allclose(f1(input)[1], expected_transform)
    @pytest.mark.skip(reason="turn off all jit for a while")
    def test_jit(self, device):
        """JIT-trace test, currently skipped.

        NOTE(review): this scripts kornia.random_vflip, mirroring the vertical
        flip test — presumably it should exercise a depth-flip op instead;
        confirm before un-skipping.
        """
        @torch.jit.script
        def op_script(data: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
            return kornia.random_vflip(data)
        input = torch.tensor([[[0., 0., 0.],
                               [0., 0., 0.],
                               [0., 1., 1.]]])  # 1 x 3 x 3
        # Build jit trace
        op_trace = torch.jit.trace(op_script, (input, ))
        # Create new inputs
        input = torch.tensor([[[0., 0., 0.],
                               [5., 5., 0.],
                               [0., 0., 0.]]])  # 1 x 3 x 3
        input = input.repeat(2, 1, 1)  # 2 x 3 x 3
        expected = torch.tensor([[[[0., 0., 0.],
                                   [5., 5., 0.],
                                   [0., 0., 0.]]]])  # 1 x 1 x 3 x 3
        expected = expected.repeat(2, 1, 1)
        actual = op_trace(input)
        assert_allclose(actual, expected)
    def test_gradcheck(self, device):
        """Gradients must flow through the flip operation."""
        input = torch.rand((1, 3, 3)).to(device)  # 1 x 3 x 3
        input = utils.tensor_to_gradcheck_var(input)  # to var
        assert gradcheck(RandomDepthicalFlip3D(p=1.), (input, ), raise_exception=True)
class TestRandomRotation3D:
    """Tests for RandomRotation3D.

    Expected tensors below were generated with a fixed RNG seed
    (torch.manual_seed(0)), so any change to the sampling order inside the
    augmentation will invalidate them.
    """
    torch.manual_seed(0)  # for random reproductibility
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self):
        """The printed repr must reflect the constructor arguments."""
        f = RandomRotation3D(degrees=45.5)
        repr = """RandomRotation3D(degrees=tensor([[-45.5000, 45.5000],
        [-45.5000, 45.5000],
        [-45.5000, 45.5000]]), resample=BILINEAR, align_corners=False, p=0.5, """\
            """p_batch=1.0, same_on_batch=False, return_transform=False)"""
        assert str(f) == repr
    def test_random_rotation(self, device, dtype):
        """Seeded rotation must reproduce the precomputed output and transform."""
        # This is included in doctest
        torch.manual_seed(0)  # for random reproductibility
        f = RandomRotation3D(degrees=45.0, return_transform=True)
        f1 = RandomRotation3D(degrees=45.0)
        input = torch.tensor([[[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]]], device=device, dtype=dtype)  # 3 x 4 x 4
        expected = torch.tensor([[[[[0.2771, 0.0000, 0.0036, 0.0000],
                                    [0.5751, 0.0183, 0.7505, 0.4702],
                                    [0.0262, 0.2591, 0.5776, 0.4764],
                                    [0.0000, 0.0093, 0.0000, 0.0393]],
                                   [[0.0000, 0.0000, 0.0583, 0.0222],
                                    [0.1665, 0.0000, 1.0424, 1.0224],
                                    [0.1296, 0.4846, 1.4200, 1.2287],
                                    [0.0078, 0.3851, 0.3965, 0.3612]],
                                   [[0.0000, 0.7704, 0.6704, 0.0000],
                                    [0.0000, 0.0332, 0.2414, 0.0524],
                                    [0.0000, 0.3349, 1.4545, 1.3689],
                                    [0.0000, 0.0312, 0.5874, 0.8702]]]]], device=device, dtype=dtype)
        expected_transform = torch.tensor([[[0.5784, 0.7149, -0.3929, -0.0471],
                                            [-0.3657, 0.6577, 0.6585, 0.4035],
                                            [0.7292, -0.2372, 0.6419, -0.3799],
                                            [0.0000, 0.0000, 0.0000, 1.0000]]], device=device, dtype=dtype)
        # Second sampled decision does not rotate, so f1 returns the input unchanged.
        expected_2 = torch.tensor([[[[[1., 0., 0., 2.],
                                      [0., 0., 0., 0.],
                                      [0., 1., 2., 0.],
                                      [0., 0., 1., 2.]],
                                     [[1., 0., 0., 2.],
                                      [0., 0., 0., 0.],
                                      [0., 1., 2., 0.],
                                      [0., 0., 1., 2.]],
                                     [[1., 0., 0., 2.],
                                      [0., 0., 0., 0.],
                                      [0., 1., 2., 0.],
                                      [0., 0., 1., 2.]]]]], device=device, dtype=dtype)
        out, mat = f(input)
        assert_allclose(out, expected, rtol=1e-6, atol=1e-4)
        assert_allclose(mat, expected_transform, rtol=1e-6, atol=1e-4)
        assert_allclose(f1(input), expected_2, rtol=1e-6, atol=1e-4)
    def test_batch_random_rotation(self, device, dtype):
        """Per-sample rotation on a batch: first sample rotated, second left identity."""
        torch.manual_seed(0)  # for random reproductibility
        f = RandomRotation3D(degrees=45.0, return_transform=True)
        input = torch.tensor([[[[1., 0., 0., 2.],
                                [0., 0., 0., 0.],
                                [0., 1., 2., 0.],
                                [0., 0., 1., 2.]],
                               [[1., 0., 0., 2.],
                                [0., 0., 0., 0.],
                                [0., 1., 2., 0.],
                                [0., 0., 1., 2.]],
                               [[1., 0., 0., 2.],
                                [0., 0., 0., 0.],
                                [0., 1., 2., 0.],
                                [0., 0., 1., 2.]]]], device=device, dtype=dtype)  # 1 x 3 x 4 x 4
        expected = torch.tensor([[[[[0.0000, 0.5106, 0.1146, 0.0000],
                                    [0.0000, 0.1261, 0.0000, 0.4723],
                                    [0.1714, 0.9931, 0.5442, 0.4684],
                                    [0.0193, 0.5802, 0.4195, 0.0000]],
                                   [[0.0000, 0.2386, 0.0000, 0.0000],
                                    [0.0187, 0.3527, 0.0000, 0.6119],
                                    [0.1294, 1.2251, 0.9130, 0.0942],
                                    [0.0962, 1.0769, 0.8448, 0.0000]],
                                   [[0.0000, 0.0202, 0.0000, 0.0000],
                                    [0.1092, 0.5845, 0.1038, 0.4598],
                                    [0.0000, 1.1218, 1.0796, 0.0000],
                                    [0.0780, 0.9513, 1.1278, 0.0000]]]],
                                 [[[[1.0000, 0.0000, 0.0000, 2.0000],
                                    [0.0000, 0.0000, 0.0000, 0.0000],
                                    [0.0000, 1.0000, 2.0000, 0.0000],
                                    [0.0000, 0.0000, 1.0000, 2.0000]],
                                   [[1.0000, 0.0000, 0.0000, 2.0000],
                                    [0.0000, 0.0000, 0.0000, 0.0000],
                                    [0.0000, 1.0000, 2.0000, 0.0000],
                                    [0.0000, 0.0000, 1.0000, 2.0000]],
                                   [[1.0000, 0.0000, 0.0000, 2.0000],
                                    [0.0000, 0.0000, 0.0000, 0.0000],
                                    [0.0000, 1.0000, 2.0000, 0.0000],
                                    [0.0000, 0.0000, 1.0000, 2.0000]]]]], device=device, dtype=dtype)
        expected_transform = torch.tensor([[[0.7894, -0.6122, 0.0449, 1.1892],
                                            [0.5923, 0.7405, -0.3176, -0.1816],
                                            [0.1612, 0.2773, 0.9472, -0.6049],
                                            [0.0000, 0.0000, 0.0000, 1.0000]],
                                           [[1.0000, 0.0000, 0.0000, 0.0000],
                                            [0.0000, 1.0000, 0.0000, 0.0000],
                                            [0.0000, 0.0000, 1.0000, 0.0000],
                                            [0.0000, 0.0000, 0.0000, 1.0000]]], device=device, dtype=dtype)
        input = input.repeat(2, 1, 1, 1, 1)  # 2 x 1 x 3 x 4 x 4
        out, mat = f(input)
        assert_allclose(out, expected, rtol=1e-6, atol=1e-4)
        assert_allclose(mat, expected_transform, rtol=1e-6, atol=1e-4)
    def test_same_on_batch(self, device, dtype):
        """same_on_batch=True must apply the same random rotation to every sample."""
        f = RandomRotation3D(degrees=40, same_on_batch=True)
        input = torch.eye(6, device=device, dtype=dtype).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 3, 1, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    def test_sequential(self, device, dtype):
        """Chained rotations: output and composed transforms must match the seed."""
        torch.manual_seed(0)  # for random reproductibility
        f = nn.Sequential(
            RandomRotation3D(torch.tensor([-45.0, 90]), return_transform=True),
            RandomRotation3D(10.4, return_transform=True),
        )
        f1 = nn.Sequential(
            RandomRotation3D(torch.tensor([-45.0, 90]), return_transform=True),
            RandomRotation3D(10.4),
        )
        input = torch.tensor([[[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]]], device=device, dtype=dtype)  # 3 x 4 x 4
        expected = torch.tensor([[[[[0.2752, 0.0000, 0.0000, 0.0000],
                                    [0.5767, 0.0059, 0.6440, 0.4307],
                                    [0.0000, 0.2793, 0.6638, 0.5716],
                                    [0.0000, 0.0049, 0.0000, 0.0685]],
                                   [[0.0000, 0.0000, 0.1806, 0.0000],
                                    [0.2138, 0.0000, 0.9061, 0.7966],
                                    [0.0657, 0.5395, 1.4299, 1.2912],
                                    [0.0000, 0.3600, 0.3088, 0.3655]],
                                   [[0.0000, 0.6515, 0.8861, 0.0000],
                                    [0.0000, 0.0000, 0.2278, 0.0000],
                                    [0.0027, 0.4403, 1.5462, 1.3480],
                                    [0.0000, 0.1182, 0.6297, 0.8623]]]]], device=device, dtype=dtype)
        expected_transform = torch.tensor([[[0.6306, 0.6496, -0.4247, 0.0044],
                                            [-0.3843, 0.7367, 0.5563, 0.4151],
                                            [0.6743, -0.1876, 0.7142, -0.4443],
                                            [0.0000, 0.0000, 0.0000, 1.0000]]], device=device, dtype=dtype)
        expected_transform_2 = torch.tensor([[[0.9611, 0.0495, -0.2717, 0.2557],
                                              [0.1255, 0.7980, 0.5894, -0.4747],
                                              [0.2460, -0.6006, 0.7608, 0.7710],
                                              [0.0000, 0.0000, 0.0000, 1.0000]]], device=device, dtype=dtype)
        out, mat = f(input)
        _, mat_2 = f1(input)
        assert_allclose(out, expected, rtol=1e-6, atol=1e-4)
        assert_allclose(mat, expected_transform, rtol=1e-6, atol=1e-4)
        assert_allclose(mat_2, expected_transform_2, rtol=1e-6, atol=1e-4)
    @pytest.mark.skip(reason="turn off all jit for a while")
    def test_jit(self, device):
        """JIT-trace kornia.random_rotation and evaluate the trace on new inputs."""
        torch.manual_seed(0)  # for random reproductibility
        @torch.jit.script
        def op_script(data: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
            return kornia.random_rotation(data, degrees=45.0)
        input = torch.tensor([[[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]],
                              [[1., 0., 0., 2.],
                               [0., 0., 0., 0.],
                               [0., 1., 2., 0.],
                               [0., 0., 1., 2.]]])  # 3 x 4 x 4
        # Build jit trace
        op_trace = torch.jit.trace(op_script, (input, ))
        # Create new inputs
        input = torch.tensor([[[0., 0., 0.],
                               [5., 5., 0.],
                               [0., 0., 0.]],
                              [[0., 0., 0.],
                               [5., 5., 0.],
                               [0., 0., 0.]],
                              [[0., 0., 0.],
                               [5., 5., 0.],
                               [0., 0., 0.]]])  # 3 x 3 x 3
        expected = torch.tensor([[[0.0000, 0.2584, 0.0000],
                                  [2.9552, 5.0000, 0.2584],
                                  [1.6841, 0.4373, 0.0000]]])
        actual = op_trace(input)
        assert_allclose(actual, expected, rtol=1e-6, atol=1e-4)
    def test_gradcheck(self, device):
        """Gradients must flow through a fixed 15-degree rotation."""
        torch.manual_seed(0)  # for random reproductibility
        input = torch.rand((3, 3, 3)).to(device)  # 3 x 3 x 3
        input = utils.tensor_to_gradcheck_var(input)  # to var
        assert gradcheck(RandomRotation3D(degrees=(15.0, 15.0), p=1.), (input, ), raise_exception=True)
class TestRandomCrop3D:
    """Tests for RandomCrop3D (random (depth, height, width) crops with padding).

    BUGFIX: the two skipped JIT tests referenced the undefined name
    ``RandomCrop`` (only ``RandomCrop3D`` is imported in this module) and used
    2D sizes with 4D inputs; they would raise NameError the moment the skip
    marker is removed.  They now build the 3D op with a (d, h, w) size and a
    5D (B, C, D, H, W) input.
    """
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self):
        """The printed repr must reflect the constructor arguments."""
        f = RandomCrop3D(size=(2, 3, 4), padding=(0, 1, 2), fill=10, pad_if_needed=False, p=1.)
        repr = "RandomCrop3D(crop_size=(2, 3, 4), padding=(0, 1, 2), fill=10, pad_if_needed=False, "\
               "padding_mode=constant, resample=BILINEAR, p=1.0, p_batch=1.0, same_on_batch=False, "\
               "return_transform=False)"
        assert str(f) == repr
    @pytest.mark.parametrize("batch_size", [1, 2])
    def test_no_padding(self, batch_size, device, dtype):
        """Seeded crop without padding must select the precomputed windows."""
        torch.manual_seed(0)
        inp = torch.tensor([[[[
            [0., 1., 2., 3., 4.],
            [5., 6., 7., 8., 9.],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24]
        ]]]], device=device, dtype=dtype).repeat(batch_size, 1, 5, 1, 1)
        f = RandomCrop3D(size=(2, 3, 4), padding=None, align_corners=True, p=1.)
        out = f(inp)
        if batch_size == 1:
            expected = torch.tensor([[[[
                [11, 12, 13, 14],
                [16, 17, 18, 19],
                [21, 22, 23, 24]
            ]]]], device=device, dtype=dtype).repeat(batch_size, 1, 2, 1, 1)
        if batch_size == 2:
            expected = torch.tensor([
                [[[[6.0000, 7.0000, 8.0000, 9.0000],
                   [11.0000, 12.0000, 13.0000, 14.0000],
                   [16.0000, 17.0000, 18.0000, 19.0000]],
                  [[6.0000, 7.0000, 8.0000, 9.0000],
                   [11.0000, 12.0000, 13.0000, 14.0000],
                   [16.0000, 17.0000, 18.0000, 19.0000]]]],
                [[[[11.0000, 12.0000, 13.0000, 14.0000],
                   [16.0000, 17.0000, 18.0000, 19.0000],
                   [21.0000, 22.0000, 23.0000, 24.0000]],
                  [[11.0000, 12.0000, 13.0000, 14.0000],
                   [16.0000, 17.0000, 18.0000, 19.0000],
                   [21.0000, 22.0000, 23.0000, 24.0000]]]]], device=device, dtype=dtype)
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
    def test_same_on_batch(self, device, dtype):
        """same_on_batch=True must pick the same crop window for every sample."""
        f = RandomCrop3D(size=(2, 3, 4), padding=None, align_corners=True, p=1., same_on_batch=True)
        input = torch.eye(6).unsqueeze(dim=0).unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 3, 5, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    @pytest.mark.parametrize("padding", [1, (1, 1, 1), (1, 1, 1, 1, 1, 1)])
    def test_padding_batch(self, padding, device, dtype):
        """All padding spellings (int, 3-tuple, 6-tuple) must behave identically."""
        torch.manual_seed(0)
        batch_size = 2
        inp = torch.tensor([[[
            [0., 1., 2.],
            [3., 4., 5.],
            [6., 7., 8.]
        ]]], device=device, dtype=dtype).repeat(batch_size, 1, 3, 1, 1)
        expected = torch.tensor([[[
            [[0., 1., 2., 10.],
             [3., 4., 5., 10.],
             [6., 7., 8., 10.]],
            [[0., 1., 2., 10.],
             [3., 4., 5., 10.],
             [6., 7., 8., 10.]],
        ]], [[
            [[3., 4., 5., 10.],
             [6., 7., 8., 10.],
             [10, 10, 10, 10.]],
            [[10, 10, 10, 10.],
             [10, 10, 10, 10.],
             [10, 10, 10, 10.]],
        ]]], device=device, dtype=dtype)
        f = RandomCrop3D(size=(2, 3, 4), fill=10., padding=padding, align_corners=True, p=1.)
        out = f(inp)
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
    def test_pad_if_needed(self, device, dtype):
        """Inputs smaller than the crop must be padded up with the fill value."""
        torch.manual_seed(0)
        inp = torch.tensor([[
            [0., 1., 2.],
        ]], device=device, dtype=dtype)
        expected = torch.tensor([[[
            [[9., 9., 9., 9.],
             [9., 9., 9., 9.],
             [9., 9., 9., 9.]],
            [[0., 1., 2., 9.],
             [9., 9., 9., 9.],
             [9., 9., 9., 9.]],
        ]]], device=device, dtype=dtype)
        rc = RandomCrop3D(size=(2, 3, 4), pad_if_needed=True, fill=9, align_corners=True, p=1.)
        out = rc(inp)
        assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
    def test_gradcheck(self, device, dtype):
        """Gradients must flow through the crop."""
        torch.manual_seed(0)  # for random reproductibility
        inp = torch.rand((3, 3, 3), device=device, dtype=dtype)  # 3 x 3 x 3
        inp = utils.tensor_to_gradcheck_var(inp)  # to var
        assert gradcheck(RandomCrop3D(size=(3, 3, 3), p=1.), (inp, ), raise_exception=True)
    @pytest.mark.skip("Need to fix Union type")
    def test_jit(self, device, dtype):
        # Define script (was the undefined name `RandomCrop` with a 2D size).
        op = RandomCrop3D(size=(2, 3, 3), p=1.).forward
        op_script = torch.jit.script(op)
        img = torch.ones(1, 1, 3, 5, 6, device=device, dtype=dtype)
        actual = op_script(img)
        # TODO(review): center_crop3d presumably needs an explicit size argument
        # matching the crop -- confirm once the skip is lifted.
        expected = kornia.center_crop3d(img)
        assert_allclose(actual, expected)
    @pytest.mark.skip("Need to fix Union type")
    def test_jit_trace(self, device, dtype):
        # Define script (was the undefined name `RandomCrop` with a 2D size).
        op = RandomCrop3D(size=(2, 3, 3), p=1.).forward
        op_script = torch.jit.script(op)
        # 1. Trace op
        img = torch.ones(1, 1, 3, 5, 6, device=device, dtype=dtype)
        op_trace = torch.jit.trace(op_script, (img,))
        # 2. Generate new input
        img = torch.ones(1, 1, 3, 5, 6, device=device, dtype=dtype)
        # 3. Evaluate
        actual = op_trace(img)
        expected = op(img)
        assert_allclose(actual, expected)
class TestCenterCrop3D:
    """Shape and gradient checks for kornia.augmentation.CenterCrop3D."""
    def test_no_transform(self, device, dtype):
        # An int size yields a cubic (2, 2, 2) center crop.
        volume = torch.rand(1, 2, 4, 4, 4, device=device, dtype=dtype)
        cropped = kornia.augmentation.CenterCrop3D(2)(volume)
        assert cropped.shape == (1, 2, 2, 2, 2)
    def test_transform(self, device, dtype):
        # return_transform=True yields a (crop, 4x4 transform) pair.
        volume = torch.rand(1, 2, 5, 4, 8, device=device, dtype=dtype)
        result = kornia.augmentation.CenterCrop3D(2, return_transform=True)(volume)
        assert len(result) == 2
        assert result[0].shape == (1, 2, 2, 2, 2)
        assert result[1].shape == (1, 4, 4)
    def test_no_transform_tuple(self, device, dtype):
        # A (d, h, w) tuple crops each spatial dimension independently.
        volume = torch.rand(1, 2, 5, 4, 8, device=device, dtype=dtype)
        cropped = kornia.augmentation.CenterCrop3D((3, 4, 5))(volume)
        assert cropped.shape == (1, 2, 3, 4, 5)
    def test_gradcheck(self, device, dtype):
        # The crop must be differentiable with respect to its input.
        volume = torch.rand(1, 2, 3, 4, 5, device=device, dtype=dtype)
        volume = utils.tensor_to_gradcheck_var(volume)  # to var
        assert gradcheck(kornia.augmentation.CenterCrop3D(3), (volume,), raise_exception=True)
class TestRandomEqualize3D:
    """Tests for RandomEqualize3D (histogram equalization over 3D volumes)."""
    # TODO: improve and implement more meaningful smoke tests e.g check for a consistent
    # return values such a torch.Tensor variable.
    @pytest.mark.xfail(reason="might fail under windows OS due to printing preicision.")
    def test_smoke(self, device, dtype):
        """The printed repr must reflect the constructor arguments."""
        f = RandomEqualize3D(p=0.5)
        repr = "RandomEqualize3D(p=0.5, p_batch=1.0, same_on_batch=False, return_transform=False)"
        assert str(f) == repr
    def test_random_equalize(self, device, dtype):
        """p=1.0 equalizes a linear ramp to the expected values; p=0.0 is identity."""
        f = RandomEqualize3D(p=1.0, return_transform=True)
        f1 = RandomEqualize3D(p=0., return_transform=True)
        f2 = RandomEqualize3D(p=1.)
        f3 = RandomEqualize3D(p=0.)
        bs, channels, depth, height, width = 1, 3, 6, 10, 10
        # squeeze(dim=0) drops the batch dim to also exercise unbatched input
        inputs3d = self.build_input(channels, depth, height, width, device=device, dtype=dtype).squeeze(dim=0)
        # Equalized values of a 10-step linear ramp row.
        row_expected = torch.tensor([
            0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235, 0.9412, 1.0000
        ], device=device, dtype=dtype)
        expected = self.build_input(channels, depth, height, width, bs=1, row=row_expected,
                                    device=device, dtype=dtype)
        identity = kornia.eye_like(4, expected)
        assert_allclose(f(inputs3d)[0], expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[0], inputs3d, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f2(inputs3d), expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f3(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
    def test_batch_random_equalize(self, device, dtype):
        """Same as test_random_equalize but with a batch of two samples."""
        f = RandomEqualize3D(p=1.0, return_transform=True)
        f1 = RandomEqualize3D(p=0., return_transform=True)
        f2 = RandomEqualize3D(p=1.)
        f3 = RandomEqualize3D(p=0.)
        bs, channels, depth, height, width = 2, 3, 6, 10, 10
        inputs3d = self.build_input(channels, depth, height, width, bs, device=device, dtype=dtype)
        row_expected = torch.tensor([
            0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235, 0.9412, 1.0000
        ])
        expected = self.build_input(channels, depth, height, width, bs, row=row_expected,
                                    device=device, dtype=dtype)
        identity = kornia.eye_like(4, expected)  # 2 x 4 x 4
        assert_allclose(f(inputs3d)[0], expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[0], inputs3d, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f2(inputs3d), expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f3(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
    def test_same_on_batch(self, device, dtype):
        """same_on_batch=True must apply the same random decision to every sample."""
        f = RandomEqualize3D(p=0.5, same_on_batch=True)
        input = torch.eye(4, device=device, dtype=dtype)
        input = input.unsqueeze(dim=0).unsqueeze(dim=0).repeat(2, 1, 2, 1, 1)
        res = f(input)
        assert (res[0] == res[1]).all()
    def test_gradcheck(self, device, dtype):
        """Gradients must flow through the equalization."""
        torch.manual_seed(0)  # for random reproductibility
        inputs3d = torch.rand((3, 3, 3), device=device, dtype=dtype)  # 3 x 3 x 3
        inputs3d = utils.tensor_to_gradcheck_var(inputs3d)  # to var
        assert gradcheck(RandomEqualize3D(p=0.5), (inputs3d,), raise_exception=True)
    @staticmethod
    def build_input(channels, depth, height, width, bs=1, row=None, device='cpu', dtype=torch.float32):
        """Build a (bs, channels, depth, height, width) volume whose every row is `row`.

        When `row` is None a linear ramp 0..(width-1)/width is used, which is
        the canonical input for the equalization tests above.
        """
        if row is None:
            row = torch.arange(width, device=device, dtype=dtype) / float(width)
        channel = torch.stack([row] * height)
        image = torch.stack([channel] * channels)
        # stack along depth then swap to put channels before depth
        image3d = torch.stack([image] * depth).transpose(0, 1)
        batch = torch.stack([image3d] * bs)
        return batch.to(device, dtype)
| 43,769 | 17,102 |
#!/usr/bin/env python
from pattern.en import spelling
import sys
def main(args):
    """Print spelling suggestions for each word in a file (one word per line).

    Args:
        args: CLI arguments; args[0] is the path to the input file.

    Returns:
        0 on success, 1 when the required argument is missing.
    """
    if len(args) < 1:
        # BUGFIX: previously fell through to open(args[0]) and crashed with
        # IndexError after printing the usage message.
        sys.stderr.write("1 required argument: <input file>\n")
        return 1
    with open(args[0], 'r') as f:
        # File objects iterate lazily line by line; no need for readlines().
        for line in f:
            word = line.rstrip()
            try:
                suggestions = spelling.suggest(word)
            except Exception:
                # pattern.en can fail on unusual tokens; degrade gracefully.
                suggestions = "No suggestions"
            print('%s %s' % (word, str(suggestions)))
    return 0
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
from distutils.core import setup
# Base runtime dependencies.
requires = ["requests", "paramiko"]
# On Python 2 the stdlib module is named ConfigParser, so the PyPI
# "configparser" backport is added to the requirements there.
try:
    import configparser
except ImportError:
    requires.append("configparser")
setup(
    name="vpsutil",
    version="0.0.0",
    license="MIT",
    packages=["vpsutil"],
    install_requires=requires,
    # Installs the `ocean` console command pointing at vpsutil.command:ocean.
    entry_points={
        "console_scripts": [
            "ocean = vpsutil.command:ocean"
        ]
    }
)
| 391 | 131 |
from django.apps import AppConfig
class PodstronyConfig(AppConfig):
    """Django application configuration for the 'podstrony' app."""
    name = 'podstrony'
| 93 | 30 |
import csv
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.io
import numpy as np
import random
from ..base import BaseDataset
from .schemas import MatDatasetSchema
"""
The format of the mat dataset is:
train_x 28x28x4x400000 uint8 (containing 400000 training samples of 28x28 images each with 4 channels)
train_y 400000x6 uint8 (containing 6x1 vectors having labels for the 400000 training samples)
test_x 28x28x4x100000 uint8 (containing 100000 test samples of 28x28 images each with 4 channels)
test_y 100000x6 uint8 (containing 6x1 vectors having labels for the 100000 test samples)
"""
LABELS = ["barren land", "trees", "grassland", "roads", "buildings", "water bodies"]
class SAT6(BaseDataset):
schema = MatDatasetSchema
url = "http://csc.lsu.edu/~saikat/deepsat/"
labels = LABELS
name = "SAT-6 dataset"
def __init__(self, config):
# now call the constructor to validate the schema
BaseDataset.__init__(self, config)
# load the data
self.mode = self.config.mode
self.data = self.load_dataset(self.config.mat_file_path)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
# load image
img = self.data[index][0]
# apply transformations
if self.transform:
img = self.transform(img)
target = self.data[index][1]
if self.target_transform:
target = self.target_transform(self.data[index][1])
return img, target
def __len__(self):
return len(self.data)
def get_labels(self):
return self.labels
def data_distribution_table(self):
mat_data = scipy.io.loadmat(self.config.mat_file_path)
img_labels = mat_data[f'{self.mode}_y'].transpose()
data = list(np.where(img_labels == 1)[1])
res_list = [[i, self.labels[index]] for i, index in enumerate(data)]
df = pd.DataFrame(res_list, columns=['id', 'Label'])
label_count = df.groupby("Label").count().reset_index()
label_count.columns = ['Label', 'Count']
return label_count
def data_distribution_barchart(self):
label_count = self.data_distribution_table()
fig, ax = plt.subplots(figsize=(12, 10))
sns.barplot(y="Label", x="Count", data=label_count, ax=ax)
return fig
def show_image(self, index):
label = self.labels[self[index][1]]
fig = plt.figure(figsize=(8, 6))
plt.title(f"Image with index {index} from the dataset {self.get_name()}, with label {label}\n",
fontsize=14)
plt.axis('off')
plt.imshow(self[index][0])
return fig
def show_batch(self, size):
if size % 3:
raise ValueError(
"The provided size should be divided by 4!"
)
image_indices = random.sample(range(0, len(self.data)), size)
figure_height = int(size / 3) * 4
figure, ax = plt.subplots(int(size / 3), 3, figsize=(20, figure_height))
figure.suptitle("Example images with labels from {}".format(self.get_name()), fontsize=32)
for axes, image_index in zip(ax.flatten(), image_indices):
axes.imshow(self[image_index][0])
axes.set_title(self.labels[self[image_index][1]], fontsize=18)
axes.set_xticks([])
axes.set_yticks([])
figure.tight_layout()
# figure.subplots_adjust(top=1.0)
return figure
    def load_dataset(self, file_path):
        """Load (image, class-index) pairs from a .mat file.

        Args:
            file_path (str): path to the .mat file; if falsy, an empty list
                is returned.

        Returns:
            list: [(image array, int class index), ...]

        Raises:
            ValueError: if ``self.labels`` has not been provided.
        """
        if not self.labels:
            raise ValueError(
                "You need to provide the list of labels for the dataset"
            )
        data = []
        if file_path:
            mat_data = scipy.io.loadmat(file_path)
            # Stored with the sample axis last; move it first (N first).
            images = mat_data[f'{self.mode}_x'].transpose(3, 0, 1, 2)
            # One-hot label rows; np.where(...)[1] gives the class index.
            img_labels = mat_data[f'{self.mode}_y'].transpose()
            # Keeps only the first 3 channels — presumably RGB out of a
            # wider channel stack; TODO confirm against the file format.
            data = list(zip(images[:, :, :, 0:3], np.where(img_labels == 1)[1]))
        return data
| 4,117 | 1,346 |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from config import Configuration_Manager
# Help text shown at the top of the configuration window.
# (Typo fixes: "diplay" -> "display", "it's" -> "its".)
about_text = '''Welcome to Slideshow!
Photo Directory: The remote will display a slideshow of the photos in this folder (excluding photos in its subfolders)
Installation Type: flatpak commands have an extra prefix, so select the method you used for installation
If Eye of Gnome was pre-installed on the system, the correct installation type is likely the standard/snap option.
Once the Save and Start button is pressed, the web service will start on this machine. You will not be able to change
the settings unless the web service is stopped. This can be done using ctrl+c in the terminal window running this app'''
class ConfigurationManagerGui(Gtk.Window):
    """Settings window for the slideshow.

    Lets the user pick the photo directory and the installation type, then
    either save, cancel, or save-and-start the web service. The decision to
    start is communicated through the ``server_should_run`` flag object.
    """
    def __init__(self, configuration_manager, server_should_run):
        """Build the window and lay out all widgets on a grid.

        :param configuration_manager: object providing get_config(),
            set_config() and get_installation_types()
        :param server_should_run: flag object with allow()/prevent(), read
            by the caller after the GUI closes
        """
        self.configuration_manager = configuration_manager
        self.server_should_run = server_should_run
        current_configuration = self.configuration_manager.get_config()
        Gtk.Window.__init__(self, title="Slideshow")
        grid = Gtk.Grid()
        self.add(grid)
        spacing = 30
        self.set_border_width(spacing)
        grid.set_column_spacing(spacing)
        grid.set_row_spacing(spacing)
        #------------------------------------------------------------------------------
        # About Label
        #------------------------------------------------------------------------------
        # NOTE(review): positional Gtk.Label(text)/Gtk.Button(label) are
        # deprecated in newer PyGObject; keyword forms are preferred.
        self.about_label = Gtk.Label(about_text)
        #------------------------------------------------------------------------------
        # Directory Label
        #------------------------------------------------------------------------------
        self.directory_label = Gtk.Label("Photo Directory")
        #------------------------------------------------------------------------------
        # Directory Entry
        #------------------------------------------------------------------------------
        self.directory_entry = Gtk.Entry()
        self.directory_entry.set_text(current_configuration['directory'])
        self.directory_entry.set_width_chars(80)
        #------------------------------------------------------------------------------
        # Directory Button
        #------------------------------------------------------------------------------
        self.directory_button = Gtk.Button("Choose...")
        self.directory_button.connect("clicked", self.on_directory_button_clicked)
        #------------------------------------------------------------------------------
        # Installation Label
        #------------------------------------------------------------------------------
        self.installation_label = Gtk.Label("Installation Type")
        #------------------------------------------------------------------------------
        # Installation Combo
        #------------------------------------------------------------------------------
        installation_store = Gtk.ListStore(str)
        installation_types = configuration_manager.get_installation_types()
        for installation_type in installation_types:
            installation_store.append([installation_type])
        self.installation_combo = Gtk.ComboBox.new_with_model(installation_store)
        renderer_text = Gtk.CellRendererText()
        self.installation_combo.pack_start(renderer_text, True)
        self.installation_combo.add_attribute(renderer_text, "text", 0)
        # Pre-select the currently configured installation type.
        self.installation_combo.set_active(installation_types.index(current_configuration['install']))
        #------------------------------------------------------------------------------
        # Save Button
        #------------------------------------------------------------------------------
        self.save_button = Gtk.Button("Save")
        self.save_button.connect("clicked", self.on_save_button_clicked)
        #------------------------------------------------------------------------------
        # Cancel Button
        #------------------------------------------------------------------------------
        self.cancel_button = Gtk.Button("Cancel")
        self.cancel_button.connect("clicked", self.on_cancel_button_clicked)
        #------------------------------------------------------------------------------
        # Save and Continue Button
        #------------------------------------------------------------------------------
        self.save_and_start_button = Gtk.Button("Save and Continue")
        self.save_and_start_button.connect("clicked", self.on_save_and_start_button_clicked)
        #------------------------------------------------------------------------------
        # Layout
        #------------------------------------------------------------------------------
        # grid.attach(widget, column, row, width, height)
        grid.attach(self.about_label, 0,0,3,1)
        grid.attach(self.directory_label, 0,1,1,1)
        grid.attach(self.directory_entry, 1,1,1,1)
        grid.attach(self.directory_button, 2,1,1,1)
        grid.attach(self.installation_label, 0,2,1,1)
        grid.attach(self.installation_combo, 1,2,1,1)
        grid.attach(self.save_button, 2,2,1,1)
        grid.attach(self.cancel_button, 0,3,1,1)
        grid.attach(self.save_and_start_button, 2,3,1,1)
    def run(self):
        """Show the window and enter the GTK main loop (blocks until closed)."""
        self.connect("destroy", Gtk.main_quit)
        self.show_all()
        Gtk.main()
    def on_directory_button_clicked(self, widget):
        """Open a folder chooser and copy the selection into the entry."""
        dialog = Gtk.FileChooserDialog(
            "Please choose a folder",
            self,
            Gtk.FileChooserAction.SELECT_FOLDER,
            (
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                "Select",
                Gtk.ResponseType.OK
            )
        )
        dialog.set_default_size(800, 400)
        if dialog.run() == Gtk.ResponseType.OK:
            self.directory_entry.set_text(dialog.get_filename())
        dialog.destroy()
    def on_save_button_clicked(self, button):
        """Persist the current directory and installation-type selections."""
        active_iter = self.installation_combo.get_active_iter()
        self.configuration_manager.set_config(
            self.directory_entry.get_text(),
            self.installation_combo.get_model()[active_iter][0]
        )
    def on_cancel_button_clicked(self, button):
        """Discard changes, flag the server not to start, close the window."""
        self.server_should_run.prevent()
        self.destroy()
    def on_save_and_start_button_clicked(self, button):
        """Persist settings, flag the server to start, close the window."""
        active_iter = self.installation_combo.get_active_iter()
        self.configuration_manager.set_config(
            self.directory_entry.get_text(),
            self.installation_combo.get_model()[active_iter][0]
        )
        self.server_should_run.allow()
        self.destroy()
| 6,768 | 1,669 |
# Read three integers and report the smallest and largest.
n1 = int(input('Primeiro Valor: '))
n2 = int(input('Segundo Valor: '))
n3 = int(input('Terceiro Valor: '))
# min()/max() handle ties correctly; the original pairwise strict
# comparisons missed the case where two equal values are the extreme
# (e.g. inputs 5, 3, 3 reported 5 as the smallest).
menor = min(n1, n2, n3)
print('O menor valor digitado foi {}'.format(menor))
maior = max(n1, n2, n3)
print('O maior valor digitado foi {}'.format(maior))
| 391 | 173 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_brick.i18n import _
from os_brick.initiator.connectors import base
from os_brick import utils
class LocalConnector(base.BaseLinuxConnector):
    """Connector class to attach/detach File System backed volumes."""

    def __init__(self, root_helper, driver=None,
                 *args, **kwargs):
        super(LocalConnector, self).__init__(root_helper, driver=driver,
                                             *args, **kwargs)

    @staticmethod
    def get_connector_properties(root_helper, *args, **kwargs):
        """The Local connector properties.

        Local volumes need no host-side identifiers, so this is empty.
        """
        return {}

    def get_volume_paths(self, connection_properties):
        """Return the single device path identifying the volume."""
        path = connection_properties['device_path']
        return [path]

    def get_search_path(self):
        # No filesystem search path applies to locally attached volumes.
        return None

    def get_all_available_volumes(self, connection_properties=None):
        # TODO(walter-boring): not sure what to return here.
        return []

    @utils.trace
    def connect_volume(self, connection_properties):
        """Connect to a volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
               connection_properties must include:
               device_path - path to the volume to be connected
        :type connection_properties: dict
        :returns: dict
        :raises ValueError: if device_path is missing from
                            connection_properties
        """
        if 'device_path' not in connection_properties:
            msg = (_("Invalid connection_properties specified "
                     "no device_path attribute"))
            raise ValueError(msg)
        device_info = {'type': 'local',
                       'path': connection_properties['device_path']}
        return device_info

    @utils.trace
    def disconnect_volume(self, connection_properties, device_info):
        """Disconnect a volume from the local host.

        Nothing to tear down for a plain local device.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        """
        pass

    def extend_volume(self, connection_properties):
        # TODO(walter-boring): is this possible?
        raise NotImplementedError
from .petal import Petal
__all__ = ["Petal"]
| 46 | 19 |
from gazette.items import Gazette
import datetime as dt
import re
import scrapy
from gazette.spiders.base import BaseGazetteSpider
class RjRioDeJaneiroSpider(BaseGazetteSpider):
    """Collects official gazettes of Rio de Janeiro city from
    doweb.rio.rj.gov.br, searching one day at a time from today back to
    2015-01-01.
    """
    TERRITORY_ID = "3304557"
    name = "rj_rio_de_janeiro"
    allowed_domains = ["doweb.rio.rj.gov.br"]
    start_urls = ["http://doweb.rio.rj.gov.br"]
    # Date placeholder uses the dd/mm/YYYY format, e.g. 20/04/2018.
    search_gazette_url = "http://doweb.rio.rj.gov.br/?buscar_diario=ok&tipo=1&data_busca={}"  # format 20/04/2018
    download_gazette_url = "http://doweb.rio.rj.gov.br/ler_pdf.php?download=ok&edi_id={}"  # 20/04/2018 has edi_id = 3734
    def parse(self, response):
        """Schedule one search request per day from today down to 2015-01-01."""
        parsing_date = dt.date.today()
        end_date = dt.date(2015, 1, 1)
        while parsing_date >= end_date:
            url = self.search_gazette_url.format(parsing_date.strftime("%d/%m/%Y"))
            yield scrapy.Request(
                url, self.parse_search_by_date, meta={"gazette_date": parsing_date}
            )
            parsing_date = parsing_date - dt.timedelta(days=1)
    def parse_search_by_date(self, response):
        """Parse one day's search result page into Gazette items.

        The page either reports no publication, embeds a single edition id
        in an inline script, or lists multiple editions as links.
        """
        gazette_date = response.meta.get("gazette_date")
        no_gazettes = response.css("#dialog-message").extract_first()
        if no_gazettes and "Não existe publicação para esta data" in no_gazettes:
            return
        items = []
        # Single-edition case: the edition id is buried in an inline script.
        one_gazette = response.css(
            "#conteudo_principal > #conteudo_home > #conteudo_erro script"
        ).extract_first()
        if one_gazette:
            match = re.search(".*edi_id=([0-9]+).*", one_gazette)
            if match:
                url = self.download_gazette_url.format(match.group(1))
                items.append(self.create_gazette(gazette_date, url))
        # Multi-edition case: one link per edition in the dialog message.
        multiple_gazettes = response.css("#dialog-message").extract_first()
        if (
            multiple_gazettes
            and "Existe mais de uma publicação para esta data" in multiple_gazettes
        ):
            editions = response.css("#dialog-message a").extract()
            for ed in editions:
                match = re.search(".*edi_id=([0-9]+).*", ed)
                if match:
                    url = self.download_gazette_url.format(match.group(1))
                    # "suplemento" marks a supplementary (extra) edition.
                    is_extra_edition = "suplemento" in ed.lower()
                    items.append(
                        self.create_gazette(gazette_date, url, is_extra_edition)
                    )
        return items
    def create_gazette(self, date, url, is_extra_edition=False):
        """Build a Gazette item for one edition."""
        # NOTE(review): datetime.utcnow() is deprecated on Python 3.12+;
        # datetime.now(timezone.utc) is the modern equivalent.
        return Gazette(
            date=date,
            file_urls=[url],
            is_extra_edition=is_extra_edition,
            territory_id=self.TERRITORY_ID,
            power="executive",
            scraped_at=dt.datetime.utcnow(),
        )
| 2,705 | 919 |
from djitellopy import Tello
import cv2
import numpy as np
import time
# Values for color segmentation.
# They match an orange battery.
# HSV lower/upper bounds for cv2.inRange (OpenCV hue range is 0-179).
LOWER = np.array([0, 239, 180])
UPPER = np.array([30, 255, 255])
# Bounding-box diagonal (pixels) the controller tries to maintain.
DESIRED_OBJECT_SIZE = 100
# Per-axis speed caps passed to send_rc_control (Tello rc units).
MAX_SPEED_FORWARDBACK = 50
MAX_SPEED_UPDOWN = 50
MAX_SPEED_YAW = 100
# Minimum time (seconds) to hold each rc command before the next one.
MIN_MOV_TIME = 0.15
def calculate_velocity(frame_size, center_of_object, max_speed):
    """Proportional speed command from the object's offset off frame center.

    The result is positive when the object lies past the frame midpoint and
    negative before it; its magnitude grows linearly with the offset as a
    fraction of the frame size, doubled after truncation to an int.
    """
    offset = center_of_object - int(frame_size / 2)
    return 2 * int(max_speed * (offset / frame_size))
def main():
    """Fly a Tello drone and visually track an orange object.

    Takes off, then loops: grab a frame, threshold it in HSV, find the
    largest matching contour, and send rc commands to keep the object
    centered at DESIRED_OBJECT_SIZE. ESC in the preview window exits;
    the drone always lands in the finally block.
    """
    tello = Tello()
    tello.connect()
    tello.streamon()
    frame_read = tello.get_frame_read()
    tello.takeoff()
    tello.move_up(40)
    try:
        while True:
            # Get frame
            frame = frame_read.frame
            # Get battery contours
            imgHsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(imgHsv, LOWER, UPPER)
            #res = cv2.bitwise_and(frame, frame, mask=mask)
            # [-2] keeps this compatible with OpenCV 3 and 4 return tuples.
            battery_contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
            # If battery is on image, detect contour
            xg = yg = wg = hg = None
            if len(battery_contours) > 0:
                battery_area = max(battery_contours, key=cv2.contourArea)
                (xg, yg, wg, hg) = cv2.boundingRect(battery_area)
                if max(xg+wg, yg+hg)> 3: # I set an arbitrary number to prevent false positives
                    cv2.rectangle(frame, (xg, yg), (xg+wg, yg+hg), (0, 255, 0), 2)
                    cv2.drawContours(frame, battery_contours, -1, (0,255,0), 3)
            # Show images
            cv2.imshow('Webcam', frame)
            #cv2.imshow('Mask', mask)
            #cv2.imshow('Segmented Image', res)
            # Exit when user press ESC key
            k = cv2.waitKey(3) & 0xFF
            if k == 27: # ESC Key
                break
            velocity_fb = velocity_lr = velocity_ud = velocity_yaw = 0
            if not xg is None:
                # Move the drone
                object_center_x = int(xg + (wg / 2))
                object_center_y = int(yg + (hg / 2))
                object_size = ((wg ** 2) + (hg ** 2)) ** 0.5 # Fast sqrt
                # Positive when the box is smaller than desired -> fly forward.
                object_distance = DESIRED_OBJECT_SIZE - object_size
                if not object_distance == 0:
                    velocity_fb = int(MAX_SPEED_FORWARDBACK * (object_distance / DESIRED_OBJECT_SIZE))
                frame_shape = frame.shape
                # I wrote 'object_center_y + 200' because the camera of Tello drone is slightly inclined to down and that causes the drone to go too high
                # NOTE(review): frame_shape[0] is height and [1] is width;
                # pairing [1] with a y-coordinate here (and [0] with x below)
                # looks swapped — confirm whether it is intentional tuning.
                velocity_ud = calculate_velocity(frame_shape[1], object_center_y + 200, MAX_SPEED_UPDOWN * -1)
                velocity_yaw = calculate_velocity(frame_shape[0], object_center_x, MAX_SPEED_YAW)
                # First rotate, then go forward
                if not velocity_yaw == 0:
                    tello.send_rc_control(0, 0, 0, velocity_yaw)
                    time.sleep(MIN_MOV_TIME)
                if not velocity_lr == velocity_fb == velocity_ud == 0:
                    tello.send_rc_control(velocity_lr, velocity_fb, velocity_ud, 0)
                    time.sleep(MIN_MOV_TIME)
            # Stop all movement before the next iteration.
            tello.send_rc_control(0, 0, 0, 0)
    finally:
        tello.land()
        tello.streamoff()
        tello.end()
        # When everything done, release the capture
        cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
class TempPassword:
    """Plain data holder for a temporary password value."""

    # Maps attribute names to their expected types (swagger-style metadata).
    types = {
        'tempPassword': str
    }

    def __init__(self):
        # Not yet set; populated later by the API layer (str when assigned).
        self.tempPassword = None
| 135 | 43 |
# Python Implementation: CCS811UnitTest
##
# @file CCS811UnitTest.py
#
# @version 1.0.1
#
# @par Purpose
# This module provides the Unit Test for the CCS811 module. It has been
# separated from the CCS811 module to conserve some resources, as this code is
# intended to also run on an Raspberry Pi Pico MCU. On this architecture, it is
# mandatory that the CCS811.py and GPIO_AL.py file both reside on the Raspberry
# Pi Pico's flash drive.
#
# Because of the nature of the class under test, this Unit Test cannot be
# completely automated and requires user interaction to set voltage levels on
# GPIO Pins or measure them with appropriate instruments.
#
# @par Comments
# This is Python 3 code! PEP 8 guidelines are decidedly NOT followed in some
# instances, and guidelines provided by "Coding Style Guidelines" a "Process
# Guidelines" document from WEB Design are used instead where the two differ,
# as the latter span several programming languages and are therefore applicable
# also for projects that require more than one programming language; it also
# provides consistency across hundreds of thousands of lines of legacy code.
# Doing so, ironically, is following PEP 8.
#
# @par Known Bugs
# None
#
# @author
# W. Ekkehard Blanz <Ekkehard.Blanz@gmail.com>
#
# @copyright
# Copyright 2021 W. Ekkehard Blanz
# See NOTICE.md and LICENSE.md files that come with this distribution.
# File history:
#
# Date | Author | Modification
# -----------------+----------------+------------------------------------------
# Fri Oct 29 2021 | Ekkehard Blanz | separated from CCS811L.py
# Thu Dec 09 2021 | Ekkehard Blanz | added warning not to use hardware mode on
# | | the Raspberry Pi
# | |
import sys
try:
import os.path
import os
sys.path.append( os.path.join( os.path.dirname( __file__ ),
os.pardir ) )
sys.path.append( os.path.join( os.path.dirname( __file__ ),
os.pardir,
os.pardir,
'GPIO_AL' ) )
except ImportError:
# on the Pico there is no os.path but all modules are in the same directory
pass
from GPIO_AL import *
from CCS811 import *
try:
import traceback
exitChar = 'Ctrl-C'
except:
traceback = None
# keyboard interrupt on Raspberry Pi Pico is broken and gets "stuck"
# so new inputs are also interrupted - use 'q' instead where possible
exitChar = 'q'
# main program - Unit Test
class Temperature( object ):
    """Immutable temperature value convertible between K, deg C and deg F.

    Exactly one of the constructor arguments should be supplied; when
    several are given, Fahrenheit takes precedence over Celsius, which
    takes precedence over Kelvin.
    """
    ##! Freezing point in deg F
    F0 = 32
    ##! Absolute zero in deg C
    T0 = -273.15
    ##! ratio of difference of deg C to deg F
    CF_RATIO = 9. / 5.

    def __init__( self, kelvin=None, celsius=None, fahrenheit=None ):
        """Store the value internally in Kelvin (None when no arg given)."""
        if fahrenheit is not None:
            self.__kelvin = (fahrenheit - self.F0) / self.CF_RATIO - self.T0
        elif celsius is not None:
            self.__kelvin = celsius - self.T0
        else:
            self.__kelvin = kelvin

    @property
    def kelvin( self ):
        """Temperature in Kelvin (None if never set)."""
        return self.__kelvin

    @property
    def celsius( self ):
        """Temperature converted to degrees Celsius."""
        return self.__kelvin + self.T0

    @property
    def fahrenheit( self ):
        """Temperature converted to degrees Fahrenheit."""
        return self.celsius * self.CF_RATIO + self.F0
class Humidity( object ):
    """Trivial read-only wrapper around a fixed relative-humidity value."""

    def __init__( self, humidity ):
        """Remember the humidity reading (in percent)."""
        self.__humidity = humidity

    @property
    def humidity( self ):
        """The stored humidity value, unchanged."""
        return self.__humidity
if __name__ == "__main__":
    DEBUG = True

    def main():
        """!
        @brief Unit Test for CCS811.

        Interactively configures an I2C bus and a CCS811 sensor from
        console input, then runs a data-acquisition loop in poll or
        interrupt mode, exercising sleep/wake when a wakeup Pin is given.
        Loops back to sensor setup after each run; exits on keyboard
        interrupt, closing sensor and bus objects on the way out.
        """
        i2cBus = None
        aqSensor = None
        # (typo fix: was "casses" / "patantheses")
        print( 'In all cases accept default values (in parentheses) by '
               'hitting Enter\n' )
        try:
            # Bus-parameter entry loop: retry on GPIO errors, leave on
            # success or on user abort.
            while True:
                try:
                    print( '\nSet up I2C bus parameters' )
                    print( '-------------------------' )
                    print( 'Enter {0} to end this test\n'
                           ''.format( exitChar ) )
                    line = input( '\nsda Pin ({0}): '
                                  ''.format( I2Cbus.DEFAULT_DATA_PIN ) )
                    if line:
                        sdaPin = int( line )
                    else:
                        sdaPin = I2Cbus.DEFAULT_DATA_PIN
                    line = input( 'scl Pin ({0}): '
                                  ''.format( I2Cbus.DEFAULT_CLOCK_PIN ) )
                    if line:
                        sclPin = int( line )
                    else:
                        sclPin = I2Cbus.DEFAULT_CLOCK_PIN
                    line = input( 'frequency in Hz ({0} Hz): '
                                  ''.format( I2Cbus.DEFAULT_I2C_FREQ ) )
                    if line:
                        frequency = int( line )
                    else:
                        frequency = I2Cbus.DEFAULT_I2C_FREQ
                    line = input( 'mode - {0} for HW, {1} for SW ({2}) - '
                                  'do not use HW mode on Raspberry Pi 3: '
                                  ''.format( I2Cbus.HARDWARE_MODE,
                                             I2Cbus.SOFTWARE_MODE,
                                             I2Cbus.DEFAULT_MODE ) )
                    if line:
                        mode = int( line )
                    else:
                        mode = I2Cbus.DEFAULT_MODE
                    i2cBus = I2Cbus( sdaPin, sclPin, frequency, mode )
                    print( 'I2C bus opened successfully: {0}'.format( i2cBus ) )
                    break
                except (KeyboardInterrupt, ValueError):
                    print()
                    break
                except GPIOError as e:
                    print( e )
                    continue

            # Sensor setup + acquisition; repeats until the user quits.
            while i2cBus is not None:
                try:
                    print( '\nSet up CCS811 sensor parameters' )
                    print( '-------------------------------' )
                    print( 'Again, enter {0} to end this input and '
                           'start over\n'.format( exitChar ) )
                    line = input( 'Enter CCS811 device address in hex '
                                  '(0x{0:02X}): '
                                  ''.format( CCS811.DEFAULT_ADDR ) )
                    if line:
                        i2cAddr = int( line, 16 )
                    else:
                        i2cAddr = CCS811.DEFAULT_ADDR
                    line = input( 'Enter interrupt Pin or empty line for poll '
                                  'mode: ' )
                    if line:
                        interruptPin = int( line )
                    else:
                        interruptPin = None
                    line = input( 'Enter wakeup Pin or empty line: ' )
                    if line:
                        wakeupPin = int( line )
                    else:
                        wakeupPin = None
                    print( 'Enter measurement interval' )
                    print( '1 s .... {0}'.format( CCS811.MEAS_INT_1 ) )
                    print( '10 s ... {0}'.format( CCS811.MEAS_INT_10 ) )
                    print( '60 s ... {0}'.format( CCS811.MEAS_INT_60 ) )
                    print( '250 ms . {0}'.format( CCS811.MEAS_INT_250MS ) )
                    measInterval = int( input( '> ' ) )
                    line = input( 'Enter temperature in deg F to use dummy '
                                  'temp object or empty line: ' )
                    if line:
                        tempObj = Temperature( fahrenheit=float( line ) )
                    else:
                        tempObj = None
                    line = input( 'Enter humidity in % to use dummy humidity '
                                  'object or empty line: ' )
                    if line:
                        humObj = Humidity( float( line ) )
                    else:
                        humObj = None
                    aqSensor = CCS811( i2cBus,
                                       measInterval,
                                       interruptPin,
                                       wakeupPin,
                                       tempObj,
                                       humObj,
                                       i2cAddr )
                    print( 'Successfully opened CCS811 sensor: {0}'
                           ''.format( aqSensor ) )
                except ValueError as e:
                    if traceback is not None:
                        traceback.print_exc()
                    else:
                        print( '\nCCS811 ERROR: {0}'.format( e ) )
                    try:
                        del aqSensor
                    except:
                        pass
                    aqSensor = None
                    continue
                except GPIOError as e:
                    if traceback is not None:
                        traceback.print_exc()
                    else:
                        print( '\nGPIO ERROR: {0}'.format( e ) )
                    try:
                        del aqSensor
                    except:
                        pass
                    aqSensor = None
                    continue

                try:
                    print( 'Enter Ctrl-C to end data acquisition' )
                    answer = input( 'Hit Enter to start test or q to quit' )
                    if answer == 'q':
                        break
                    if not wakeupPin:
                        # NOTE(review): poll vs. interrupt here is decided by
                        # the I2C bus 'mode', while the wakeup branch below
                        # uses interruptPin - confirm this is intended.
                        if mode == 0:
                            print( 'Testing in regular poll mode...' )
                            while True:
                                aqSensor.waitforData()
                                print( 'CO2: {0} ppm, total VOC: '
                                       '{1} ppb'.format( aqSensor.CO2,
                                                         aqSensor.tVOC ) )
                                if aqSensor.errorCondition:
                                    print( aqSensor.errorText )
                        else:
                            print( 'Testing in regular interrupt mode...' )
                            while True:
                                if not aqSensor.staleMeasurements:
                                    print( 'CO2: {0} ppm, total VOC: '
                                           '{1} ppb'.format( aqSensor.CO2,
                                                             aqSensor.tVOC ) )
                                    if aqSensor.errorCondition:
                                        print( aqSensor.errorText )
                    else:
                        print( 'Testing sleep/wake functionality in ', end='' )
                        if interruptPin is None:
                            print( 'poll mode...' )
                            while True:
                                startTime = time.time()
                                while time.time() - startTime < 10:
                                    aqSensor.waitforData()
                                    print( 'CO2: {0} ppm, total VOC: {1} '
                                           'ppb'.format( aqSensor.CO2,
                                                         aqSensor.tVOC ) )
                                print( 'sending sensor to sleep '
                                       '(should not see measurements)...' )
                                aqSensor.sleep()
                                startTime = time.time()
                                while time.time() - startTime < 10:
                                    if aqSensor.dataReady:
                                        print( 'CO2: {0} ppm, total VOC: {1} '
                                               'ppb'.format( aqSensor.CO2,
                                                             aqSensor.tVOC ) )
                                print( 'waking sensor up again '
                                       '(expect new measurements)!' )
                                aqSensor.wake()
                        else:
                            print( 'interrupt mode...' )
                            while True:
                                startTime = time.time()
                                while time.time() - startTime < 20:
                                    if not aqSensor.staleMeasurements:
                                        print( 'CO2: {0} ppm, total VOC: {1} '
                                               'ppb'.format( aqSensor.CO2,
                                                             aqSensor.tVOC ) )
                                print( 'sending sensor to sleep '
                                       '(expect no measurements '
                                       'and no interrupt signals)...' )
                                aqSensor.sleep()
                                startTime = time.time()
                                while time.time() - startTime < 20:
                                    if not aqSensor.staleMeasurements:
                                        print( 'CO2: {0} ppm, total VOC: {1} '
                                               'ppb'.format( aqSensor.CO2,
                                                             aqSensor.tVOC ) )
                                print( 'waking sensor up again '
                                       '(expect new measurements)!' )
                                aqSensor.wake()
                except KeyboardInterrupt:
                    print( '\nGot keyboard interrupt' )
        except KeyboardInterrupt:
            pass
        except ValueError as e:
            if traceback is not None:
                traceback.print_exc()
            else:
                print( '\nCCS811 ERROR: {0}'.format( e ) )
        except GPIOError as e:
            if traceback is not None:
                traceback.print_exc()
            else:
                print( '\nGPIO ERROR: {0}'.format( e ) )
        except Exception as e:
            if traceback is not None:
                traceback.print_exc()
            else:
                print( '\nERROR: {0}'.format( e ) )

        # Tear down in reverse order of construction.
        print( '\nClosing CCS811 and I2Cbus objects (in that order)...' )
        try:
            if aqSensor is not None:
                aqSensor.close()
        except:
            pass
        if i2cBus is not None:
            i2cBus.close()
        print( '\nExiting...\n' )
        return 0

    sys.exit( int( main() or 0 ) )
| 14,873 | 3,887 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from tastypie.http import HttpForbidden
from tastypie.resources import ModelResource
from tastypie.exceptions import ImmediateHttpResponse, NotFound
from django.db.models import Q
from haystack.query import SearchQuerySet
from iam.contrib.tastypie.resource import IAMResourceMixin
from .serializers import AppSerializer
class GCloudModelResource(IAMResourceMixin, ModelResource):
    """Base tastypie ModelResource adding IAM authorization, JSON-only
    responses, free-text ``q`` filtering and soft-delete support."""
    login_exempt = False

    def wrap_view(self, view):
        """
        @summary: Apply the common view decoration and propagate the
                  ``login_exempt`` flag onto the wrapped view callable.
        """
        view = super(GCloudModelResource, self).wrap_view(view)
        setattr(view, "login_exempt", self.login_exempt)
        return view

    def determine_format(self, request):
        """
        @summary: Force the returned data format to JSON.
        """
        return "application/json"

    def unauthorized_result(self, exception):
        """
        @summary: return 403 if operation is forbidden, while default of tastypie is 401
        @return:
        """
        raise ImmediateHttpResponse(response=HttpForbidden())

    def build_filters(self, filters=None, ignore_bad_filters=False):
        """
        @summary: Extend the default filters with a free-text ``q`` search.
                  When ``Meta.q_fields`` is declared, build an OR'ed
                  ``__contains`` query over those fields; otherwise run a
                  haystack full-text search and turn the hits into a
                  ``pk__in`` filter.
        """
        if filters is None:
            filters = {}
        orm_filters = super(GCloudModelResource, self).build_filters(filters, ignore_bad_filters)
        if filters.get("q", "").strip():
            if getattr(self.Meta, "q_fields", []):
                # OR together a contains-lookup for every declared field.
                queries = [Q(**{"%s__contains" % field: filters["q"]}) for field in self.Meta.q_fields]
                query = queries.pop()
                for item in queries:
                    query |= item
                orm_filters["q"] = query
            else:
                # NOTE(review): this branch runs only when q_fields is empty,
                # yet still passes q_fields to query_facet — confirm intended.
                sqs = (
                    SearchQuerySet()
                    .models(self._meta.object_class)
                    .auto_query(filters["q"])
                    .query_facet(self.Meta.q_fields)
                )
                # Build a custom filter condition from the search hits.
                orm_filters["pk__in"] = [i.pk for i in sqs]
        return orm_filters

    def apply_filters(self, request, applicable_filters):
        """
        @summary: Apply the ORM filters, handling the ``q`` entry (a Q
                  object built in build_filters) separately from the plain
                  keyword filters.
        """
        if "q" in applicable_filters:
            query = applicable_filters.pop("q")
        else:
            query = None
        queryset = super(GCloudModelResource, self).apply_filters(request, applicable_filters)
        return queryset.filter(query) if query else queryset

    def obj_delete(self, bundle, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete``.

        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.

        Models exposing an ``is_deleted`` attribute are soft-deleted
        (flagged and saved); everything else is deleted outright.
        """
        if not hasattr(bundle.obj, "delete"):
            try:
                bundle.obj = self.obj_get(bundle=bundle, **kwargs)
            except self.Meta.object_class.DoesNotExist:
                raise NotFound("A model instance matching the " "provided arguments could not be found")
        self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
        if "is_deleted" in bundle.obj.__dict__:
            bundle.obj.__dict__.update({"is_deleted": True})
            bundle.obj.save()
        else:
            bundle.obj.delete()

    class Meta:
        serializer = AppSerializer()
        always_return_data = True
        # How many results a Resource shows per page; defaults to the
        # API_LIMIT_PER_PAGE setting (or 20) when unset. 0 disables paging.
        limit = 0
        # Upper bound on results per page; a user-specified limit above this
        # is capped to it. 0 disables the cap.
        max_limit = 0
| 4,169 | 1,266 |