seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
import time


def start(npc, player, town):
    """Run one step of the quest dialogue between `player` and `npc`.

    Depending on the quest state this hands in collected items, pays a
    gold fee, or acknowledges a cleared dungeon, then prints the NPC's
    response and hands out the reward exactly once.

    Parameters:
        npc: object with `name` and a `quest` exposing `name`,
            `questAmount`, `stage`, `finished`, `rewardtype`, `reward`
            and `info()`.
        player: object with `name`, `gold` and an `inventory` list of
            items exposing `name`.
        town: object whose `dungeon.cleared` flag gates the
            "clear the dungeon" quest (sentinel amount 100).
    """
    quest = npc.quest
    target = quest.name.lower()
    # Items in the player's inventory matching the quest target.
    matching = [item for item in player.inventory
                if item.name.lower() == target]
    if matching:
        # Fetch quest: hand the items over only when the count is exact.
        if len(matching) == quest.questAmount:
            quest.stage = "complete"
            print(npc.name + ' has taken ' + target + ' from you.')
            # Rebuild the list in place: the original code removed
            # elements while iterating the same list, which silently
            # skipped every other matching item.
            player.inventory[:] = [item for item in player.inventory
                                   if item.name.lower() != target]
    elif 5 < quest.questAmount < 90 and not quest.finished:
        # Gold-fee quest: amounts strictly between 5 and 90 are a price.
        if player.gold >= quest.questAmount:
            quest.stage = "complete"
            player.gold -= quest.questAmount
            print(npc.name + ' has taken ' + str(quest.questAmount) + ' gold from you.')
    elif quest.questAmount == 100 and town.dungeon.cleared == True:
        # Dungeon quest: amount 100 is a sentinel for "clear the dungeon".
        print('Thank you Hero. You have done great work vanquishing those monsters!')
        quest.stage = "complete"

    if quest.stage == "complete":
        print('Thank you so much! My Hero!')
        time.sleep(2)
        if quest.finished:
            # Reward was already claimed on an earlier visit.
            print('Dont be greedy, you already got your reward! >:(')
        if not quest.finished:
            print('Here is your reward! Thanks again!')
            if quest.rewardtype == "item":
                player.inventory.append(quest.reward)
                print()
                print('You received the ' + quest.reward.name)
                print()
            else:
                # Any non-item reward is paid out as gold.
                print()
                print('You have received ' + str(quest.reward) + ' gold!')
                print()
                player.gold += quest.reward
            quest.finished = True
    if quest.stage == "during":
        print('Hurry up and finish that quest!')
    if quest.stage == "begin":
        print(player.name + ', I must give you this tough quest. Please do me this favour!')
        print(npc.name + ' hands you a small note:')
        time.sleep(4)
        quest.info()
        quest.stage = "during"
| Taylor365/Python | DungeonHeroes/Functions/questing.py | questing.py | py | 2,232 | python | en | code | 0 | github-code | 13 |
from __future__ import division
from __future__ import print_function
from builtins import str
from past.utils import old_div
import sys
#import networkx as nx

# Stream filter: reads "<bin> <value>" pairs on stdin and prints each value
# divided by a per-bin correction factor loaded from the -c/--correction file.
if __name__=="__main__":
    import sys
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-c','--correction',required=True)
#    parser.add_argument('-G','--genomesize',type=float)
    # NOTE(review): --max, --debug and G below are parsed/initialised but
    # never used in this script — likely leftovers; confirm before removal.
    parser.add_argument('-m','--max',type=int,default=1000000)
    parser.add_argument('-d','--debug',action="store_true")
    parser.add_argument('-p','--progress',action="store_true")
    args = parser.parse_args()
    if args.debug:
        args.progress=True
    # Echo the parsed arguments as a "#" comment line when requested.
    if args.progress: print("#",str(args))
    sys.stdout.flush()

    # Map: integer bin -> multiplicative correction factor.
    correction={}
    G=0.0
    if args.correction:
        f = open(args.correction)
        while True:
            l = f.readline()
            if not l: break
            # Skip comment lines in the correction file.
            if l[0]=="#": continue
            c=l.strip().split()
            correction[int(c[0])]=float(c[1])
        f.close()

    # Divide column 2 by the correction for the bin in column 1;
    # bins with no entry fall back to a factor of 1.0 (no change).
    while True:
        l=sys.stdin.readline()
        if not l: break
        c=l.strip().split()
        print(c[0],old_div(float(c[1]),correction.get(int(float(c[0])),1.0)))
| DovetailGenomics/HiRise_July2015_GR | scripts/apply_contiguity_correction.py | apply_contiguity_correction.py | py | 1,185 | python | en | code | 28 | github-code | 13 |
11071308543 | from PyQt5.QtWidgets import QAction, qApp, QMenu
from PyQt5.QtGui import QIcon, QFont
from FileAction.openFileAction import *
from FileAction.saveFileAction import *
from FileAction.saveAsFileAction import *
from FileAction.openFileSequenceAction import *
from ImageProcessingAction.ToolAction.lassoAction import *
# from FileAction.featureExtractAction import *
from ImageProcessingAction.Preprocessing.denoisingMethod import *
# Menu bar construction
def menubarAchieve(var):
    """Build the main-window menu bar on `var` (a QMainWindow-like widget).

    Creates the File / Edit / View / Process / Help menus, their actions
    and submenus, and wires each action to its handler.  Several actions
    are still placeholders — see the NOTE(review) comments below.
    """
    # --- File actions -------------------------------------------------
    openfileAction = QAction('&打开图像', var)
    openfileAction.setShortcut('Ctrl+O')
    openfileAction.setStatusTip('打开图像')
    openfileAction.triggered.connect(lambda: openFile(var))
    opennextfileAction = QAction('&打开下一个', var)
    opennextfileAction.setShortcut('Ctrl+Shift+O')
    opennextfileAction.setStatusTip('打开下一张图像')
    opennextfileAction.triggered.connect(lambda: openNextFile(var))
    openseriesfileAction = QAction('&打开文件夹', var)
    openseriesfileAction.setStatusTip('打开图像文件夹')
    openseriesfileAction.triggered.connect(lambda: openFileSequence(var))
    closeAction = QAction('&退出', var)
    closeAction.setShortcut('Ctrl+W')
    closeAction.setStatusTip('退出')
    closeAction.triggered.connect(qApp.quit)
    saveAction = QAction('&保存', var)
    saveAction.setShortcut('Ctrl+S')
    saveAction.setStatusTip('保存图像')
    saveAction.triggered.connect(lambda: saveFile(var))
    saveasAction = QAction('&另存为', var)
    saveasAction.setStatusTip('图像另存为')
    saveasAction.triggered.connect(lambda: saveAsFile(var))
    # self.statusBar()
    # QMenuBar * menubar = new QMenuBar(0)
    menubar = var.menuBar()
    menubar.setFont(QFont('SansSerif', 15))
    # Keep the menu bar inside the window (relevant on macOS).
    menubar.setNativeMenuBar(False)
    fileMenu = menubar.addMenu('&文件')
    fileMenu.addAction(openfileAction)
    fileMenu.addAction(opennextfileAction)
    fileMenu.addAction(openseriesfileAction)
    fileMenu.addAction(saveAction)
    fileMenu.addAction(saveasAction)
    fileMenu.addAction(closeAction)
    # --- Edit menu ----------------------------------------------------
    # NOTE(review): undo/redo/clear below are wired to qApp.quit, so
    # clicking them exits the application — placeholder handlers.
    undoAction = QAction('&撤销', var)
    undoAction.setShortcut('Ctrl+Z')
    undoAction.setStatusTip('撤销操作')
    undoAction.triggered.connect(qApp.quit)
    doAction = QAction('&恢复', var)
    doAction.setShortcut('Ctrl+U')
    doAction.setStatusTip('恢复操作')
    doAction.triggered.connect(qApp.quit)
    clearAction = QAction('&清除图像痕迹', var)
    clearAction.setStatusTip('清除图像上痕迹')
    clearAction.triggered.connect(qApp.quit)
    compileMenu = menubar.addMenu('&编辑')
    compileMenu.addAction(undoAction)
    compileMenu.addAction(doAction)
    compileMenu.addAction(clearAction)
    # --- View menu ----------------------------------------------------
    imageInforAction = QAction('&图像信息', var, checkable = True)
    imageInforAction.setStatusTip('显示图像信息')
    imageInforAction.setChecked(True)
    imageInforAction.triggered.connect(qApp.quit)
    viewMenu = menubar.addMenu('&视图')
    viewMenu.addAction(imageInforAction)
    # --- Image-processing menu ---------------------------------------
    featureAction = QAction('&特征提取', var)
    featureAction.setStatusTip('提取图像特征')
    # featureAction.triggered.connect(lambda: featureExtract(var))
    featureAction.triggered.connect(qApp.quit)
    # Denoising submenu: Gaussian / median / P-M equation / TV.
    denoiseAction = QMenu('&图像去噪', var)
    GaussianFilterAction = QAction(QIcon('save.png'), '&高斯滤波去噪', var)
    GaussianFilterAction.setStatusTip('高斯滤波去噪')
    GaussianFilterAction.triggered.connect(lambda: GaussianDenoising(var))
    denoiseAction.addAction(GaussianFilterAction)
    # NOTE(review): the median, P-M and TV actions below all trigger
    # RectLasso — apparently copy-paste placeholders; confirm the
    # intended denoising handlers.
    MedianFilterAction = QAction(QIcon('save.png'), '&中值滤波去噪', var)
    MedianFilterAction.setStatusTip('中值滤波去噪')
    MedianFilterAction.triggered.connect(lambda: RectLasso(var))
    denoiseAction.addAction(MedianFilterAction)
    PMeQuationAction = QAction(QIcon('save.png'), '&P-M方程去噪', var)
    PMeQuationAction.setStatusTip('P-M方程去噪')
    PMeQuationAction.triggered.connect(lambda: RectLasso(var))
    denoiseAction.addAction(PMeQuationAction)
    TVAction = QAction(QIcon('save.png'), '&TV法去噪', var)
    TVAction.setStatusTip('TV法去噪')
    TVAction.triggered.connect(lambda: RectLasso(var))
    denoiseAction.addAction(TVAction)
    smoothAction = QAction('&平滑处理', var)
    smoothAction.setStatusTip('平滑处理操作')
    smoothAction.triggered.connect(qApp.quit)
    contrastAction = QAction('&对比度增强', var)
    contrastAction.setStatusTip('增强图像对比度')
    contrastAction.triggered.connect(qApp.quit)
    # Lasso submenu: rectangle / ellipse / polygon / magnetic.
    lassoAction = QMenu('&套索工具', var)
    rectangleLassoAction = QAction(QIcon('save.png'), '&矩形套索工具', var)
    rectangleLassoAction.setStatusTip('矩形套索工具')
    rectangleLassoAction.triggered.connect(lambda: RectLasso(var))
    lassoAction.addAction(rectangleLassoAction)
    ellipseLassoAction = QAction(QIcon('save.png'), '&椭圆形套索工具', var)
    ellipseLassoAction.setStatusTip('椭圆形套索工具')
    ellipseLassoAction.triggered.connect(lambda: elliLasso(var))
    lassoAction.addAction(ellipseLassoAction)
    polyLassoAction = QAction(QIcon('save.png'), '&多边形套索工具', var)
    polyLassoAction.setStatusTip('多边形套索工具')
    polyLassoAction.triggered.connect(lambda: polyLasso(var))
    lassoAction.addAction(polyLassoAction)
    customLassoAction = QAction(QIcon('save.png'), '&磁性套索工具', var)
    customLassoAction.setStatusTip('磁性套索工具')
    customLassoAction.triggered.connect(qApp.quit)
    lassoAction.addAction(customLassoAction)
    # Segmentation submenu: thresholding / graph cut.
    segmentationAction = QMenu('&图像分割', var)
    thresholdAction = QAction('&阈值分割', var)
    thresholdAction.setStatusTip('阈值分割')
    thresholdAction.triggered.connect(qApp.quit)
    segmentationAction.addAction(thresholdAction)
    graphcutAction = QAction('&图切', var)
    graphcutAction.setStatusTip('图切')
    graphcutAction.triggered.connect(qApp.quit)
    segmentationAction.addAction(graphcutAction)
    processMenu = menubar.addMenu('&处理')
    processMenu.addAction(featureAction)
    processMenu.addMenu(denoiseAction)
    processMenu.addAction(smoothAction)
    processMenu.addAction(contrastAction)
    processMenu.addMenu(lassoAction)
    processMenu.addMenu(segmentationAction)
    # --- Help menu ----------------------------------------------------
    helpAction = QAction(QIcon('help.png'), '&帮助文档', var)
    helpAction.setStatusTip('打开帮助文档')
    helpAction.triggered.connect(qApp.quit)
    helpMenu = menubar.addMenu('&帮助')
    helpMenu.addAction(helpAction)
37965072936 | import sys
import os
import jax
import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
import jax.numpy as jnp
import optax
import wandb
import logging
from galsim_jax.dif_models import AutoencoderKLModule
from galsim_jax.utils import (
save_checkpoint,
load_checkpoint,
get_wandb_local_dir,
create_folder,
save_plot_as_image,
save_samples,
get_git_commit_version,
get_activation_fn,
get_optimizer,
norm_values_one_diff,
new_optimizer,
)
from galsim_jax.convolution import convolve_kpsf
from galsim_jax.datasets import cosmos
from jax.lib import xla_bridge
from astropy.stats import mad_std
from tensorflow_probability.substrates import jax as tfp
from flax import linen as nn # Linen API
from jax import random
from functools import partial
from tqdm.auto import tqdm
from absl import app
from absl import flags
# logging.getLogger("tfds").setLevel(logging.ERROR)
# flags.DEFINE_string("input_folder", "/data/tensorflow_datasets/", "Location of the input images")

# Command-line flags (absl): dataset choice, optimisation hyper-parameters
# and Weights & Biases bookkeeping for the training run in main().
flags.DEFINE_string("dataset", "Cosmos/25.2", "Suite of simulations to learn from")
# flags.DEFINE_string("output_dir", "./weights/gp-sn1v5", "Folder where to store model.")
flags.DEFINE_integer("batch_size", 16, "Size of the batch to train on.")
flags.DEFINE_float("learning_rate", 5e-2, "Learning rate for the optimizer.")
flags.DEFINE_integer("training_steps", 125000, "Number of training steps to run.")
# flags.DEFINE_string("train_split", "90%", "How much of the training set to use.")
# flags.DEFINE_boolean('prob_output', True, 'The encoder has or not a probabilistic output')
flags.DEFINE_float("reg_value", 1e-6, "Regularization value of the KL Divergence.")
flags.DEFINE_integer("gpu", 0, "Index of the GPU to use, e.g.: 0, 1, 2, etc.")
# flags.DEFINE_string(
#     "experiment", "model_1", "Type of experiment, e.g. 'model_1', 'model_2', etc."
# )
flags.DEFINE_string("project", "VAE-SD", "Name of the project, e.g.: 'VAE-SD'")
flags.DEFINE_string(
    "name", "test_Cosmos_Conv2", "Name for the experiment, e.g.: 'dim_64_kl_0.01'"
)
flags.DEFINE_string(
    "act_fn", "gelu", "Activation function, e.g.: 'gelu', 'leaky_relu', etc."
)
flags.DEFINE_string("opt", "adafactor", "Optimizer, e.g.: 'adam', 'adamw'")
flags.DEFINE_integer("resblocks", 2, "Number of resnet blocks.: 1, 2.")
flags.DEFINE_integer("step_sch", 50000, "Steps for the lr_schedule")
flags.DEFINE_string(
    "noise",
    "Pixel",
    "Type of noise, Fourier for correlated, Pixel white Gaussian noise",
)
flags.DEFINE_float(
    "alpha", 0.0001, "Coefficient of reduction of initial learning rate"
)
FLAGS = flags.FLAGS

# Short aliases for TensorFlow Probability's JAX substrates.
tfd = tfp.distributions
tfb = tfp.bijectors
def main(_):
    """Train the KL-regularised convolutional autoencoder on galaxy stamps.

    Reads all hyper-parameters from absl FLAGS, builds the tf.data input
    pipeline, optimises the ELBO with optax, logs every metric to
    Weights & Biases, checkpoints the best parameters and finally saves
    loss/log-likelihood plots plus sample reconstructions.
    """
    # Checking for GPU access
    print("Device: {}".format(xla_bridge.get_backend().platform))

    # Checking the GPUs available
    gpus = jax.devices("gpu")
    print("Number of avaliable devices : {}".format(len(gpus)))

    # Ensure TF does not see GPU and grab all GPU memory.
    tf.config.set_visible_devices([], device_type="GPU")

    # Loading the dataset (the train split itself is re-loaded inside
    # input_fn; this call mainly fetches the dataset info).
    train_dset, info = tfds.load(name=FLAGS.dataset, with_info=True, split="train")

    # What's in our dataset:
    # info

    def input_fn(mode="train", batch_size=FLAGS.batch_size):
        """Build the batched, preprocessed tf.data pipeline.

        mode: 'train' (repeated + shuffled) or 'test' (single pass).
        """

        def preprocess_image(data):
            # Reshape 'psf' and 'image' fields to (128, 128, 1).
            data["kpsf_real"] = tf.expand_dims(data["kpsf_real"], axis=-1)
            data["kpsf_imag"] = tf.expand_dims(data["kpsf_imag"], axis=-1)
            data["image"] = tf.expand_dims(data["image"], axis=-1)
            return data

        if mode == "train":
            dataset = tfds.load(FLAGS.dataset, split="train")
            dataset = dataset.repeat()
            dataset = dataset.shuffle(10000)
        else:
            dataset = tfds.load(FLAGS.dataset, split="test")
        dataset = dataset.batch(batch_size, drop_remainder=True)
        dataset = dataset.map(preprocess_image)  # Apply data preprocessing
        dataset = dataset.prefetch(
            -1
        )  # fetch next batches while training current one (-1 for autotune)
        return dataset

    # Dataset as a numpy iterator
    dset = input_fn().as_numpy_iterator()

    # Generating random keys for JAX
    rng, rng_2 = jax.random.PRNGKey(0), jax.random.PRNGKey(1)

    # Dummy input used only to initialize the autoencoder parameters.
    batch_autoenc = jnp.ones((1, 128, 128, 1))
    latent_dim = 128
    act_fn = get_activation_fn(FLAGS.act_fn)

    # Initializing the AutoEncoder
    Autoencoder = AutoencoderKLModule(
        ch_mult=(1, 2, 4, 8, 16),
        num_res_blocks=FLAGS.resblocks,
        double_z=True,
        z_channels=1,
        resolution=latent_dim,
        in_channels=1,
        out_ch=1,
        ch=1,
        embed_dim=1,
        act_fn=act_fn,
    )
    params = Autoencoder.init(rng, x=batch_autoenc, seed=rng_2)

    # Draw a first batch from the dataset (batch_size images).
    batch_im = next(dset)
    # Generating new keys to use them for inference
    rng_1, rng_2 = jax.random.split(rng_2)

    # Optimizer initialisation
    optimizer = new_optimizer(
        FLAGS.opt, FLAGS.learning_rate, FLAGS.alpha, FLAGS.training_steps
    )
    opt_state = optimizer.init(params)

    def loglikelihood_fn(x, y, noise, type="Pixel"):
        # Gaussian log-likelihood of reconstruction y given observation x,
        # either whitened in Fourier space by the power spectrum `noise`
        # or as plain per-pixel residuals.
        stamp_size = x.shape[1]
        if type == "Fourier":
            print("in Fourier")
            xp = (
                jnp.fft.rfft2(x)
                / (jnp.sqrt(jnp.exp(noise)) + 0j)
                / stamp_size**2
                * (2 * jnp.pi) ** 2
            )
            yp = (
                jnp.fft.rfft2(y)
                / (jnp.sqrt(jnp.exp(noise)) + 0j)
                / stamp_size**2
                * (2 * jnp.pi) ** 2
            )
            return -0.5 * (jnp.abs(xp - yp) ** 2).sum()
        elif type == "Pixel":
            print("in pixels")
            # NOTE(review): the per-pixel std is hard-coded to 0.005 and
            # the `noise` argument is ignored on this path (see the
            # commented line) — confirm this is intentional.
            # return - 0.5 * (jnp.abs(x - y)**2).sum() / noise**2
            return -0.5 * (jnp.abs(x - y) ** 2).sum() / 0.005**2
        else:
            raise NotImplementedError

    # Freeze the noise model from the flag, then vectorise over the batch.
    loglikelihood_fn = partial(loglikelihood_fn, type=FLAGS.noise)
    loglikelihood_fn = jax.vmap(loglikelihood_fn)

    @jax.jit
    def loss_fn(params, rng_key, batch, reg_term):  # state, rng_key, batch):
        """Negative ELBO (and negative mean log-likelihood as aux)."""
        x = batch["image"]
        kpsf_real = batch["kpsf_real"]
        kpsf_imag = batch["kpsf_imag"]
        ps = batch["ps"]
        std = batch["noise_std"]
        kpsf = kpsf_real + 1j * kpsf_imag

        # Autoencode the batch
        q, posterior, code = Autoencoder.apply(params, x=x, seed=rng_key)
        log_prob = posterior.log_prob(code)
        # Convolve the reconstruction with the (Fourier-space) PSF.
        p = jax.vmap(convolve_kpsf)(q[..., 0], kpsf[..., 0])
        p = jnp.expand_dims(p, axis=-1)
        # p = q
        if FLAGS.noise == "Fourier":
            print("using the Fourier likelihood")
            log_likelihood = loglikelihood_fn(x, p, ps)
        elif FLAGS.noise == "Pixel":
            print("using the Pixel likelihood")
            log_likelihood = loglikelihood_fn(x, p, std)
        else:
            raise NotImplementedError
        print("log_likelihood", log_likelihood.shape)

        # KL divergence between the posterior p(z|x) and the unit-normal prior
        prior = tfd.MultivariateNormalDiag(loc=jnp.zeros_like(code), scale_diag=[1.0])
        kl = (log_prob - prior.log_prob(code)).sum((-2, -1))

        # ELBO with a regularization factor on the KL term
        elbo = log_likelihood - reg_term * kl
        print("ll", log_likelihood.shape)
        print("kl", kl.shape)
        print("elbo", elbo.shape)
        loss = -jnp.mean(elbo)
        return loss, -jnp.mean(log_likelihood)

    """ # Veryfing that the 'value_and_grad' works fine
    loss, grads = jax.value_and_grad(loss_fn)(params, rng, batch_im, kl_reg_w)
    """

    @jax.jit
    def update(params, rng_key, opt_state, batch):
        """Single SGD update step."""
        (loss, log_likelihood), grads = jax.value_and_grad(loss_fn, has_aux=True)(
            params, rng_key, batch, FLAGS.reg_value
        )
        updates, new_opt_state = optimizer.update(grads, opt_state, params)
        new_params = optax.apply_updates(params, updates)
        return loss, log_likelihood, new_params, new_opt_state

    """loss, log_likelihood, params, opt_state = update(params, rng_1, opt_state, batch_im)"""

    # Login to wandb
    wandb.login()

    # Initializing a Weights & Biases Run
    wandb.init(
        project=FLAGS.project,
        name=FLAGS.name,
        # tags="kl_reg={:.4f}".format(reg),
    )

    # Setting the configs of our experiment using `wandb.config`.
    # This way, Weights & Biases automatically syncs the configs of
    # our experiment which could be used to reproduce the results.
    config = wandb.config
    config.seed = 42
    config.batch_size = FLAGS.batch_size
    # config.validation_split = 0.2
    # config.pooling = "avg"
    config.learning_rate = FLAGS.learning_rate
    config.steps = FLAGS.training_steps
    config.kl_reg = FLAGS.reg_value
    config.using_kl = False if FLAGS.reg_value == 0 else True
    config.latent_dim = latent_dim
    # config.type_model = FLAGS.experiment
    config.commit_version = get_git_commit_version()
    config.act_fn = FLAGS.act_fn
    config.opt = FLAGS.opt
    config.resnet_blocks = FLAGS.resblocks
    config.steps_schedule = FLAGS.step_sch
    config.scheduler = "Cosine Decay"
    config.interpolation = "Bicubic"
    config.noise_method = FLAGS.noise
    config.alpha = FLAGS.alpha

    # Define the metrics we are interested in the minimum of
    wandb.define_metric("loss", summary="min")
    wandb.define_metric("log_likelihood", summary="min")
    wandb.define_metric("test_loss", summary="min")
    wandb.define_metric("test_log_likelihood", summary="min")

    losses = []
    losses_test = []
    losses_test_step = []
    log_liks = []
    log_liks_test = []
    best_eval_loss = 1e6

    # Train the model for the configured number of steps.
    # (`random` here is jax.random, imported at the top of the file.)
    for step in tqdm(range(1, config.steps + 1)):
        rng, rng_1 = random.split(rng)
        # Iterating over the dataset
        batch_im = next(dset)
        loss, log_likelihood, params, opt_state = update(
            params, rng_1, opt_state, batch_im
        )
        losses.append(loss)
        log_liks.append(log_likelihood)

        # Log metrics inside the training loop to visualize performance
        wandb.log(
            {
                "loss": loss,
                "log_likelihood": log_likelihood,
            },
            step=step,
        )

        # Saving best checkpoint (lowest training loss so far)
        if loss < best_eval_loss:
            best_eval_loss = loss
            # if best_eval_loss < 0:
            save_checkpoint("checkpoint.msgpack", params, step)

        # Every 2% of the run, evaluate the loss over the full test split
        if step % (config.steps // 50) == 0:
            dataset_eval = input_fn("test")
            test_iterator = dataset_eval.as_numpy_iterator()
            for_list_mean = []
            for img in test_iterator:
                rng, rng_1 = random.split(rng)
                loss_test, log_likelihood_test = loss_fn(
                    params, rng_1, img, FLAGS.reg_value
                )
                for_list_mean.append(loss_test)
            losses_test.append(np.mean(for_list_mean))
            losses_test_step.append(step)
            # NOTE(review): only the log-likelihood of the LAST test batch
            # is recorded here, while the loss is averaged — confirm.
            log_liks_test.append(log_likelihood_test)
            wandb.log(
                {
                    "test_loss": losses_test[-1],
                    "test_log_likelihood": log_liks_test[-1],
                },
                step=step,
            )
            print(
                "Step: {}, loss: {:.2f}, loss test: {:.2f}".format(
                    step, loss, losses_test[-1]
                )
            )

    # Loading checkpoint for the best step
    # params = load_checkpoint("checkpoint.msgpack", params)

    # Obtaining the step with the lowest loss value
    loss_min = min(losses)
    best_step = losses.index(loss_min) + 1
    print("\nBest Step: {}, loss: {:.2f}".format(best_step, loss_min))

    # Obtaining the step with the lowest log-likelihood value
    log_lik_min = min(log_liks)
    best_step_log = log_liks.index(log_lik_min) + 1
    print("\nBest Step: {}, log-likelihood: {:.2f}".format(best_step_log, log_lik_min))

    best_steps = {
        "best_step_loss": best_step,
        "best_step_log_lik": best_step_log,
    }
    wandb.log(best_steps)

    total_steps = np.arange(1, config.steps + 1)

    # Create the per-run 'results' folder for all the saved plot images.
    results_folder = "results/{}".format(get_wandb_local_dir(wandb.run.dir))
    create_folder(results_folder)

    # Saving the loss plots
    save_plot_as_image(
        folder_path=results_folder,
        plot_title="Loglog of the Loss function - Train (KL reg value = {})".format(
            FLAGS.reg_value
        ),
        x_data=total_steps,
        y_data=losses,
        plot_type="loglog",
        file_name="loglog_loss.png",
    )
    save_plot_as_image(
        folder_path=results_folder,
        plot_title="Loglog of the Loss function - Test (KL reg value = {})".format(
            FLAGS.reg_value
        ),
        x_data=losses_test_step,
        y_data=losses_test,
        plot_type="loglog",
        file_name="loglog_loss_test.png",
    )

    # Saving the log-likelihood plots
    save_plot_as_image(
        folder_path=results_folder,
        plot_title="Loglog of the Log-likelihood - Train (KL reg value = {})".format(
            FLAGS.reg_value
        ),
        x_data=total_steps,
        y_data=log_liks,
        plot_type="loglog",
        file_name="loglog_log_likelihood.png",
    )
    save_plot_as_image(
        folder_path=results_folder,
        plot_title="Loglog of the Log-likelihood - Test (KL reg value = {})".format(
            FLAGS.reg_value
        ),
        x_data=losses_test_step,
        y_data=log_liks_test,
        plot_type="loglog",
        file_name="loglog_log_likelihood_test.png",
    )

    # Predicting over an example batch of test data
    dataset_eval = input_fn("test")
    test_iterator = dataset_eval.as_numpy_iterator()
    batch = next(test_iterator)
    x = batch["image"]
    kpsf_real = batch["kpsf_real"]
    kpsf_imag = batch["kpsf_imag"]
    kpsf = kpsf_real + 1j * kpsf_imag

    # Taking 16 images as example
    batch = x[:16, ...]
    kpsf = kpsf[:16, ...]
    rng, rng_1 = random.split(rng)

    # Reconstruction from the trained autoencoder
    q, _, _ = Autoencoder.apply(params, x=batch, seed=rng_1)

    # PSF-convolved reconstruction for comparison with the observations
    rng, rng_1 = random.split(rng)
    p = jax.vmap(convolve_kpsf)(q[..., 0], kpsf[..., 0])
    p = jnp.expand_dims(p, axis=-1)

    min_value, max_value = norm_values_one_diff(batch, p, num_images=8)

    # Save predicted images and their difference from the originals
    # save_samples(folder_path=results_folder, decode=q, conv=p, batch=batch)
    save_samples(
        folder_path=results_folder,
        decode=q,
        conv=p,
        batch=batch,
        vmin=min_value,
        vmax=max_value,
    )

    wandb.finish()
if __name__ == "__main__":
    # Parse the command-line flags before touching FLAGS values.
    app.FLAGS(sys.argv)
    # Pin the process to the requested GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    # NOTE(review): the second XLA_FLAGS assignment overwrites the first,
    # so the cuda-data-dir flag never reaches XLA — confirm whether both
    # flags should be combined into one space-separated string.
    os.environ["XLA_FLAGS"] = "--xla_gpu_cuda_data_dir=/usr/local/cuda-12.1"
    os.environ["XLA_FLAGS"] = "--xla_gpu_force_compilation_parallelism=1"
    app.run(main)
| JonnyyTorres/Galsim_JAX | VAE_SD_C.py | VAE_SD_C.py | py | 15,814 | python | en | code | 1 | github-code | 13 |
8841104776 | from django.urls import path
from . import views
# URL routes for the app; each `name` is used for reverse lookups in
# templates.  Item views are addressed by primary key (string capture).
urlpatterns = [
    path('',views.home, name='home'),
    path('additem',views.additem, name='additem'),
    path('edit/<str:pk>',views.edit, name='edit'),
    path('crossoff/<str:pk>',views.crossoff, name='crossoff'),
    path('uncrossoff/<str:pk>',views.uncrossoff, name='uncrossoff'),
    path('delete/<str:pk>',views.delete, name='delete'),
]
| EpicGL/ToDolist | todolist/urls.py | urls.py | py | 400 | python | en | code | 0 | github-code | 13 |
15568512225 | from tkinter import *
from tkinter import scrolledtext
from tkinter.filedialog import askopenfilename
from tkinter import messagebox
import os
import os.path
import sys
import Ice
import IceGrid
import time
from random import randrange
import vlc
Ice.loadSlice('Server.ice')
import Server
# Window instance
window = Tk()
# Window settings
window.title("Client")
# window.geometry('1280x720')

# Module-level playback state shared by play()/stop()/pause().
currentMusic = None
mediaPlayer = None
myLibVlcInstance = vlc.Instance()

# f1: top bar (search), f2: main area (form + list).
f1 = Frame(window)
f1.pack(side = TOP)
f2 = Frame(window)
f2.pack(side = BOTTOM)

# Connect to the ICE server: try the well-known "hello" proxy first,
# then fall back to an IceGrid query lookup by type.
communicator = Ice.initialize(sys.argv, "config.client")
hello = None
try:
    print("****** Connect to hello ******")
    hello = Server.HelloPrx.checkedCast(communicator.stringToProxy("hello"))
except Ice.NotRegisteredException:
    print("****** Connect to query ******")
    query = IceGrid.QueryPrx.checkedCast(communicator.stringToProxy("Server/Query"))
    hello = Server.HelloPrx.checkedCast(query.findObjectByType("::Server::Hello"))
if not hello:
    print("couldn't find a `::Server::Hello' object.")
    sys.exit(1)
# Initial music catalogue fetched from the server.
musics = hello.findAll()
print(musics)

# Search input field
txt = Entry(f1)
txt.pack()

# Module-level selection state shared by the handlers below.
filePath = ""
currentIdentifier = ""
currentIndexListView = None
def search():
# Current text in the TextArea
currentText = txt.get()
print(currentText)
# Fetch the music
res = hello.searchBar(currentText)
listView.delete(0,'end')
for m in res:
listView.insert(m.identifier, m.titre)
# Handler for the "Voice" button
def voice():
    """Send the search field content to the server's voice endpoint."""
    # Current text in the search field
    currentText = txt.get()
    print(currentText)
    # NOTE(review): `voice` is redefined near the bottom of this file;
    # only btn1 below (bound at creation time) keeps this version.
    res = hello.startVoice(currentText)
# Search button
btn = Button(f1, text="Search", command=search)
btn.pack(side = LEFT, fill = BOTH)
# Voice button (bound to the voice() defined just above)
btn1 = Button(f1, text="Voice", command=voice)
btn1.pack(side = RIGHT, fill = BOTH)
def onselect(event):
    """List-selection callback: load the chosen music into the edit form."""
    global currentIdentifier, filePath, currentIndexListView
    # Get the widget that fired the event; ignore empty selections.
    w = event.widget
    if not w or not w.curselection():
        return
    # Current list view index
    currentIndexListView = int(w.curselection()[0])
    # Get the corresponding music instance from the cached catalogue
    musicItem = musics[currentIndexListView]
    print('You selected item {}: "{}"'.format(currentIndexListView, musicItem.titre))
    # Remember the server-side identifier of the selection
    currentIdentifier = musicItem.identifier
    # Load title
    titre.delete(0,END)
    titre.insert(0,musicItem.titre)
    # Load artist
    artiste.delete(0,END)
    artiste.insert(0,musicItem.artiste)
    # Load album
    album.delete(0,END)
    album.insert(0,musicItem.album)
    # Remember the (remote) path of the selection
    filePath = musicItem.path
# Fill up the list view with the catalogue and attach a scrollbar.
listView = Listbox(f2)
listView.bind('<<ListboxSelect>>', onselect)
listView.pack(side = RIGHT, fill = BOTH)
for i, m in enumerate(musics):
    listView.insert(i, m.titre)
scrollbar = Scrollbar(f2)
scrollbar.pack(side = RIGHT, fill = BOTH, expand = False)
# Two-way binding between the listbox and its scrollbar.
listView.config(yscrollcommand = scrollbar.set)
scrollbar.config(command = listView.yview)
def fileChoose():
    """Open a file dialog and remember the chosen path module-wide."""
    global filePath
    filePath = askopenfilename()
def fileUpload(path):
    """Upload the file at `path` to the server in 60 KiB chunks.

    Uses asynchronous ICE sends (`begin_send`), keeping at most
    `numRequests` uncompleted requests in flight, then waits for the
    stragglers.  Returns the randomized remote path the server stored
    the file under.
    """
    # NOTE(review): the file handle is never closed — consider `with`.
    file = open(path,'rb')
    chunkSize = 61440
    offset = 0
    results = []
    numRequests = 5
    # Build a unique remote name from a random number, a timestamp and
    # the original file extension.
    extension = path.split(".")[-1]
    remotePath = "musics/" + str(randrange(999999)) + "_" + str(int(time.time())) + "." + extension
    while True:
        chuck = file.read(chunkSize) # Read a chunk
        if chuck == bytes('','utf-8') or chuck == None:
            break
        # Asynchronous upload of this chunk at the current offset.
        r = hello.begin_send(offset, chuck, remotePath)
        offset += len(chuck)
        r.waitForSent()
        results.append(r)
        # Throttle: never keep more than numRequests requests pending.
        while len(results) > numRequests:
            r = results[0]
            del results[0]
            r.waitForCompleted()
    # Drain the remaining in-flight requests.
    while len(results) > 0:
        r = results[0]
        del results[0]
        r.waitForCompleted()
    print("Finished")
    return remotePath
# Create a new music entry from the form
def add():
    insert("add")

# Rewrite the selected music entry from the form
def update():
    insert("update")
def updateList():
    """Re-fetch the full catalogue from the server and refill the list."""
    global musics, listView
    print("---------------------------")
    print(musics)
    # Replace the cached catalogue with the server's current state.
    musics = hello.findAll()
    print("---------------------------")
    print(musics)
    print("---------------------------")
    listView.delete(0,'end')
    for i, m in enumerate(musics):
        listView.insert(i, m.titre)
# Insert or update a music entry in the database
def insert(status):
    """Validate the edit form and push it to the server.

    status: "add" uploads the selected local file and creates a new
    entry; "update" reuses the already-stored remote path and rewrites
    the entry identified by `currentIdentifier`.
    """
    global filePath, currentIdentifier, currentIndexListView
    print("File Path:")
    print(filePath)
    # Every field is required.  The original check joined the tests with
    # `and`, so it only warned when ALL fields were empty and let
    # partially-filled forms straight through.
    if not filePath or not titre.get() or not album.get() or not artiste.get():
        messagebox.showwarning("Champs manquant","Il y a un chammps manquant!")
        return
    if status == "add":
        # New entry: push the local file to the server first.
        remotePath = fileUpload(filePath)
    else:
        print(filePath)
        remotePath = filePath
        print(filePath)
        print("File not found locally but continuous!")
    if status == "add":
        m = hello.add(titre.get(), artiste.get(), album.get(), remotePath)
        musics.append(m)
        filePath = None
    elif status == "update" and currentIdentifier is not None:
        m = hello.update(str(currentIdentifier), titre.get(), artiste.get(), album.get(), remotePath)
        # Drop the stale entry in place (the original used a side-effect
        # list comprehension that called remove() while iterating).
        musics[:] = [a for a in musics if a.identifier != currentIdentifier]
        musics.append(m)
    # Clear all fields, then refresh the list from the server.
    clearFields()
    updateList()
def clearFields():
    """Reset the form entries and the module-level selection state."""
    # The original function assigned these names without declaring them
    # global, so only the Tk entries were actually cleared; the module
    # state (selection, file path) silently kept its old values.
    global currentIdentifier, filePath, currentIndexListView
    currentIdentifier = None
    titre.delete(0,END)
    artiste.delete(0,END)
    album.delete(0,END)
    filePath = None
    currentIndexListView = None
# Remove the selected music from the database
def delete():
    """Delete the list-view selection locally and on the server."""
    print("Delete!")
    # NOTE(review): raises IndexError when nothing is selected.
    index = listView.curselection()[0]
    m = musics.pop(index)
    print(index)
    print(m.titre)
    listView.delete(index)
    hello.delete(m.identifier)
# Play Audio
def play():
    """Stream the selected music: ask the server for a URL and play it."""
    global currentMusic
    global mediaPlayer
    print("Play!")
    # Stop whatever is currently playing before switching tracks.
    stop()
    # NOTE(review): raises IndexError when nothing is selected.
    index = listView.curselection()[0]
    print(index)
    m = musics[index]
    print(m)
    # The server returns a streaming URL for this track.
    url = hello.start(m.identifier)
    print("url")
    print(url)
    currentMusic = url
    print(currentMusic)
    print(url)
    # Hand the URL to libVLC and start playback.
    media = vlc.libvlc_media_new_location(myLibVlcInstance, bytes(url,'utf-8'))
    mediaPlayer = vlc.libvlc_media_player_new_from_media(media)
    ret = vlc.libvlc_media_player_play(mediaPlayer)
# Stop Audio
def stop():
    """Stop the current libVLC playback, if any."""
    global currentMusic
    global mediaPlayer
    print("Stop!")
    print(currentMusic)
    # Only touch the player when a track has been started.
    if currentMusic:
        print("Stoped!")
        vlc.libvlc_media_player_stop(mediaPlayer)
# Pause Audio
def pause():
    """Toggle pause on the current libVLC playback, if any."""
    global currentMusic
    global mediaPlayer
    print("Pause!")
    print(currentMusic)
    # Only touch the player when a track has been started.
    if currentMusic:
        print("Paused!")
        vlc.libvlc_media_player_pause(mediaPlayer)
def voice():
    """Hard-coded voice-command demo.

    NOTE(review): this redefines the earlier `voice()`; the lambda on
    voiceBtn calls this version while btn1 (bound at creation time)
    still calls the first one — consider renaming to avoid collision.
    """
    resVoice = hello.startVoice("demarre D-Sturb & High Voltage")
def ssl():
    """Print the server's SSL demo response.

    NOTE(review): shadows the stdlib `ssl` module name if it were ever
    imported in this file.
    """
    print(hello.demoSSL())
# Edit form: one labelled entry per metadata field.
Label(f2, text = "titre").pack()
titre = Entry(f2)
titre.pack()
Label(f2, text = "artiste").pack()
artiste = Entry(f2)
artiste.pack()
Label(f2, text = "album").pack()
album = Entry(f2)
album.pack()
Label(f2, text = "path").pack()
path = Button(f2, text="File...", command=fileChoose).pack()

# Bottom button bar.
f3 = Frame(f2)
f3.pack(side = BOTTOM)
# NOTE(review): `Button(...).pack()` returns None, so all the *Btn
# variables below (and `path` above) hold None — harmless, but the
# widget references are lost.
addBtn = Button(f3, text="Add", command=add).pack(side = LEFT)
updateBtn = Button(f3, text="Update", command=update).pack(side = LEFT)
deleteBtn = Button(f3, text="Delete", command=delete).pack(side = LEFT)
stopBtn = Button(f3, text="Stop", command=lambda *args: stop()).pack(side = RIGHT)
pauseBtn = Button(f3, text="Pause", command=lambda *args: pause()).pack(side = RIGHT)
playBtn = Button(f3, text="Play", command=lambda *args: play()).pack(side = RIGHT)
voiceBtn = Button(f3, text="Voice", command=lambda *args: voice()).pack(side = RIGHT)
clearBtn = Button(f3, text="Clear", command=lambda *args: clearFields()).pack(side = RIGHT)
# sslBtn = Button(f3, text="SSL", command=lambda *args: ssl()).pack(side = RIGHT)

# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
class Solution:
    def frequencySort(self, s: str) -> str:
        """Return the characters of `s` sorted by decreasing frequency.

        Equal-frequency characters keep their first-appearance order
        (``Counter.most_common`` performs a stable sort).  Replaces the
        original's manual sort plus quadratic ``ans = ans + [ch] * f``
        list concatenation (which also shadowed the parameter ``s``)
        with a single join over the counted pairs.
        """
        freq = collections.Counter(s)
        return ''.join(ch * count for ch, count in freq.most_common())
| ritwik-deshpande/LeetCode | 451-sort-characters-by-frequency/451-sort-characters-by-frequency.py | 451-sort-characters-by-frequency.py | py | 352 | python | en | code | 0 | github-code | 13 |
40492650831 | import unittest
import csv
import json
import os
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
import pickle
import random
import datetime
import sqlite3
from random import gauss
from time import sleep as original_sleep
import logging
# initialize and setup logging system
# create a new Chrome session; implicitly wait up to 30 s for elements
browser = webdriver.Chrome()
browser.implicitly_wait(30)
browser.maximize_window()
# NOTE(review): credentials are expected here but left blank; `password`
# is never used in the code visible in this file.
username=''
password= ''
# Spread of the randomized sleep times as a fraction of the mean
# (see randomize_time), and the global sleep scale set via
# set_sleep_percentage().
STDEV = 0.5
sleep_percentage = 1
def scroll_bottom(browser, element, range_int):
    """Scroll `element` to its bottom repeatedly via JavaScript.

    The requested number of rounds is capped at 50 and then halved,
    so at most 25 scroll commands are issued.
    """
    rounds = int(min(range_int, 50) / 2)
    for _ in range(rounds):
        browser.execute_script(
            "arguments[0].scrollTop = arguments[0].scrollHeight", element)
    # update server calls
    return
def formatNumber(number):
    """Convert a human-formatted count string to an integer.

    Handles thousands separators ("1,234" / "1.234") and the "k"/"m"
    suffixes Instagram uses ("1.2k" -> 1200, "1.5m" -> 1500000).

    The original implementation emulated the suffixes by stripping the
    decimal point and appending zeros, which was wrong for suffixed
    values without a decimal part (e.g. "12k" became 1200 instead of
    12000).  A '.' with no suffix is still treated as a thousands
    separator, matching the old behaviour.
    """
    cleaned = number.replace(',', '')
    if cleaned.endswith(('k', 'm')):
        multiplier = 1000 if cleaned.endswith('k') else 1000000
        return int(float(cleaned[:-1]) * multiplier)
    return int(cleaned.replace('.', ''))
def randomize_time(mean):
allowed_range = mean * STDEV
stdev = allowed_range / 3 # 99.73% chance to be in the allowed range
t = 0
while abs(mean - t) > allowed_range:
t = gauss(mean, stdev)
return t
def set_sleep_percentage(percentage):
global sleep_percentage
sleep_percentage = percentage/100
def sleep(t, custom_percentage=None):
if custom_percentage is None:
custom_percentage = sleep_percentage
time = randomize_time(t)*custom_percentage
original_sleep(time)
def follow_user_followers(
usernames,
amount,
randomize=False,
interact=False,
sleep_delay=600):
userFollowed = []
if not isinstance(usernames, list):
usernames = [usernames]
for user in usernames:
userFollowed += follow_given_user_followers(browser,
user,
amount,
'',
username,
'',
randomize,
sleep_delay,
'',
'',
'')
if interact:
userFollowed = random.sample(userFollowed, int(ceil(
user_interact_percentage * len(userFollowed) / 100)))
like_by_users(userFollowed,
user_interact_amount,
user_interact_random,
user_interact_media)
return
def follow_given_user_followers(browser,
user_name,
amount,
dont_include,
login,
follow_restrict,
random,
delay,
blacklist,
logger,
follow_times):
browser.get('https://' + user_name)
# update server calls
# check how many poeple are following this user.
# throw RuntimeWarning if we are 0 people following this user
allfollowing = formatNumber(
browser.find_element_by_xpath("//li[2]/a/span").text)
following_link = browser.find_elements_by_xpath(
"//a[starts-with(@href,'/p/Bd')]")
following_link[2].send_keys("\n")
sleep(5)
likes_link = browser.find_elements_by_xpath(
"//a[@class='_nzn1h _gu6vm']")
likes_link[0].send_keys("\n")
personFollowed = follow_through_dialog(browser,
user_name,
amount,
dont_include,
login,
follow_restrict,
allfollowing,
random,
delay,
blacklist,
logger,
follow_times,
callbacks=[])
return personFollowed
def follow_through_dialog(browser,
user_name,
amount,
dont_include,
login,
follow_restrict,
allfollowing,
randomize,
delay,
blacklist,
logger,
follow_times,
callbacks=[]):
sleep(2)
person_followed = []
real_amount = amount
if randomize and amount >= 3:
# expanding the popultaion for better sampling distribution
amount = amount * 3
# find dialog box
dialog = browser.find_element_by_xpath(
"//div[contains(@class,'_ms7sh') and contains(@class,'_41iuk')]")
# scroll down the page
scroll_bottom(browser, dialog, allfollowing)
# get follow buttons. This approch will find the follow buttons and
# ignore the Unfollow/Requested buttons.
follow_buttons = dialog.find_elements_by_xpath(
"//div/div/span/button[text()='Follow']")
person_list = []
abort = False
total_list = len(follow_buttons)
# scroll down if the generated list of user to follow is not enough to
# follow amount set
while (total_list < amount) and not abort:
amount_left = amount - total_list
before_scroll = total_list
scroll_bottom(browser, dialog, amount_left)
sleep(1)
follow_buttons = dialog.find_elements_by_xpath(
"//div/div/span/button[text()='Follow']")
total_list = len(follow_buttons)
abort = (before_scroll == total_list)
for person in follow_buttons:
if person and hasattr(person, 'text') and person.text:
person_list.append(person.find_element_by_xpath("../../../*")
.find_elements_by_tag_name("a")[1].text)
if amount >= total_list:
amount = total_list
# follow loop
hasSlept = False
btnPerson = list(zip(follow_buttons, person_list))
if randomize:
sample = random.sample(range(0, len(follow_buttons)), real_amount)
finalBtnPerson = []
for num in sample:
finalBtnPerson.append(btnPerson[num])
else:
finalBtnPerson = btnPerson
followNum = 0
for button, person in finalBtnPerson:
if followNum >= real_amount:
break
if followNum != 0 and hasSlept is False and followNum % 10 == 0:
sleep(delay)
hasSlept = True
continue
followNum += 1
# Register this session's followed user for further interaction
person_followed.append(person)
button.send_keys("\n")
for callback in callbacks:
callback(person.encode('utf-8'))
sleep(15)
# To only sleep once until there is the next follow
if hasSlept:
hasSlept = False
continue
if randomize:
repickedNum = -1
while repickedNum not in sample and repickedNum != -1:
repickedNum = random.randint(0, len(btnPerson))
sample.append(repickedNum)
finalBtnPerson.append(btnPerson[repickedNum])
continue
return person_followed
browser.get('https://www.instagram.com')
# try to load cookie from username
try:
browser.get('https://www.google.com')
for cookie in pickle.load(open('./logs/{}_cookie.pkl'
.format(username), 'rb')):
browser.add_cookie(cookie)
# logged in!
except (WebDriverException, OSError, IOError):
print("Cookie file not found, creating cookie...")
browser.get('https://www.instagram.com')
login_elem = browser.find_element_by_xpath(
"//article/div/div/p/a[text()='Log in']")
if login_elem is not None:
ActionChains(browser).move_to_element(login_elem).click().perform()
# Enter username and password and logs the user in
# Sometimes the element name isn't 'Username' and 'Password'
# (valid for placeholder too)
input_username = browser.find_elements_by_xpath(
"//input[@name='username']")
ActionChains(browser).move_to_element(input_username[0]). \
click().send_keys(username).perform()
sleep(1)
input_password = browser.find_elements_by_xpath(
"//input[@name='password']")
ActionChains(browser).move_to_element(input_password[0]). \
click().send_keys(password).perform()
login_button = browser.find_element_by_xpath(
"//form/span/button[text()='Log in']")
ActionChains(browser).move_to_element(login_button).click().perform()
sleep(5)
follow_user_followers(['', '', ''], amount=0, randomize=False)
# close the browser window
browser.quit()
| ParasVc98/ScriptyCrawler | instc.py | instc.py | py | 7,625 | python | en | code | 0 | github-code | 13 |
33556647326 | import boto3
import os
from PIL import Image
from io import BytesIO
# Please Note, Lambda Layers Need to Be Created for External Libraries
class ProcessThumbnail:
def __init__(self):
self.s3_client = boto3.client('s3')
self.bucket_name = os.environ['BUCKET_NAME']
self.thumbnail_folder = 'Thumbnails/'
def generate_thumbnail(self, image_key):
try:
image_name = image_key.split('/')[-1]
thumbnail_key = 'Thumbnail/' + image_name
crop_lengths = (300, 300, 300, 300)
s3_response = self.s3_client.get_object(Bucket=self.bucket_name, Key=image_key)
image_data = s3_response['Body'].read()
# Here, we also have option of using 10 GB ephermal storage of AWS Lambda istead of using Streams
image = Image.open(BytesIO(image_data))
cropped_image = image.crop(crop_lengths)
with BytesIO() as output:
cropped_image.save(output, format='JPEG')
output.seek(0)
self.s3_client.put_object(Bucket=self.bucket_name, Key=thumbnail_key, Body=output)
return True
except Exception as e:
print(f"Error receiving messages: {str(e)}")
return False
class SQSReader:
def __init__(self):
self.sqs_client = boto3.client('sqs')
self.queue_url = os.environ['SQS_QUEUE_URL']
def read_messages(self, max_messages=4, visibility_timeout=30, wait_time_seconds=10):
try:
response = self.sqs_client.receive_message(
QueueUrl=self.queue_url,
MaxNumberOfMessages=max_messages,
VisibilityTimeout=visibility_timeout,
WaitTimeSeconds=wait_time_seconds
)
if 'Messages' in response:
return [(message['Body']['object_key'], message['ReceiptHandle']) for message in response['Messages']]
return []
except Exception as e:
print(f"Error receiving messages: {str(e)}")
def delete_message(self, receipt_handle):
try:
self.sqs_client.delete_message(
QueueUrl=self.sqs_queue_url,
ReceiptHandle=receipt_handle
)
except Exception as e:
print(f"Error deleting message: {str(e)}")
def lambda_handler(event, context):
print('Input Event: ', event, sep="\n")
try:
sqs_reader = SQSReader()
messages = sqs_reader.read_messages()
process_thumbnail = ProcessThumbnail()
for message in messages:
s3_object_key = message[0]
receipt_handle = message[1]
if(process_thumbnail.generate_thumbnail(s3_object_key)):
# Deleting message from SQS Queue
sqs_reader.delete_message(receipt_handle)
print(f"Successfully generated thumbnail for {s3_object_key}")
else:
print(f"Failed to generate thumbnail for {s3_object_key}")
except Exception as e:
print(f"Error receiving messages: {str(e)}")
| mohitverma158/image-thumbnail-generator | GenerateThumbnail.py | GenerateThumbnail.py | py | 3,251 | python | en | code | 0 | github-code | 13 |
6269580537 | import numpy as np
import cv2
from matplotlib import pyplot as plt
def getBitPlane(image,bit_plane):
img_bitplane = np.mod(np.floor(image/np.power(2, bit_plane)), 2)
return img_bitplane.astype('uint8')
imge_path='A1_resources/DIP_2019_A1/cameraman.png'
image = cv2.imread(imge_path,0)
plt.title("Original Image")
plt.axis('off')
plt.imshow(image,cmap='gray')
plt.show()
plt.figure(figsize=(20, 20))
num=0
images=[]
for bit_plane in range(8):
plt.subplot(1,8,num+1)
plt.axis('off')
plt.title("Bit plane " +str(bit_plane))
img=getBitPlane(image,bit_plane)
images.append(img)
plt.imshow(img,cmap='gray')
num=num+1
h,w=image.shape
msb_img = np.zeros((h, w), np.uint8)
lsb_img = np.zeros((h, w), np.uint8)
new_img = (2 * (2 * (2 * (2 * (2 * (2 * (2 * images[7] + images[6])
+ images[5]) + images[4]) + images[3]) + images[2]) + images[1]) + images[0]);
lsb_img= (2 * (2 * (2 * (2 * (2 * (2 * images[7] + images[6])
+ images[5]) + images[4]) + images[3]) + images[2]) + images[1])
msb_img = (2 * (2 * (2 * (2 * (2 * (2 * (images[6])
+ images[5]) + images[4]) + images[3]) + images[2]) + images[1]) + images[0]);
plt.figure(figsize=(8, 8))
plt.subplot(3,2,1)
plt.title("Original Image")
plt.axis('off')
plt.imshow(msb_img,cmap='gray')
plt.subplot(3,2,2)
plt.xlabel('Pixel Value',fontweight='bold')
plt.ylabel('Pixel Count', fontweight='bold')
plt.hist(image.ravel(),256,[0,256])
plt.subplot(3,2,3)
plt.title("MSB set to zero")
plt.axis('off')
plt.imshow(msb_img,cmap='gray')
plt.subplot(3,2,4)
plt.xlabel('Pixel Value',fontweight='bold')
plt.ylabel('Pixel Count', fontweight='bold')
plt.hist(msb_img.ravel(),256,[0,256])
plt.subplot(3,2,5)
plt.title("LSB set to zero")
plt.axis('off')
plt.imshow(lsb_img,cmap='gray')
plt.subplot(3,2,6)
plt.xlabel('Pixel Value',fontweight='bold')
plt.ylabel('Pixel Count', fontweight='bold')
plt.hist(lsb_img.ravel(),256,[0,256])
plt.show()
| ddurgaprasad/DIP | Assignment1/bitslicing.py | bitslicing.py | py | 1,972 | python | en | code | 0 | github-code | 13 |
26068897738 | def read():
return list(map(int, input().split()))
global d
n, d = read()
G = []
for i in range(int(n)):
G.append(read())
from math import sqrt
def get_distance(a, b):
return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def is_safe(vector):
if vector[0] + d >= 50 or vector[0] - d <= -50 or vector[1] + d >= 50 or vector[1] - d <= -50:
return True
else: return False
def BFS(layer, his, G):
if not layer: return
his += layer
layer_ = []
for node in layer:
for p in range(len(G)):
if not p in his:
if get_distance(G[node], G[p]) <= d:
layer_.append(p)
return layer_, his
def get_ans(G):
ans = []
for i in range(len(G)):
if is_safe(G[i]):
ans.append(i)
return ans
def get_layer(G):
layer = []
for i in range(len(G)):
if get_distance([0,0], G[i]) <= d + 7.5:
layer.append(i)
return layer
ans = get_ans(G)
layer = get_layer(G)
for p in layer:
if p in ans:
print('Yes')
exit(0)
if not ans or not layer:
print('No')
exit(0)
his = [layer[0]]
while layer:
layer, his = BFS(layer, his, G)
for p in layer:
if p in ans:
print('Yes')
exit(0)
print('No')
| piglaker/PTA_ZJU_mooc | src16.py | src16.py | py | 1,286 | python | en | code | 0 | github-code | 13 |
73910579539 | import json
import os
import sys
import time
from pathlib import Path
import click
import yaml
from distutils.dir_util import copy_tree
from loguru import logger
from slugify import slugify
from dataherb.serve.mkdocs_templates import index_template as _index_template
from dataherb.serve.mkdocs_templates import site_config as _site_config
from dataherb.serve.models import SaveModel
logger.remove()
logger.add(sys.stderr, level="INFO", enqueue=True)
class SaveMkDocs(SaveModel):
"""
SaveMkDocs saves the dataset files from source as MkDocs files
"""
def __init__(self, flora, workdir, folder):
super().__init__(flora, workdir)
if folder is None:
folder = ".serve"
self.folder = folder
self.mkdocs_folder = Path(self.workdir) / self.folder
self.mkdocs_config = self.mkdocs_folder / "mkdocs.yml"
@staticmethod
def _generate_markdown_list_meta(dic_lists, name) -> str:
"""
_markdown_metadata_entry
"""
if dic_lists:
md_mkdocs = f"{name}:"
for l in dic_lists:
md_mkdocs = md_mkdocs + f'\n - "{l}"'
md_mkdocs = md_mkdocs + "\n"
else:
md_mkdocs = ""
return md_mkdocs
def save_one_markdown(self, herb, path):
"""
save_one_markdown generates a markdown file
"""
logger.info(f"Will save {herb.id} to {path}")
herb_metadata = herb.metadata.copy()
herb_metadata["title"] = herb_metadata.get("name")
md_meta = yaml.dump(herb_metadata)
metadata_mkdocs = f"---\n"
metadata_mkdocs += md_meta
metadata_mkdocs += "---\n "
with open(path, "w") as fp:
fp.write(metadata_mkdocs)
logger.info(f"Saved {herb_metadata} to {path}")
def save_one_markdown_alt(self, herb, path):
"""
save_one_markdown generates a markdown file
"""
logger.info(f"Will save {herb.id} to {path}")
herb_metadata = herb.metadata
# generate tilte, description, keywords, and categories
metadata_title = herb_metadata.get("name")
metadata_description = herb_metadata.get("description")
metadata_tags = herb_metadata.get("tags")
metadata_category = herb_metadata.get("category")
keywords_mkdocs = self._generate_markdown_list_meta(metadata_tags, "keywords")
metadata_mkdocs = f'---\ntitle: "{metadata_title}"\n'
if metadata_description:
metadata_mkdocs = (
metadata_mkdocs + f'description: "{metadata_description}"\n'
)
if keywords_mkdocs:
metadata_mkdocs = metadata_mkdocs + keywords_mkdocs
if metadata_category:
categories_mkdocs = f"category: {metadata_category}"
metadata_mkdocs = metadata_mkdocs + categories_mkdocs
# end the metadata region
metadata_mkdocs = metadata_mkdocs + "---\n "
with open(path, "w") as fp:
fp.write(metadata_mkdocs)
logger.info(f"Saved {herb_metadata} to {path}")
def create_mkdocs_theme(self):
"""copies the prepared theme to the serve dir"""
mkdocs_template_path = Path(__file__).parent / "mkdocs_template"
copy_tree(str(mkdocs_template_path), str(self.mkdocs_folder))
def create_mkdocs_yaml(self):
"""creates mkdocs.yaml from mkdocs_templates.py"""
with open(self.mkdocs_config, "w") as fp:
fp.write(_site_config)
def create_mkdocs_index(self):
"""creates herbs/index.md from mkdocs_templates.py"""
mkdocs_index_path = self.mkdocs_folder / "herbs" / "index.md"
with open(mkdocs_index_path, "w") as fp:
fp.write(_index_template)
def save_all(self, recreate=False) -> None:
"""
save_all saves all files necessary
"""
# attach working directory to all paths
md_folder = self.mkdocs_folder / "herbs"
# create folders if necessary
if md_folder.exists():
if not recreate:
is_remove = click.confirm(
f"{md_folder} exists, remove it and create new?",
default=True,
show_default=True,
)
else:
is_remove = True
if is_remove:
cache_folder = md_folder.parent / "cache"
if cache_folder.exists():
pass
else:
cache_folder.mkdir(parents=True)
md_folder.rename(cache_folder / f"serve.{int(time.time())}")
md_folder.mkdir(parents=True)
else:
md_folder.mkdir(parents=True)
for herb in self.flora.flora:
herb_id = slugify(herb.id)
herb_md_path = md_folder / f"{herb_id}.md"
# generate markdown files
self.save_one_markdown(herb, herb_md_path)
self.create_mkdocs_theme()
if __name__ == "__main__":
pass
| DataHerb/dataherb-python | dataherb/serve/save_mkdocs.py | save_mkdocs.py | py | 5,064 | python | en | code | 3 | github-code | 13 |
12881670384 | #!/bin/python3
import sys
from game import Person
import random
"""
def save_in_file():
"""
#def battle_options(options):
def battle_ground(character, enemy, levels):
counter_levels = 1
print(character[0], character[1])
print(enemy)
print("select your option")
options = input("insert (a) for attack | insert (s) to save | insert (exit) to quit without save game: ")
if options.lower() == 'a':
attack = character[0][1] + character[1][1]
enemy[0] = enemy[0] - attack
if enemy[0] <= 0:
enemy[0] = 0
counter_levels += 1
if counter_levels <= levels:
print("Stage: ", counter_levels)
enemy_spotted(counter_levels)
print("enemy actual: ", enemy)
if options.lower() == 's':
print("save game")
if options.lower() == "exit":
print("exit")
if options not in ('a', 's', "exit"):
print(options, "Invalid input, insert (a) for attack, (s) for save, (exit) to exit without saving")
def character_selection(characters):
game_characters = {1: Person(25, 9), 2: Person(40, 10), 3: Person(20, 6), 4: Person(30, 6)}
play_character1 = game_characters.get(characters[0])
play_character2 = game_characters.get(characters[1])
selected_characters = {"character1": [play_character1.hp, play_character1.dmg],
"character2": [play_character2.hp, play_character2.dmg]}
return [selected_characters.get("character1"), selected_characters.get("character2")]
def enemy_spotted(level):
partial = Person(20, 6)
final_exam = Person(40, 12)
theorical_class = Person(8, 4 + level)
teacher = Person(15, 7)
if teacher.dmg == 7:
teacher.dmg = 14
enemies = [partial, final_exam, theorical_class, teacher]
if level <= 2:
enemies.remove(final_exam)
return enemies
if level >= 3:
random_number = random.randint(0, 3)
return [enemies[random_number].hp, enemies[random_number].dmg]
def type_players():
print("Choose Characters: ")
print("***************************" + "\n")
option_1 = "1.The_bookworn -> Stats: 25 HP and 9 DMG"
option_2 = "2.The_worker -> Stats: 40 HP and 10 DMG"
option_3 = "3.The_whatsapper -> Stats: 20 HP and 6 DMG"
option_4 = "4.The_procrastinator-> Stats: 30 HP and 6 DMG"
return option_1 + "\n" + option_2 + "\n" + option_3 + "\n" + option_4 + "\n"
def main():
k = range(1, 10, 1)
try:
if len(sys.argv) == 3:
if (sys.argv[1] == "-s") and (int(sys.argv[2]) in k):
print("Partida 2 jugadores con " + sys.argv[2] + " niveles")
print(type_players())
char_election_1 = int(input('selec first character: '))
char_election_2 = int(input("select second character: "))
characters = [char_election_1, char_election_2]
selected_enemy = enemy_spotted(int(sys.argv[2]))
selected_chars = character_selection(characters)
battle_ground(selected_chars, selected_enemy, int(sys.argv[2]))
if (sys.argv[1] == "-f") and (sys.argv[2] == "saved"):
print(sys.argv[2] + " Nombre fichero incorrecto")
if len(sys.argv) == 4:
if (sys.argv[1] == "--file=saved.txt") and (sys.argv[2] == "-s") and (sys.argv[3] == "5"):
print("Relanzamos partida guardada en saved.txt teniendo en cuenta el numero de niveles")
if (len(sys.argv) == 2) and (sys.argv[1] == "--file=saved.txt"):
print("Relanzamos la partida guardada en saved.txt")
if (len(sys.argv) == 1) and (sys.argv[0] == "main.py"):
type_players()
except TypeError:
print("fatal error")
except KeyboardInterrupt:
print("Exit program")
# save_game = input("Do you want to save game (y/n): ")
# if save_game == 'y' or save_game=='s':
# save_in_file()
#else:
# print('')
if __name__ == '__main__':
main()
| imorenoma/pst2020 | practica1/main.py | main.py | py | 4,077 | python | en | code | 0 | github-code | 13 |
18089566768 | import os
import sys
import numpy as np
import pandas as pd
from datetime import datetime
from socket import socket, AF_INET, SOCK_DGRAM, timeout, gethostname, gethostbyname
from time import sleep, time
from dotenv import load_dotenv
from paramiko import SSHClient, AutoAddPolicy
# Read connection values from .env
load_dotenv()
hostname = os.getenv('REMOTE_HOST')
username = os.getenv('REMOTE_USER')
password = os.getenv('REMOTE_PASS')
# Create SSH client to remote host
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy)
ssh.connect(hostname=hostname, username=username, password=password)
# Set local IP address and port
lhostname = gethostname()
laddr = gethostbyname(lhostname)
lport = 8080
# Open UDP socket
udp_sock = socket(AF_INET, SOCK_DGRAM)
udp_sock.bind(('', lport))
udp_sock.settimeout(20)
# Check command-line args
args = sys.argv
if len(args) != 2:
sys.exit('Usage: test.py <niter>')
# Create docker network and retrieve its ID
_, netstdout, _ = ssh.exec_command('docker network create benchmarks')
netstdout = netstdout.read().decode('ascii').strip('\n')
netid = netstdout[:12]
# Start testing loop
niter = int(args[1])
masters = ['master_running', 'master_stopped', 'master_paused']
controllers = ['controller_c', 'controller_go', 'controller_rs']
results = pd.DataFrame(columns=['iter', 'mst', 'cont', 'resp_time'])
for mst in masters:
for cont in controllers:
ssh.exec_command(
f'docker run -d --name master -v /var/run/docker.sock:/var/run/docker.sock -p 8080:8080/udp --network benchmarks {mst} {cont} {netid} {laddr} {lport}')
udp_sock.recvfrom(1024) # wait until master is ready
for i in range(0, niter):
start = time()
udp_sock.sendto("ping".encode(), (hostname, 8080))
try:
udp_sock.recvfrom(1024) # receive response
end = time()
if mst != 'master_running':
udp_sock.recvfrom(1024) # wait until master says continue
elapsed = end - start
# Add times to dataframe
results = results.append(pd.DataFrame({
'iter': pd.Series([i], dtype='int'),
'mst': pd.Series([mst], dtype='str'),
'cont': pd.Series([cont], dtype='str'),
'resp_time': pd.Series([elapsed], dtype='float')
}), ignore_index=True)
print(f'[{mst} | {cont} | {i}] Request recorded')
except timeout:
print(f'[{mst} | {cont} | {i}] Request timed out')
_, stdout, _ = ssh.exec_command('docker rm -f master controller')
stdout.channel.recv_exit_status() # wait for command to finish
# Show mean values so far
mean_values = results.groupby(
['mst', 'cont']).agg({
'resp_time': np.mean
})
print(f'[{mst} | {cont}] Mean values so far:\n{mean_values}')
ssh.exec_command('docker network rm benchmarks')
results.to_csv(
f'results/response-time-{datetime.now().strftime("%Y%M%d")}-{niter}.csv', index_label='id')
print('Tests finished correctly')
ssh.close()
udp_sock.close()
| varrrro/container-metrics | response-time/benchmark.py | benchmark.py | py | 3,208 | python | en | code | 0 | github-code | 13 |
70352954578 | class letterCombos: #combination sum
# https://leetcode.com/problems/letter-combinations-of-a-phone-number/
def letterCombinations(self, digits: str) -> List[str]:
phone = {'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']}
def backtrack(combination, next_digits):
if len(next_digits) == 0: # 0 digits to check
output.append(combination)
else: # if there are still digits to check
# iterate over all letters which map
# the next available digit
for letter in phone[next_digits[0]]:
# append the current letter to the combination
# and proceed to the next digits
backtrack(combination + letter, next_digits[1:])
output = []
if digits:
backtrack("", digits)
return output
"""https://leetcode.com/problems/task-scheduler/discuss/130786/Python-solution-with-detailed-explanation"""
class TaskScheduler:
"""
greedy algo
Exampe: Say we have the following tasks: [A,A,A,B,C,D,E] with n =2
Now if we schedule using the idea of scheduling all unique tasks once and then calculating if a cool-off is required or not, then we have: A,B,C,D,E,A,idle,dile,A i.e. 2 idle slots.
But if we schedule using most frequent first, then we have:
2.1: A,idle,idle,A,idle,idle,A
2.2: A,B,C,A,D,E,A i.e. no idle slots. This is the general intuition of this problem.
3.The idea in two can be implemented using a heap and temp list. This is illustrated in the code below.
4.Time complexity is O(N * n) where N is the number of tasks and n is the cool-off period.
5.Space complexity is O(1) - will not be more than O(26).
"""
def leastInterval(self, tasks: List[str], n: int) -> int:
from collections import Counter
from heapq import heappush, heappop, heapify
curr_time, h = 0, []
for k,v in Counter(tasks).items():
heappush(h, (-1*v, k))
while h:
i, temp = 0, []
while i <= n:
curr_time += 1
if h:
x,y = heappop(h)
if x != -1:
temp.append((x+1,y))
if not h and not temp:
break
else:
i += 1
for item in temp:
heappush(h, item)
return curr_time
class minMeetingRooms:
def minMeetingRooms(self, intervals):
"""
:type intervals: List[Interval]
:rtype: int
Time: O(n), constructing min heap w n elments
Space: O(n)
"""
# If there is no meeting to schedule then no room needs to be allocated.
if not intervals:
return 0
# The heap initialization
free_rooms = []
# Sort the meetings in increasing order of their start time.
intervals.sort(key= lambda x: x[0])
# Add the first meeting. We have to give a new room to the first meeting.
heapq.heappush(free_rooms, intervals[0][1])
# For all the remaining meeting rooms
for i in intervals[1:]:
# If the room due to free up the earliest is free, assign that room to this meeting.
if free_rooms[0] <= i[0]: #0 is start, 1 is end
heapq.heappop(free_rooms)
# If a new room is to be assigned, then also we add to the heap,
# If an old room is allocated, then also we have to add to the heap with updated end time.
heapq.heappush(free_rooms, i[1])
# The size of the heap tells us the minimum rooms required for all the meetings.
return len(free_rooms)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class MergeKLists:
def mergeKLists(self, lists):
import heapq
heap = []
for head in lists:
while head != None:
heapq.heappush(heap, head.val)
head = head.next
print(heap)
dummy = ListNode(-1)
head = dummy
while len(heap) != 0:
head.next = ListNode(heapq.heappop(heap))
head = head.next
return dummy.next
def mergeKListsMergeSort(self, lists: List[ListNode]) -> ListNode:
if len(lists) == 0: return
amount = len(lists)
interval = 1
while interval < amount:
for i in range(0, amount - interval, interval * 2):
lists[i] = self.merge2Lists(lists[i], lists[i + interval])
interval *= 2
return lists[0] if amount > 0 else lists
def merge2Lists(self, l1, l2):
head = point = ListNode(0)
while l1 and l2:
if l1.val <= l2.val:
point.next = l1
l1 = l1.next
else:
point.next = l2
l2 = l1
l1 = point.next.next
point = point.next
if not l1:
point.next=l2
else:
point.next=l1
return head.next
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class TextJustification:
"""
https://leetcode.com/problems/text-justification/submissions/
"""
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
length_list = list(map(len,words))
idx = 0
ret = []
while(idx < len(words)):
s = idx
while(idx < len(words) and (sum(length_list[s:idx+1])+idx-s) <= maxWidth):
idx += 1
tmp = words[s]
if idx-s ==1: # if there is only one word
tmp += ' '* (maxWidth-len(tmp))
elif idx == len(words): # if this is the last element
tmp = ' '.join(words[s:idx])
tmp += ' '* (maxWidth-len(tmp))
else: # normal case
minLength = (idx-s-1) + sum(length_list[s:idx]) # minimum length is number of space + total length of strings
numExSpace = maxWidth - minLength
unitSpace = numExSpace//(idx-s-1)
extraSpace = numExSpace % (idx-s-1)
tmp = words[s]
for i in range(s+1, idx):
# add space
extra = 1 if i-s <= extraSpace else 0
space = ' '*(1+unitSpace+extra)
tmp += space
# add next word
tmp += words[i]
ret.append(tmp)
return ret
'''
The logs are already sorted by timestamp.
Use a stack to store tasks. Only the task on stack top got executed.
When a new task comes in, calculate the exclusive time for the previous stack top task.
When a task ends, remove it from stack top.
Because of single thread, task on stack top is guaranteed to ends before other tasks.
Record the time of previous event to calcualte the time period.
Time: O(n)
Space: O(n)
https://leetcode.com/problems/exclusive-time-of-functions/submissions/
'''
class ExclusiveTime:
def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
if not n or not logs:
return 0
task_stack = []
res = [0]*n
pre_event = 0
for log in logs:
data = log.split(':')
if data[1] == 'start':
if not task_stack:
task_stack.append(int(data[0]))
pre_event = int(data[2])
else:
pre_task = task_stack[-1]
res[pre_task] += int(data[2]) - pre_event
task_stack.append(int(data[0]))
pre_event = int(data[2])
else:
pre_task = task_stack.pop()
res[pre_task] += int(data[2]) - pre_event + 1
pre_event = int(data[2]) + 1
return res
class Solution:
"""
https://leetcode.com/problems/critical-connections-in-a-network/submissions/
"""
def criticalConnections(self, n: int, connections: List[List[int]]) -> List[List[int]]:
graph = [[] for _ in range(n)] ## vertex i ==> [its neighbors]
currentRank = 0 ## please note this rank is NOT the num (name) of the vertex itself, it is the order of your DFS level
lowestRank = [i for i in range(n)] ## here lowestRank[i] represents the lowest order of vertex that can reach this vertex i
visited = [False for _ in range(n)] ## common DFS/BFS method to mark whether this node is seen before
## build graph:
for connection in connections:
## this step is straightforward, build graph as you would normally do
graph[connection[0]].append(connection[1])
graph[connection[1]].append(connection[0])
res = []
prevVertex = -1 ## This -1 a dummy. Does not really matter in the beginning.
## It will be used in the following DFS because we need to know where the current DFS level comes from.
## You do not need to setup this parameter, I setup here ONLY because it is more clear to see what are passed on in the DFS method.
currentVertex = 0 ## we start the DFS from vertex num 0 (its rank is also 0 of course)
self._dfs(res, graph, lowestRank, visited, currentRank, prevVertex, currentVertex)
return res
def _dfs(self, res, graph, lowestRank, visited, currentRank, prevVertex, currentVertex):
visited[currentVertex] = True # it is possible
lowestRank[currentVertex] = currentRank
for nextVertex in graph[currentVertex]:
if nextVertex == prevVertex:
continue ## do not include the the incoming path to this vertex since this is the possible ONLY bridge (critical connection) that every vertex needs.
if not visited[nextVertex]:
self._dfs(res, graph, lowestRank, visited, currentRank + 1, currentVertex, nextVertex)
# We avoid visiting visited nodes here instead of doing it at the beginning of DFS -
# the reason is, even that nextVertex may be visited before, we still need to update my lowestRank using the visited vertex's information.
lowestRank[currentVertex] = min(lowestRank[currentVertex], lowestRank[nextVertex])
# take the min of the current vertex's and next vertex's ranking
if lowestRank[nextVertex] >= currentRank + 1: ####### if all the neighbors lowest rank is higher than mine + 1, then it means I am one connecting critical connection ###
res.append([currentVertex, nextVertex])
"""
https://leetcode.com/problems/construct-quad-tree/submissions/
# Definition for a QuadTree node.
class Node:
def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
self.val = val
self.isLeaf = isLeaf
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
"""
class ConstructQuadTree:
def construct(self, grid: List[List[int]]) -> 'Node':
"""
@desc
"""
def construct(self, grid):
return grid and self.dfs(0, 0, len(grid), grid) if grid else None
def dfs(self, i, j, l, grid):
if l == 1:
node = Node(grid[i][j] == 1, True, None, None, None, None)
else:
tLeft = self.dfs(i, j, l // 2, grid)
tRight = self.dfs(i, j + l // 2, l // 2, grid)
bLeft = self.dfs(i + l // 2, j, l// 2, grid)
bRight = self.dfs(i + l // 2, j + l // 2, l // 2, grid)
value = tLeft.val or tRight.val or bLeft.val or bRight.val
if tLeft.isLeaf and tRight.isLeaf and bLeft.isLeaf and bRight.isLeaf and tLeft.val == tRight.val == bLeft.val == bRight.val:
node = Node(value, True, None, None, None, None)
else:
node = Node(value, False, tLeft, tRight, bLeft, bRight)
return node
| eriktoor/interview-practice | kevinUber.py | kevinUber.py | py | 13,054 | python | en | code | 0 | github-code | 13 |
40726707824 | from dtaidistance import dtw
from scipy import stats
import numpy as np
import json
import os
import re
from natsort import natsorted
from dtaidistance import dtw_ndim
# 手勢密碼檢查
def pinch_check(sign, template_list):
PINCH_RANK = {
"index": 0.1,
"middle": 0.2,
"ring": 0.3,
"pinky": 0.4
}
pinch_signal = []
check_count = 0
data = sign
# 傳入驗證的簽名,產生手勢序列
for i in range(len(data)):
# 正規化+手勢編號序列
pinch_signal.extend([PINCH_RANK[data[i]['pinch']]] * int(20 * ((data[i]['end_time'] - data[i]['start_time']) / 1000)))
# 正規化+抬手間隔序列(在筆畫之間,len-1)
if i != len(data) -1:
pinch_signal.extend([0] * int(20 * ((data[i+1]['start_time'] - data[i]['end_time']) / 1000)))
# 傳入模板簽名,產生手勢序列
for i in range(len(template_list)):
template_temp = []
for j in range(len(template_list[i])):
template_temp.extend([PINCH_RANK[template_list[i][j]['pinch']]] * int(20 * ((template_list[i][j]['end_time'] - template_list[i][j]['start_time']) / 1000)))
if j != len(template_list[i]) -1:
template_temp.extend([0] * int(20 * ((template_list[i][j+1]['start_time'] - template_list[i][j]['end_time']) / 1000)))
template_list[i]=template_temp
# 驗證,需要5個簽名都符合
for template in template_list:
distance = dtw.distance(pinch_signal, template)
# 這裡的閾值因為註冊簽名互相比對值為0
if(distance == 0):
check_count=check_count + 1
if(check_count == 5):
return True
else:
return False
# 簽名圖形筆跡檢查
def graph_check(sign, template_list, dist):
sign_x = []
sign_y = []
# 傳入驗證簽名,產生簽名筆跡x&y序列
for j in sign:
sign_x.extend([k[0] for k in j['line']])
sign_y.extend([k[1] for k in j['line']])
# 縫合兩個序列
series1 = stats.zscore(np.array(list(zip(sign_x, sign_y)), dtype=np.double))
# 傳入模板簽名,產生簽名筆跡x&y序列,list為二維
sign_x_list = []
sign_y_list = []
check_count = 0
for i in range(len(template_list)):
data = template_list[i]
sign_x = []
sign_y = []
for j in data:
sign_x.extend([k[0] for k in j['line']])
sign_y.extend([k[1] for k in j['line']])
sign_x_list.append(sign_x)
sign_y_list.append(sign_y)
# 驗證簽名,需要四個以上簽名符合,使用window縮限範圍,節省運算時間
# 這裡的距離閾值從註冊簽名計算得出(目前寫死)
for j in range(5):
series2 = stats.zscore(np.array(list(zip(sign_x_list[j], sign_y_list[j])), dtype=np.double))
d = dtw_ndim.distance(series1, series2, window=15)
if(d<float(dist)):
check_count = check_count+1
# print('簽名' + str(i + 1) + ' vs 簽名' + str(j + 1) + ': '+str(d))
return check_count >= 4
def sign_validate(sign, id):
file_path = './app/gesturesign/DB/' +str(id) + '/template/'
template_list = [None] * 5
files = [f for f in os.listdir(file_path) if os.path.isfile(file_path+f)]
r = re.compile('(' + str(id) +').*(.json)')
file_list = natsorted(list(filter(r.match, files)))
#以全部簽名的名單載入json簽名檔並予以取代
for i in range(len(template_list)):
with open(file_path + file_list[i]) as json_file:
template_list[i] = json.load(json_file)
# 目前模版簽名沒有資料庫結構。這裡先用手動輸入的方式,之後會改成資料庫結構
with open('./app/gesturesign/DB/' +str(id) + '/template/dist.txt') as dist_file:
sign_info = dist_file.read().splitlines()
if(not pinch_check(sign, list(template_list))):
return False, 'pinch'
if(not graph_check(sign, list(template_list), sign_info[0])):
return False, 'graph'
return True, 'pass'
| RainMeoCat/CipherAirSig | backend/app/gesturesign/sign_validate.py | sign_validate.py | py | 4,082 | python | en | code | 0 | github-code | 13 |
3348781226 | #!/uisr/bin/env python
#Tiago de Freitas Pereira <tiagofrepereira@gmail.com>
#Mon Dec 05 12:08:00 CEST 2013
import numpy
import bob
import os
import array
class FileLoader:
"""This class load features files from different formats"""
def __init__(self, dim=40):
self.dim = dim
def load_lists_from_database(self, database, arrange_by_client=False):
from facereclib.toolchain import FileSelector
fs = FileSelector(
database,
"",
"",
database.original_directory,
"",
"",
"",
"",
"",
zt_score_directories = None,
default_extension = '.hdf5'
)
directory_type = "features"
file_list = fs.training_list(directory_type,"train_extractor", arrange_by_client = arrange_by_client)
return file_list
#if(arrange_by_client):
#features = []
#for f in file_list:
#features.append(self.load_features_from_list_per_user(f))
#return features
#else:
#return self._load_features_from_list(file_list)
def load_features_from_list(self, list_files):
#Counting for pre-allocation
dim = 0
counter = 0
for o in list_files:
s,self.dim = self.get_shape(o)
counter = counter + s
#pre-allocating
features = numpy.zeros(shape=(counter,self.dim), dtype='float')
#Loading the feaiures
i = 0
for o in list_files:
f = self.load_features_from_file(o)
s = f.shape[0]
features[i:i+s,:] = f
i = i + s
return features
def load_features_from_list_per_user(self, list_files):
#Counting for pre-allocation
if(len(list_files) > 0):
size,self.dim = self.get_shape(list_files[0])
else:
raise ValueError("Empty list!!")
#pre-allocating
features = numpy.zeros(shape=(len(list_files),size,self.dim), dtype='float')
#Loading the feaiures
i = 0
for o in list_files:
f = self.load_features_from_file(o)
features[i,:,:] = f
i = i + 1
return features
def get_shape(self,o):
"""
Keyword Parameters:
o
File object
"""
f = self.load_features_from_file(o)
return f.shape
def load_features_from_file(self, file_name):
"""
Load a feature file
Keyword Parameters:
file_name
File name
"""
if(file_name[len(file_name)-4:len(file_name)] == "hdf5"):
return bob.io.load(file_name)
else:
return self.__paramread(file_name)
def __paramread(self,arquivo):
"""
Converts a sequence of floats (binary format) in a numpy array
"""
numberOfFloats = os.path.getsize(arquivo)/4 # each feature is a float
file = open(arquivo,mode = 'rb') # opens feature input file
parameters = array.array('f')
parameters.fromfile(file,numberOfFloats)
parameters = numpy.array(parameters, dtype=numpy.float64)
file.close()
number_of_vectors = parameters.shape[0] / float(self.dim)
parameters = numpy.reshape(parameters,(int(number_of_vectors),int(self.dim)))
return parameters
| tiagofrepereira2012/parallel_trainers | parallel_trainers/trainers/utils/file_loader.py | file_loader.py | py | 3,085 | python | en | code | 4 | github-code | 13 |
19147360954 | import os
os.environ["MUJOCO_GL"] = "egl"
# ruff: noqa: E402
from absl import app
from dm_control import suite
import dm_env_wrappers
import jax
import numpy as np
import optax
import reverb
import tensorflow as tf
import tensorflow_datasets as tfds
from baselines.drq_bc import drq_frame_stacking
from baselines.drq_bc import vd4rl_preprocessor
import corax
from corax import types
from corax.agents.jax import actor_core as actor_core_lib
from corax.agents.jax import actors
from corax.agents.jax import drq_v2
from corax.agents.jax.drq_v2 import augmentations
from corax.jax import utils
from corax.jax import variable_utils
from corax.utils import counting
from corax.utils import loggers
from corax.wrappers import mujoco
def make_environment(domain: str, task: str):
env = suite.load(domain, task, task_kwargs={"random": 0})
env = mujoco.MujocoPixelWrapper(env, height=84, width=84, camera_id=0)
env = dm_env_wrappers.ActionRepeatWrapper(env, 2)
env = drq_frame_stacking.FrameStackingWrapper(env, 3, flatten=True)
env = dm_env_wrappers.SinglePrecisionWrapper(env)
return env
def get_dataset_iterator(domain, task):
name = "medium_expert"
dataset = tfds.load(f"vd4rl/main_{domain}_{task}_{name}_84px")["train"] # type: ignore
dataset = dataset.map(
lambda episode: {"steps": vd4rl_preprocessor.process_data(episode["steps"], 3)},
num_parallel_calls=tf.data.AUTOTUNE,
)
# TODO: use shuffling
dataset = dataset.flat_map(
lambda episode: vd4rl_preprocessor.tfds_get_n_step_transitions(
episode["steps"], 3, 0.99
)
)
iterator = dataset.cache().batch(256).repeat().prefetch(4).as_numpy_iterator()
while True:
batch = next(iterator)
transitions = types.Transition(**batch)
yield reverb.ReplaySample(data=transitions, info=None) # type: ignore
def main(_):
tf.config.set_visible_devices([], "GPU")
np.random.seed(0)
domain = "walker"
task = "walk"
dataset = utils.device_put(get_dataset_iterator(domain, task), jax.devices()[0])
dataset = utils.prefetch(dataset, 2)
environment = make_environment(domain, task)
env_spec = corax.make_environment_spec(environment)
key = jax.random.PRNGKey(0)
learner_key, actor_key = jax.random.split(key)
networks = drq_v2.make_networks(env_spec)
parent_counter = counting.Counter(time_delta=0.0)
learner = drq_v2.DrQV2Learner(
learner_key,
dataset,
networks,
optax.linear_schedule(0.5, 0.1, 250000),
augmentation=augmentations.batched_random_shift_aug,
policy_optimizer=optax.adam(1e-4),
critic_optimizer=optax.adam(1e-4),
encoder_optimizer=optax.adam(1e-4),
critic_soft_update_rate=0.01,
discount=0.99,
bc_alpha=2.5,
logger=loggers.make_default_logger("learner", asynchronous=True),
counter=counting.Counter(parent_counter, "learner", time_delta=0.0),
)
device = "gpu"
variable_client = variable_utils.VariableClient(learner, "policy", device=device)
evaluator = actors.GenericActor(
actor_core_lib.batched_feed_forward_to_actor_core(
drq_v2.apply_policy_and_sample(networks, env_spec.actions, 0.0)
),
actor_key,
variable_client,
backend=device,
)
eval_loop = corax.EnvironmentLoop(
environment=environment,
actor=evaluator,
logger=loggers.make_default_logger("evaluator"),
counter=counting.Counter(parent_counter, "evaluator", time_delta=0.0),
)
# Run the environment loop.
max_steps = int(1e6)
eval_every = 5000
eval_episodes = 10
steps = 0
while steps < max_steps:
for _ in range(eval_every):
learner.step()
steps += eval_every
eval_loop.run(eval_episodes)
if __name__ == "__main__":
app.run(main)
| ethanluoyc/corax | projects/baselines/baselines/drq_bc/main.py | main.py | py | 3,913 | python | en | code | 27 | github-code | 13 |
32252516305 | import sys
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QPushButton,
QLabel, QGridLayout, QVBoxLayout, QHBoxLayout,
QMenu, QAction)
class MainUI(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('계산기')
self.setGeometry(300, 300, 215, 285)
self.setMenuBar()
self.setCentralWidget(SubUI())
def setMenuBar(self):
menu = self.menuBar()#메뉴바 생성
self.View = menu.addMenu('보기(&V)')
self.Edit = menu.addMenu('편집(&E)')
self.Help = menu.addMenu('도움말(&H)')#그룹 생성
#보기-----------------------------------
self.View_T = QAction('일반용(&T)', self) #메뉴 객체 생성
self.View_S = QAction('공학용(&S)', self)
self.View_P = QAction('프로그래머용(&P)', self)
self.View_A = QAction('통계용(&A)', self)
self.View_Y = QAction('기록(&Y)', self)
self.View_I = QAction('자릿수 구분 단위(&I)', self)
self.View_B = QAction('기본(&B)', self)
self.View_U = QAction('단위변환(&U)', self)
self.View_D = QAction('날짜 계산(&D)', self)
self.View_WorkSheet = QMenu('워크시트(&W)', self)#서브 그룹 생성
self.WorkSheet_W = QAction('주택 담보 대출(&M)', self)
self.WorkSheet_V = QAction('자동차 임대(&V)', self)
self.WorkSheet_F = QAction('연비 계산(mpg)(&F)', self)
self.WorkSheet_U = QAction('연비 계산(L/100km)(&U)', self)
#메뉴 객체, 서브 그룹에 단축키 설정
self.View_T.setShortcut('Alt+1')#단축키 설정
self.View_S.setShortcut('Alt+2')
self.View_P.setShortcut('Alt+3')
self.View_A.setShortcut('Alt+4')
self.View_Y.setShortcut('Ctrl+H')
self.View_B.setShortcut('Ctrl+F4')
self.View_U.setShortcut('Ctrl+U')
self.View_D.setShortcut('Ctrl+E')
#서브 그룹에 액션 생성
self.View_WorkSheet.addAction(self.WorkSheet_W)
self.View_WorkSheet.addAction(self.WorkSheet_V)
self.View_WorkSheet.addAction(self.WorkSheet_F)
self.View_WorkSheet.addAction(self.WorkSheet_U)
#메인 그룹에 메뉴 객체, 서브 그룹 생성
self.View.addAction(self.View_T)
self.View.addAction(self.View_S)
self.View.addAction(self.View_P)
self.View.addAction(self.View_A)
self.View.addSeparator()#구분선 추가
self.View.addAction(self.View_Y)
self.View.addAction(self.View_I)
self.View.addSeparator()#구분선 추가
self.View.addAction(self.View_B)
self.View.addAction(self.View_U)
self.View.addAction(self.View_D)
self.View.addMenu(self.View_WorkSheet)
#편집-----------------------------------
self.Edit_C = QAction('복사(&C)', self)
self.Edit_P = QAction('붙여넣기(&P)', self)
self.Edit_H = QMenu('기록(&H)', self)#서브 그룹 생성
self.H_I = QAction('기록 복사(&I)', self)
self.H_E = QAction('편집(&E)', self)
self.H_N = QAction('편집 취소(&N)', self)
self.H_L = QAction('지우기(&L)', self)
#메뉴 객체, 서브 그룹에 단축키 설정
self.Edit_C.setShortcut('Ctrl+C')#단축키 설정
self.Edit_P.setShortcut('Ctrl+V')
self.H_E.setShortcut('F2')
self.H_N.setShortcut('Esc')
self.H_L.setShortcut('Ctrl+Shift+D')
#메인 그룹에 메뉴 객체, 서브 그룹 생성
self.Edit.addAction(self.Edit_C)
self.Edit.addAction(self.Edit_P)
self.Edit.addSeparator()#구분선 추가
self.Edit.addMenu(self.Edit_H)
#서브 그룹에 액션 생성
self.Edit_H.addAction(self.H_I)
self.Edit_H.addAction(self.H_E)
self.Edit_H.addAction(self.H_N)
self.Edit_H.addAction(self.H_L)
#도움말----------------------------------
self.Help_V = QAction('도움말 보기(&V)', self)
self.Help_A = QAction('계산기 정보(A)', self)
#메뉴 객체, 서브 그룹에 단축키 설정
self.Help_V.setShortcut('F1')
#메인 그룹에 메뉴 객체, 서브 그룹 생성
self.Help.addAction(self.Help_V)
self.Help.addSeparator()#구분선 추가
self.Help.addAction(self.Help_A)
def TestFunc():
print('눌림!')
class SubUI(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
pass
if __name__ =='__main__':
app = QApplication(sys.argv)
ex = MainUI()
ex.show()
sys.exit(app.exec_())
| JungHeumYoo/Window-Calculator | Original.py | Original.py | py | 4,982 | python | ko | code | 0 | github-code | 13 |
72717293777 | import pandas
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
import time
import tensorflow
from keras.callbacks import EarlyStopping
from math import* #imports
def euclidean_distance(x,y):
return sqrt(sum(pow(a-b,2) for a, b in zip(x, y))) #Definition of eyclidian distance between two vectors of same size
start_time = time.time() # Start the time.
ps = PorterStemmer() # Initialize the stemmer.
tf_idf = TfidfVectorizer() # Initialize tf-idf.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1) # Initialize early stopping.
stop_words = set(stopwords.words('english')) # Set language for stop words.
filen = pandas.read_csv("./SocialMedia_Negative.csv")
filep = pandas.read_csv("./SocialMedia_Positive.csv") #Import the two files
def tfidf(file): #Begin of making the combined tf-idf vector of the median values of all documents inside the file
text = file.Text
labels = file.Sentiment #Divide between text document and text labels
for i,label in enumerate(labels):
if label == 'negative':
labels[i] = 0.0
else:
labels[i] = 1.0 #Binary numerical represantation of the text labels
vector_text = text.to_numpy() #Convert the text list to numpy
vectors_of_words = []
for strings in range(len(vector_text)):
vector_text[strings] = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', vector_text[strings], flags=re.MULTILINE)
vector_text[strings] = re.sub("[^a-zA-Z0-9 ]", "",vector_text[strings])
vector_text[strings] = vector_text[strings].lower()
for word in word_tokenize(vector_text[strings]):
new_word = ps.stem(word)
vector_text[strings] = vector_text[strings].replace(word, new_word)
if new_word in stop_words:
vector_text[strings] = vector_text[strings].replace(word, "")
vector_text[strings] = re.sub(' +',' ',vector_text[strings]) #Filter the text list, removing stopwords, caps etc.
x = tf_idf.fit(vector_text) #Getting the tf-idf matrix
vocab = x.vocabulary_ #Getting the vocab dictionary to correctly map the tf-idf
x = tf_idf.transform(vector_text) # Executes the tf-idf transformation.
x = x.toarray() #Transform the matrix into an array of arrays for easier reiterative manipulation
x_array = np.zeros(len(x[1])) #Define an array to hold the sum of each tf-idf of each word in all documents
x_length = np.zeros(len(x[1])) #Define a support array to count the words for correct median calculation
for i in range(len(x)): #For every element of x i.e. every tf-idfed document
for j in range(len(x[i])): #For every point in the tf-idf space
x_array[i] = x_array[i] + x[i][j] #Add to the sum array the value of the point
x_length[i] = x_length[i] + 1 #And ++ the count vector
vocab[i] = x_array[i]/x_length[i] #Replace in the vocablurary the count of the words with median value
return vocab #And return the dictionary
positive = tfidf(filep)
negative = tfidf(filen) #Do the above process for both files
def compare_vectors(vector_1,vector_2): #Function to calculate the euclidian distance of the projection of the first vector into the second
temp_vector_2 = vector_1.copy() #Make a temporary first vector which will be the projection of the second into the first's space
for key in temp_vector_2: #For each point of the space of the first vector
temp_vector_2[key] = 0.0000000000000000000 #Replace the value with zero so as to initialize it
for key in vector_1: #For each point of the space of the first vector
if str(key) in vector_2.keys(): #If there is something to project
temp_vector_2[key] = vector_2[key] #Do it
vec_1 = list(vector_1.values())
vec_2 = list(temp_vector_2.values()) #Get the values of the dictionaries in the form of a list
print(euclidean_distance(vec_1,vec_2)) #And print the eucldian distance of the two
compare_vectors(positive,negative)
compare_vectors(negative,positive) #DO the process for both ways
| AlexanDelimi/DecisionTheory | eyclidian.py | eyclidian.py | py | 4,762 | python | en | code | 0 | github-code | 13 |
26690083296 | # --------------------------------------------------
# Script de criação/atualização de BD, com dados do Zabbix
# Guilherme Braga, 2022
# https://github.com/gui1080/testes_PyZabbix_FISF3
# --------------------------------------------------
# dependências secundárias
import time
from datetime import datetime
import sys
import sqlite3
import os
import logging
from auxiliar.salva_horario import salva_horario
from auxiliar.zabbix_login import zabbix_login
from auxiliar.arquivos_locais import MOSTRAR_PRINTS
from extract import Extract
from load import Load
# Declaração da main
# --------------------------------------------------------------------------
def main():
# Login no zabbix por meio de biblioteca PyZabbix
# ------------------------------------------------------------------------
logging.basicConfig(filename='exec_log.log', filemode='w+', encoding='utf-8',
format='%(process)d-%(levelname)s-%(message)s', level=logging.DEBUG)
logging.debug("Iniciando script!\n")
try:
zapi = zabbix_login()
logging.debug("Conectado no Zabbix na versão %s" % zapi.api_version())
except:
if MOSTRAR_PRINTS == 1:
print("Erro no Login do Zabbix!")
logging.warning("Erro no login!")
logging.warning("Ocorrido: " + str(sys.exc_info()[0]))
exit(1)
# ------------------------------------------------------------------------
if MOSTRAR_PRINTS == 1:
print("Iniciando busca Zabbix -> Sharepoint")
# Itera por cada host no Zabbix
# ------------------------------------------------------------------------
# Loop itera por todos os hosts de zabbix, fazendo querys individuais para cada um
# buscando problemas, alertas, templates, grupos e interfazer
try:
id_bd, nome, host, id, ip, grupo_lista, host_disponivel_zabbix, erro_recente, templates, ovpn, dif_erro, lista_ultimo_ocorrido, quantidade_problemas, qtd_prolemas_total, hoje, inicio_query, inicio_query_problemas = Extract.extract(logging, zapi)
except:
conn = sqlite3.connect(os.path.dirname(__file__) + '/../hosts.db')
c = conn.cursor()
# criando tabela log (caso não exista)
c.execute(
'CREATE TABLE IF NOT EXISTS log_execucao ( script_id VARCHAR(100) PRIMARY KEY, ultima_execucao VARCHAR(100), exec_sucesso VARCHAR(100))')
conn.commit()
time.sleep(2)
# bota na tabela de log que execucao foi um sucesso
# update if row exists, else insert
agora = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
sql_string = "INSERT OR REPLACE INTO log_execucao (script_id, ultima_execucao, exec_sucesso)" \
"VALUES ('criaBD_Zabbix', '" + agora + "', 'False');"
c.execute(sql_string)
conn.commit()
time.sleep(1)
quit()
finally:
if MOSTRAR_PRINTS == 1:
print("Fim da busca por itens do Zabbix!")
# Cria e exporta dataframe
# ------------------------------------------------------------------------
try:
df = Load.load(id_bd, nome, host, host_disponivel_zabbix, id, ip, grupo_lista, ovpn, erro_recente, templates, dif_erro, quantidade_problemas, qtd_prolemas_total, lista_ultimo_ocorrido)
except:
conn = sqlite3.connect(os.path.dirname(__file__) + '/../hosts.db')
c = conn.cursor()
# criando tabela log (caso não exista)
c.execute(
'CREATE TABLE IF NOT EXISTS log_execucao ( script_id VARCHAR(100) PRIMARY KEY, ultima_execucao VARCHAR(100), exec_sucesso VARCHAR(100))')
conn.commit()
time.sleep(2)
# bota na tabela de log que execucao foi um sucesso
# update if row exists, else insert
agora = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
sql_string = "INSERT OR REPLACE INTO log_execucao (script_id, ultima_execucao, exec_sucesso)" \
"VALUES ('criaBD_Zabbix', '" + agora + "', 'False');"
c.execute(sql_string)
conn.commit()
time.sleep(1)
quit()
finally:
if MOSTRAR_PRINTS == 1:
print("Fim da exportação de dataframe!")
# Salva no BD
# ------------------------------------------------------------------------
logging.debug("Criando/alterando banco de dados!")
# ele trabalha com o banco de dados na pasta de cima, que ai tem outras pastas
# nesse diretório se alimentando desse banco de dados!
conn = sqlite3.connect(os.path.dirname(__file__) + '/../hosts.db')
c = conn.cursor()
# criando tabela dados_zabbix
c.execute('CREATE TABLE IF NOT EXISTS dados_zabbix (id_bd_zabbix VARCHAR(4) PRIMARY KEY, nome VARCHAR(255), host VARCHAR(255), disponivel_no_zabbix VARCHAR(255), host_id VARCHAR(25), host_ip VARCHAR(25), grupos VARCHAR(255), conexao_OVPN VARCHAR(255), erros_ultimas_24h VARCHAR(25), templates_vinculados VARCHAR(255), texto_ultimo_problema_ocorrido VARCHAR(255), qtd_problemas_graves VARCHAR(25), qtd_prolemas_total VARCHAR(25), ultimo_ocorrido VARCHAR(255))')
conn.commit()
time.sleep(2)
# criando tabela log (caso não exista)
c.execute('CREATE TABLE IF NOT EXISTS log_execucao ( script_id VARCHAR(100) PRIMARY KEY, ultima_execucao VARCHAR(100), exec_sucesso VARCHAR(100))')
conn.commit()
time.sleep(2)
# bota na tabela de log que execucao foi um sucesso
# update if row exists, else insert
agora = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
sql_string = "INSERT OR REPLACE INTO log_execucao (script_id, ultima_execucao, exec_sucesso)" \
"VALUES ('criaBD_Zabbix', '" + agora +"', 'True');"
c.execute(sql_string)
conn.commit()
time.sleep(1)
try:
if df.empty:
if MOSTRAR_PRINTS == 1:
print("Dataframe vazio, melhor não mudar o BD!")
logging.debug("Dataframe vazio, melhor não mudar o BD!")
else:
df.to_sql('dados_zabbix', conn, if_exists='replace', index=False)
except:
logging.warning("Erro na conexão com banco de dados!")
logging.warning("Ocorreu: " + str(sys.exc_info()[0]))
agora = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
sql_string = "INSERT OR REPLACE INTO log_execucao (script_id, ultima_execucao, exec_sucesso)" \
"VALUES ('criaBD_Zabbix', '" + agora + "', 'False');"
c.execute(sql_string)
time.sleep(1)
quit()
finally:
logging.debug("Encerrando conexão com BD!")
# ------------------------------------------------------------------------
if __name__ == '__main__':
# contabiliza o tempo de execução!
inicio = time.time()
main()
fim = time.time()
# última execução
salva_horario(inicio)
duracao = (fim - inicio) / 60
logging.debug("\n\n\nFim da execução!\n\nDuração da execução deste script: %f minutos." % (duracao))
| FSLobao/RF.Fusion | src/zabbix/automation/criaBD_Zabbix/main.py | main.py | py | 7,020 | python | pt | code | 0 | github-code | 13 |
11603465671 | #
# @lc app=leetcode.cn id=128 lang=python3
#
# [128] 最长连续序列
#
# @lc code=start
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
nums_set = set(nums)
max_curve = 0
for i in range(len(nums)):
cur_num = nums[i]
current_curve = 1
if cur_num - 1 not in nums_set:
while cur_num + 1 in nums_set:
cur_num += 1
current_curve += 1
max_curve = max(max_curve, current_curve)
return max_curve
# @lc code=end
| RGBRYANT24/LeetCodePractice_PY | 128.最长连续序列.py | 128.最长连续序列.py | py | 578 | python | en | code | 0 | github-code | 13 |
19048264042 |
from PyQt4 import QtCore, QtGui
from tvInfo import seasonBuilder, episodeBuilder
from torrentSearch import torrentSearch
import sys
import time
import datetime
import webbrowser
import urllib
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_showSearch(object):
def setupUi(self, showSearch):
self.seasons = {} #
self.selected_episode = ''
self.selected_season = ''
self.episode_number = ''
self.searched_name = ''
self.torrents = {}
# General GUI Setup
showSearch.setObjectName(_fromUtf8("showSearch"))
showSearch.resize(1900, 750)
showSearch.setMouseTracking(False)
showSearch.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
showSearch.setLayoutDirection(QtCore.Qt.RightToLeft)
showSearch.setAutoFillBackground(False)
showSearch.setStyleSheet(_fromUtf8(""))
showSearch.setTabShape(QtGui.QTabWidget.Rounded)
self.centralwidget = QtGui.QWidget(showSearch)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.searchFrame = QtGui.QFrame(self.centralwidget)
self.searchFrame.setGeometry(QtCore.QRect(10, 0, 511, 611))
self.searchFrame.setStyleSheet(_fromUtf8(""))
self.searchFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.searchFrame.setFrameShadow(QtGui.QFrame.Raised)
self.searchFrame.setObjectName(_fromUtf8("searchFrame"))
self.infoFrame = QtGui.QFrame(self.centralwidget)
self.infoFrame.setGeometry(540, 0, 611, 711)
self.infoFrame.setStyleSheet(_fromUtf8(""))
self.infoFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.infoFrame.setFrameShadow(QtGui.QFrame.Raised)
self.infoFrame.setObjectName(_fromUtf8("infoFrame"))
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(10, 60, 511, 781))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
# Set up Show Search box to input show name
self.search_query_text = QtGui.QPlainTextEdit(self.searchFrame)
self.search_query_text.setGeometry(QtCore.QRect(10, 10, 341, 41))
self.search_query_text.setLayoutDirection(QtCore.Qt.LeftToRight)
self.search_query_text.setObjectName(_fromUtf8("search_query_text"))
# Setting up labels for GUI
self.titleLabel = QtGui.QLabel(self.centralwidget)
self.titleLabel.setGeometry(QtCore.QRect(580, 0, 491, 100))
font = QtGui.QFont()
font.setFamily("Sans Serif")
font.setPointSize(30)
font.setBold(True)
font.setItalic(True)
font.setUnderline(False)
font.setWeight(75)
self.titleLabel.setFont(font)
self.titleLabel.setFrameShadow(QtGui.QFrame.Raised)
self.titleLabel.setAlignment(QtCore.Qt.AlignCenter)
self.titleLabel.setObjectName("titleLabel")
self.titleLabel.setWordWrap(True)
self.infoLabel = QtGui.QLabel(self.centralwidget)
self.infoLabel.setGeometry(QtCore.QRect(580, 580, 491, 321))
self.infoLabel.setWordWrap(True)
font = QtGui.QFont()
font.setFamily("Sans Serif")
font.setPointSize(14)
self.infoLabel.setFont(font)
self.infoLabel.setAlignment(
QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.infoLabel.setObjectName("infoLabel")
self.listWidget = QtGui.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(1130, 60, 750, 550))
self.listWidget.setObjectName("results")
# Create the widget that will show the seasons and then the episodes for that season
self.results = QtGui.QTreeWidget(self.frame)
self.header = QtGui.QTreeWidgetItem([" "])
self.results.setHeaderItem(self.header)
self.results.setGeometry(5, 1, 501, 561)
self.results.itemSelectionChanged.connect(self.extract_result)
# After episode selected, sends torrent information to torrent application
self.downloadButton = QtGui.QPushButton(self.centralwidget)
self.downloadButton.setGeometry(QtCore.QRect(1130, 630, 750, 80))
self.downloadButton.setObjectName("downloadButton")
self.downloadButton.setText("Download")
self.downloadButton.clicked.connect(self.handleDownload)
self.downloadButton.setEnabled(False)
# The button used to search for show using inputted text in search_query_text
self.searchButton = QtGui.QPushButton(self.searchFrame)
self.searchButton.setGeometry(QtCore.QRect(370, 10, 131, 31))
self.searchButton.setFlat(False)
self.searchButton.setObjectName(_fromUtf8("searchButton"))
self.searchButton.clicked.connect(self.handleSearch)
# the button used to pick a certain episode
self.selectButton = QtGui.QPushButton(self.frame)
self.selectButton.setGeometry(QtCore.QRect(6, 570, 511, 81))
self.selectButton.setAutoDefault(False)
self.selectButton.setObjectName(_fromUtf8("selectButton"))
self.selectButton.clicked.connect(self.handleSelect)
self.selectButton.setEnabled(False)
# picture of show extracted from imdb
self.picture = QtGui.QLabel(self.centralwidget)
# label where the shows name will be displayed
showSearch.setCentralWidget(self.centralwidget)
self.toolBar = QtGui.QToolBar(showSearch)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
showSearch.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.retranslateUi(showSearch)
QtCore.QMetaObject.connectSlotsByName(showSearch)
######################################
# if an argument is passed on command line, automatically search
if len(sys.argv) == 2:
self.search_query_text.appendPlainText(sys.argv[1])
self.searchButton.click()
def handleSearch(self):
'''
Takes text from search_query_text and builds the results widget to show
the show's season and episode information
'''
# Reset previous search data
self.titleLabel.setText('')
self.infoLabel.setText('')
self.listWidget.clear()
self.selectButton.setEnabled(True)
self.searched_name = self.search_query_text.toPlainText()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
# Extract relevant search data for show
title, year, show_poster_url, tagline, self.seasons = seasonBuilder(
self.searched_name)
self.results.clear()
self.header.setText(0, title)
todays_date = datetime.datetime.today()
for i in range(1, len(self.seasons) + 1):
# input the season into the results list
current_season = QtGui.QTreeWidgetItem(["Season " + str(i)])
self.results.addTopLevelItem(current_season)
for e in range(1, len(self.seasons[i]) + 1):
# inputs each episode in the current season into the results list
temp = ''
if e in range(1, 10):
temp = str(i) + "0" + str(e)
else:
temp = str(i) + str(e)
episode_description = temp + ": " + str(self.seasons[i][e][0])
# only show results for shows that have aired
release_date = datetime.datetime.strptime(
self.seasons[i][e][1], '%Y-%m-%d')
if todays_date > release_date:
current_episode = QtGui.QTreeWidgetItem(
[episode_description])
current_season.addChild(current_episode)
# Set labels to appropraite information
self.titleLabel.setText(title + '(' + str(year) + ')')
self.infoLabel.setText(tagline)
self.setPicture(show_poster_url)
QtGui.QApplication.restoreOverrideCursor()
def extract_result(self):
# get the season and episode selection and store it in varaibles
selected_item = str(self.results.currentItem().text(0)).split(":")[0]
if len(selected_item) == 3:
self.selected_season = selected_item[0]
self.episode_number = selected_item[1:3]
elif len(selected_item) == 4:
self.selected_season = selected_item[0:2]
self.episode_number = selected_item[2:4]
    def handleSelect(self):
        """Show the torrent list and episode details for the selected result."""
        # Reset label information for downloads list
        self.titleLabel.setText('')
        self.infoLabel.setText('')
        self.listWidget.clear()
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        # Strip zero padding so the episode number indexes the seasons dict.
        if self.episode_number[0] == '0':
            self.episode_number = self.episode_number[1]
        # Get the selected episode information from seasons
        result = self.seasons[int(self.selected_season)
                              ][int(self.episode_number)]
        title, release, imdb_id = result[0], result[1], result[2]
        plot, title = episodeBuilder(imdb_id)
        # formatting issues for single digit episode numbers:
        # re-pad to two digits for the "S01E02"-style search string
        temp1, temp2 = self.selected_season, self.episode_number
        if int(self.selected_season) in range(0, 10):
            temp1 = '0' + self.selected_season[0]
        if int(self.episode_number) in range(0, 10):
            temp2 = '0' + self.episode_number[0]
        # Get torrents based on episode
        self.torrents = torrentSearch(
            self.searched_name, 'S' + temp1 + 'E' + temp2)
        #if torrents == {}:
            # no torrents
        # pass torrents on to download list widget to handle selection of torrent
        for t in self.torrents:
            self.listWidget.addItem(self.torrents[t][0] + " --- " + self.torrents[t][1] + ' --- '
                                    + self.torrents[t][2])
        # Set the appropriate labels with information
        self.titleLabel.setText('S' + temp1 + 'E' + temp2 + ': ' + title)
        self.infoLabel.setText(plot)
        self.downloadButton.setEnabled(True)
        QtGui.QApplication.restoreOverrideCursor()
    def handleDownload(self):
        # Pressing Download buttons will lead to torrent application to open with
        # the selected torrent (index 4 of the torrent record is its link).
        # NOTE(review): self.torrents is a dict filled by torrentSearch();
        # indexing it with the list row assumes its keys are 0..n-1 -- confirm.
        webbrowser.open(self.torrents[self.listWidget.currentRow()][4])
    def setPicture(self, url):
        """Download the poster image at *url* and display it at a fixed spot."""
        # set picture with url from show
        data = urllib.request.urlopen(url).read()
        image = QtGui.QImage()
        image.loadFromData(data)
        pixmap = QtGui.QPixmap(image)
        self.picture.setPixmap(pixmap)
        # Resize the widget to match the downloaded image's dimensions.
        self.picture.setGeometry(700, 180, pixmap.width(), pixmap.height())
    def retranslateUi(self, showSearch):
        """Apply translatable window/button captions (Qt Designer boilerplate)."""
        showSearch.setWindowTitle(_translate("showSearch", "Torrent", None))
        self.searchButton.setText(_translate("showSearch", "Search", None))
        self.selectButton.setText(_translate("showSearch", "Select", None))
        self.toolBar.setWindowTitle(_translate("showSearch", "toolBar", None))
if __name__ == "__main__":
    # Standalone entry point: build the Qt application and show the window.
    app = QtGui.QApplication(sys.argv)
    showSearch = QtGui.QMainWindow()
    ui = Ui_showSearch()
    ui.setupUi(showSearch)
    showSearch.show()
    sys.exit(app.exec_())
| sobotadom/torrent | showSearch.py | showSearch.py | py | 11,784 | python | en | code | 0 | github-code | 13 |
34893289409 | import logging
from pyrogram import Client

from Config import Config

logging.basicConfig(level=logging.INFO)

# Only these handler modules are loaded from the local "plugins" package.
plugins = dict(root="plugins", include=["forceSub", "start"])

# Bot client; credentials come from the environment-backed Config object.
pbot = Client(
    "ForceSubscribeRobot",
    bot_token=Config.BOT_TOKEN,
    api_id=Config.APP_ID,
    api_hash=Config.API_HASH,
    plugins=plugins,
)

# Blocks until the bot is stopped.
pbot.run()
| Bot-support/ForceSub-Bot | main.py | main.py | py | 387 | python | en | code | 0 | github-code | 13 |
24564528579 | from collections import namedtuple
import pygame
from pygame import USEREVENT
from pygame.locals import K_ESCAPE, KEYDOWN, KEYUP, K_UP, K_DOWN, \
K_w, K_s, QUIT
from menu_ui import MenuPause, MenuEnd
from ball import Ball
from field import Field, L_GOAL_LINE, R_GOAL_LINE
from slider import Slider, SLIDER_DISTX
from bots import EasyBot, NormalBot, HardBot
from scoreboard import ScoreBoard
from animation_controller import AnimationController
from common import UP, DOWN, DISPLAY_SIZE
# Delays (ms) before the ball is (re)served after a goal / at kickoff.
PUTTING_BALL_WAIT = 500
ONSTART_PUTTING_BALL_WAIT = 2000
# Custom pygame event fired when it is time to put the ball back in play.
EV_PUT_BALL = USEREVENT + 3
# Top-left position of the scoreboard on the battle surface.
SCOREBOARD_POS = (900, 300)
class Battle:
    '''
    Core of game
    '''
    # First player to reach this many goals wins the match.
    goals_target = 10
    def __init__(self, display):
        # Screen surface we ultimately blit to.
        self.display = display
        self.animations_mgr = AnimationController()
        # Off-screen surface everything is drawn on first.
        self.surface = pygame.Surface(DISPLAY_SIZE, pygame.SRCALPHA, 32)
        self.field = Field(self.surface)
        self.scoreboard = ScoreBoard(self.surface, SCOREBOARD_POS)
        self.pause_menu = MenuPause(self.display, self)
        # Center the menus over the field.
        self.menu_pos = (
            (self.field.rect.width-self.pause_menu.width)/2,
            (self.field.rect.height-self.pause_menu.height)/2
        )
        self.menu_end = None
        # (left player goals, right player goals)
        self.score = (0, 0)
        self.sprites = pygame.sprite.RenderPlain()
        self.sliders = []
        self.goals = {}
        self.balls = []
        # State machine values seen below:
        # play / pause / end / need_wait_put_ball / waiting_put_ball
        self.state = 'need_wait_put_ball'
        self.pressing_escape = False
        self.present()
    def present(self):
        '''
        Present field on screen, create all objects
        '''
        self.field.present()
        self.fill_goals()
        self.create_sliders()
        self.add_ball_to_battle()
        # Right paddle is controlled by the AI.
        self.bot_player = NormalBot(self, self.right_slider, self.right_goal)
        self.update_score()
        # Schedule the first serve.
        pygame.time.set_timer(EV_PUT_BALL, 1500)
        self.display.blit(self.surface, ((0,0), DISPLAY_SIZE))
        pygame.display.update(((0,0), DISPLAY_SIZE))
    def reset(self):
        '''
        Restart for next game
        '''
        self.score = (0, 0)
        self.update_score()
        self.menu_end.hide()
        self.menu_end = None
        # Put the paddles back to their starting positions.
        for slider in self.sliders:
            slider.put()
        self.update_state('need_wait_put_ball')
    def update(self, main_events_loop, ticks):
        '''
        Main cycle of battle, processing all objects and actions
        '''
        for sprite in self.sprites:
            sprite.clear(self.surface)
        main_events_loop(self)
        self.animations_mgr.update(ticks)
        if self.check_state('play'):
            self.on_play_state()
        elif self.check_state('need_wait_put_ball'):
            self.on_need_wait_put_ball()
        # Paused/ended frames only draw the active menu, not the field.
        if self.check_state('pause'):
            self.on_pause_state()
            return
        elif self.check_state('end'):
            return
        for sprite in self.sprites:
            sprite.draw(self.surface)
        self.display.blit(self.surface, (0,0))
        pygame.display.update(self.field.rect)
    def handle_event(self, event):
        '''
        Handling keyboard and user events
        '''
        if event.type == KEYDOWN:
            self.on_key_down()
        elif event.type == KEYUP:
            self.on_key_up()
        elif event.type == USEREVENT:
            self.handle_user_event(event)
        elif event.type == EV_PUT_BALL:
            # Serve timer elapsed: resume play and cancel the timer.
            self.update_state('play')
            pygame.time.set_timer(EV_PUT_BALL, 0)
    def on_key_down(self):
        """Start moving paddles / open the pause menu on key press."""
        is_pressed = pygame.key.get_pressed()
        if not self.check_state('pause'):
            if is_pressed[K_ESCAPE]:
                self.pressing_escape = True
                self.pause_game()
                return
        # W/S drive the left paddle, arrow keys the right one.
        if is_pressed[K_w]:
            self.left_slider.process(UP)
        if is_pressed[K_s]:
            self.left_slider.process(DOWN)
        if is_pressed[K_UP]:
            self.right_slider.process(UP)
        if is_pressed[K_DOWN]:
            self.right_slider.process(DOWN)
    def on_key_up(self):
        """Stop a paddle when both of its movement keys are released."""
        is_pressed = pygame.key.get_pressed()
        if not (is_pressed[K_w] and is_pressed[K_s]):
            self.left_slider.on_change_direction()
        if not (is_pressed[K_UP] and is_pressed[K_DOWN]):
            self.right_slider.on_change_direction()
        self.pressing_escape = False
    def handle_user_event(self, event):
        """Dispatch custom USEREVENTs by the attribute attached to them."""
        event_info = event.__dict__
        for key in event_info:
            if key == 'goal':
                self.on_goal_scored(event_info)
            if key == 'pause':
                self.update_state('need_wait_put_ball')
    def update_score(self, d_score=(0,0)):
        """Add *d_score* to the score tuple and redraw the scoreboard."""
        self.score = (self.score[0] + d_score[0], self.score[1] + d_score[1])
        self.scoreboard.set_score(self.score)
        self.display.blit(self.surface, (0, 0))
        pygame.display.update(self.scoreboard.rect)
    def update_state(self, state):
        # Single choke point for state transitions.
        self.state = state
    def check_state(self, state):
        return self.state == state
    def add_ball_to_battle(self):
        """Create a ball wired to everything it can collide with."""
        collision_objects = {
            'sliders': self.sliders,
            'goals': self.field.goals.sprites(),
            'posts': self.field.posts.sprites(),
            'balls': self.balls
        }
        ball = Ball(self.field.surface, self.field.rect.center,
                    self.sprites, collision_objects, self.animations_mgr)
        self.balls.append(ball)
    def fill_goals(self):
        """Cache both goal lines as lightweight (pos_x, top, bottom) tuples."""
        Goal = namedtuple('Goal', ['pos_x', 'top', 'bottom'])
        left_goal = self.field.left_goal_line
        self.left_goal = Goal(
            left_goal.rect.centerx,
            left_goal.rect.top,
            left_goal.rect.bottom
        )
        right_goal = self.field.right_goal_line
        self.right_goal = Goal(
            right_goal.rect.centerx,
            right_goal.rect.top,
            right_goal.rect.bottom
        )
    def get_ball(self):
        # Only one ball exists in the current game mode.
        return self.balls[0]
    def create_sliders(self):
        """Create the two paddles, one at each goal line."""
        left_pos = (L_GOAL_LINE + SLIDER_DISTX, self.field.rect.centery)
        self.left_slider = Slider(
            self.field.surface, left_pos, group = self.sprites,
            color = 'red'
        )
        self.sliders.append(self.left_slider)
        right_pos = (R_GOAL_LINE - SLIDER_DISTX, self.field.rect.centery)
        self.right_slider = Slider(
            self.field.surface, right_pos, group = self.sprites,
            color = 'blue'
        )
        self.sliders.append(self.right_slider)
    def on_play_state(self):
        self.process_objects()
        self.bot_player.process()
    def on_pause_state(self):
        # The end-of-game menu takes precedence over the pause menu.
        active_menu = self.menu_end or self.pause_menu
        active_menu.update()
    def process_objects(self):
        for ball in self.balls:
            ball.update()
    def on_goal_scored(self, scored_info):
        """Award a point, then either finish the match or schedule a serve."""
        if scored_info['goal'] == 1:
            self.update_score((1, 0))
        else:
            self.update_score((0, 1))
        if max(self.score) == self.goals_target:
            self.end_game()
        else:
            self.update_state('need_wait_put_ball')
    def on_need_wait_put_ball(self):
        # Arm the serve timer exactly once per goal.
        self.update_state('waiting_put_ball')
        pygame.time.set_timer(EV_PUT_BALL, 500)
    def pause_game(self):
        '''
        Show pause menu
        '''
        self.update_state('pause')
        self.pause_menu.set_position(self.menu_pos)
        self.pause_menu.show()
        continue_btn = self.pause_menu.get_widget('continue_btn')
        continue_btn.set_onpressed(self.continue_game)
        quit_btn = self.pause_menu.get_widget('quit_btn')
        quit_btn.set_onpressed(self.quit_game)
    def continue_game(self):
        self.update_state('play')
        self.pause_menu.hide()
    def end_game(self):
        '''
        Show menu with game result and choice: restart game or quit
        '''
        self.update_state('pause')
        menu = MenuEnd(self.display, self)
        menu.set_position(self.menu_pos)
        menu.load_layout()
        # Show the win icon only when the human (left) player has more goals.
        win_icon = menu.get_widget('win_icon')
        lose_icon = menu.get_widget('lose_icon')
        if self.score[0] > self.score[1]:
            win_icon.visible = True
            lose_icon.visible = False
        else:
            win_icon.visible = False
            lose_icon.visible = True
        menu.show()
        self.menu_end = menu
        continue_btn = menu.get_widget('continue_btn')
        continue_btn.set_onpressed(self.reset)
        quit_btn = menu.get_widget('quit_btn')
        quit_btn.set_onpressed(self.quit_game)
    def quit_game(self):
        # Ask the main loop to shut down by posting a synthetic QUIT event.
        pygame.event.clear()
        ev = pygame.event.Event(QUIT)
        pygame.event.post(ev)
| kehlerr/airpong | battle.py | battle.py | py | 8,721 | python | en | code | 0 | github-code | 13 |
39148363578 | #!/bin/python3
#https://www.hackerrank.com/challenges/the-birthday-bar/problem
import sys
def solve(n, s, d, m):
    """Return how many contiguous length-m segments of s sum to d.

    n -- number of squares (len(s))
    s -- list of ints, the chocolate-bar squares
    d -- target sum (Ron's birth day)
    m -- segment length (Ron's birth month)
    """
    if m > n:
        # No window of length m fits in the bar.
        return 0
    # Sliding window: keep the running segment sum instead of re-summing
    # every window, turning the original O(n*m) double loop into O(n).
    window = sum(s[:m])
    count = 1 if window == d else 0
    for i in range(m, n):
        window += s[i] - s[i - m]
        if window == d:
            count += 1
    return count
# Read the bar, then Ron's birth day and month, and print the segment count.
n = int(input().strip())
s = [int(tok) for tok in input().strip().split(' ')]
d, m = (int(tok) for tok in input().strip().split(' '))
print(solve(n, s, d, m))
16924724242 | from types import DynamicClassAttribute
import unittest
from selenium import webdriver
from pyunitreport import HTMLTestRunner
from selenium.webdriver.support.ui import Select
class RegisterNewUer(unittest.TestCase):
    """UI test: the demo store's language selector offers exactly 3 languages."""
    def setUp(self):
        # Launch Chrome (driver binary path is machine-specific) and open the site.
        self.driver = webdriver.Chrome(executable_path = r'F:\John\Proyectos\Python\pt\pythonbasic\Drivers and promgrams\chromedriver.exe')
        driver = self.driver
        #driver.maximize_window()
        driver.get('http://demo-store.seleniumacademy.com/')
        driver.implicitly_wait(10)
    def test_select_lenguage(self):
        """Check the language options and that switching to German updates the URL."""
        exp_options = ['English','French','German']
        act_option = []
        select_langueage = Select(self.driver.find_element_by_id('select-language'))
        self.assertEqual(3,len(select_langueage.options))
        # Collect the visible option labels in page order.
        for option in select_langueage.options:
            act_option.append(option.text)
        self.assertListEqual(exp_options,act_option)
        self.assertEqual('English',select_langueage.first_selected_option.text)
        select_langueage.select_by_visible_text('German')
        # Selecting German reloads the page with the store query parameter.
        self.assertTrue('store=german' in self.driver.current_url)
        self.driver.implicitly_wait(5)
    def tearDown(self):
        # Always close the browser, even if the test failed.
        self.driver.quit()
if __name__ == '__main__':
    # verbosity=2 lists each test; HTMLTestRunner also writes an HTML report.
    unittest.main(verbosity= 2 , testRunner=HTMLTestRunner(output='Reportes',report_name='Reposte1'))
36594566455 | from time import sleep
# Two independent 5x5 grids of "O" (open water); "X" marks a past guess.
board1 = [] #what player 2 would see
board2 = [] #what player 1 would see
for x in range(5):
    board1.append(["O"] * 5) #creating the board
for x in range(5):
    board2.append(["O"] * 5)
def print_board(board):
    """Print the grid, one row per line, cells separated by single spaces."""
    print("\n".join(" ".join(row) for row in board))
def player_row_choice(player):
    """Ask player 1 or 2 for the row in which to hide their ship."""
    return int(input("Player %s, choose a row from 0 to 4: " % player))
def player_col_choice():
    """Ask for the column in which to hide the ship."""
    return int(input("Now, choose a column from 0 to 4: "))
def guessing_row(player):
    """Ask *player* for their row guess."""
    return int(input("Player %s, guess the row! " % player))
def guessing_col():
    """Ask for the column guess."""
    return int(input("Now, guess the column! "))
def play_game():
    """Run one match: both players hide a ship, then alternate guesses."""
    # 3 means "in progress"; turn() returns 4 when somebody wins.
    game = 3
    player_1_row = player_row_choice(1) #choosing placement
    player_1_column = player_col_choice()
    player_2_row = player_row_choice(2)
    player_2_column = player_col_choice()
    while game == 3:
        # Player 1 guesses at player 2's placement, then vice versa.
        check = turn(board2, player_2_row, player_2_column, 1)
        if check == 4:
            break
        check = turn(board1, player_1_row, player_1_column, 2)
        if check == 4:
            break
def turn(board, player_row, player_col, player):
    """Play one guess for *player*; return 4 on a win, None otherwise.

    board      -- grid of this player's past guesses (mutated on a miss)
    player_row -- row where the opponent hid their ship
    player_col -- column where the opponent hid their ship
    player     -- player number used in the prompts (1 or 2)
    """
    print("")
    print_board(board)
    player_guess_row = guessing_row(player)
    player_guess_col = guessing_col()
    if player_guess_row == player_row and player_guess_col == player_col:
        print()
        print("CONGRATS")
        sleep(1)
        print("Player %s, you win!" % player)
        return 4
    elif player_guess_row > 4 or player_guess_col > 4:
        print()
        print("You're not even in the ocean!")
        # Bug fix: propagate the retry's result -- previously a win scored
        # during the retry was discarded, so the game never ended.
        return turn(board, player_row, player_col, player)
    elif board[player_guess_row][player_guess_col] == "X":
        print("")
        print("You already guessed that!")
        # Same fix as above: return the recursive retry's result.
        return turn(board, player_row, player_col, player)
    else:
        print("")
        print("You missed!")
        board[player_guess_row][player_guess_col] = "X"
        print_board(board)
        sleep(2)
play_game() # start the match; blocks on stdin until someone wins
| csmidt95/pythonproj | Mbattleship2.py | Mbattleship2.py | py | 2,305 | python | en | code | 0 | github-code | 13 |
24167406575 |
import random
import pickle
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
    """Ensemble classifier that majority-votes across several classifiers."""

    def __init__(self, *classifiers):
        self._classifiers = classifiers

    def classify(self, features):
        """Return the majority-vote label for *features*.

        Fix: removed a leftover debug print that fired on every call.
        """
        votes = [c.classify(features) for c in self._classifiers]
        return mode(votes)

    def confidence(self, features):
        """Return the fraction of classifiers agreeing with the winning label."""
        votes = [c.classify(features) for c in self._classifiers]
        choice_votes = votes.count(mode(votes))
        return choice_votes / len(votes)
# Rebuild artifacts saved by the training script.
documents_f = open("pickled_algo/documents.pickle", "rb")
documents = pickle.load(documents_f)
documents_f.close()
# The 5k most frequent words used as the feature vocabulary.
word_features5k_f = open("pickled_algo/word_features5k.pickle", "rb")
word_features = pickle.load(word_features5k_f)
word_features5k_f.close()
def find_features(document):
    """Map each vocabulary word to whether it appears in *document*.

    Fix: removed a leftover debug print; membership test now uses a set
    so lookups are O(1) instead of scanning the token list per word.
    """
    words = set(word_tokenize(document))
    return {w: (w in words) for w in word_features}
# Load the pre-built (features, label) pairs and shuffle for a fresh split.
featuresets_f = open("pickled_algo/feature.pickle", "rb")
featuresets = pickle.load(featuresets_f)
featuresets_f.close()
random.shuffle(featuresets)
print(len(featuresets))
testing_set = featuresets[10000:]
training_set = featuresets[:10000]
# Load every pre-trained classifier that participates in the vote.
open_file = open("pickled_algo/originalnb.pickle", "rb")
classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algo/MultinomialNB.pickle", "rb")
MNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algo/BernoulliNB.pickle", "rb")
BernoulliNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algo/LogisticRegression.pickle", "rb")
LogisticRegression_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algo/LinearSVC.pickle", "rb")
LinearSVC_classifier = pickle.load(open_file)
open_file.close()
# Loaded but deliberately left out of the ensemble below.
open_file = open("pickled_algo/SGDClassifier.pickle", "rb")
SGDC_classifier = pickle.load(open_file)
open_file.close()
print("ho gya 2 ")
# Odd number of voters (5) avoids ties in the majority vote.
voted_classifier = VoteClassifier(
    classifier,
    LinearSVC_classifier,
    MNB_classifier,
    BernoulliNB_classifier,
    LogisticRegression_classifier)
def sentiment(text):
    """Return (label, confidence) for *text* using the voted ensemble."""
    feats = find_features(text)
    return voted_classifier.classify(feats),voted_classifier.confidence(feats)
32449572446 | # -*- coding: utf-8 -*-
import knack_tools as kt
import os
from datetime import date
# Human-readable script name shown in the GUI's script list.
name = "Get Schedules For Knack Import"
## Default textbox width is 60 characters.
## To keep text from wrapping, it's best to keep
## lines shorter than that.
description = \
"""
This is the same as "Get Schedules" except the ouput
is formatted to be imported into knack. To export the
data, copy the following into the "Custom Entry" box
on the right side of the GUI:
Schedule - Course Title,Schedule - Time,Schedule - Term,Schedule - Component,Schedule - Days,Schedule - Catalog Nbr,Schedule - Subject,Schedule - Location,Schedule - Start Date,Schedule - End Date,Schedule - Session,Schedule - Codes,Schedule - Acad Group,Schedule - Class Nbr
Notes:
- The raw schedule data can be found at:
https://ro.umich.edu/calendars/schedule-of-classes
- Currently, the script must be updated to point to the
correct data file by changing the "RO_DATA_FILE"
variable at the top of the file.
"""
# Raw data from Registrar's Office
RO_DATA_FILE = os.getcwd()+'/data/'+'ro_schedule_FA2018.csv'
# File to export all failed matches
ERROR_FILE = os.getcwd()+'/results/'+'scraper_errors.csv'
def processing_function(raw):
    """
    This is the top-level function for processing data.
    The function is meant to be passed to the importer (in this case GuiIO).
    The importer will call this function after it has parsed the raw data.
    """
    global sdata
    # Only work with small group for now...
    actives = kt.filterdata(raw, kt.selectors.allactives)
    # kdata = kt.filterdata(
    #     actives,
    #     lambda person: kt.selectors.bydept(person,depts)
    # )
    kdata = actives
    # actives = kt.filterdata(raw, kt.selectors.allactives)
    # engin = kt.filterdata(actives, kt.selectors.engineers)
    # kdata = kt.filterdata(
    #     engin,
    #     lambda person: kt.selectors.hiredafter(person,NEW_HIRE_DATE)
    # )
    # import Registrar's Office Schedule information
    sdata = kt.importrosched(RO_DATA_FILE)
    # extract schedules for each person and add column to knack data
    errors = []
    for umid in kdata.keys():
        person = kdata[umid]
        lname = person['Name: Last']
        # Grab all schedules with this person's last name
        # (since that's the only info the registrar gives us)
        try:
            schedules = sdata[lname]
        except KeyError:
            # TAB pads the name so the log columns line up.
            TAB = 20-len(lname)
            if TAB<1: TAB = 1
            kdata[umid]['Schedule'] = ''
            msg = 'Failed to find: \t"'+lname+'"'+' '*TAB+'in department: '+\
                person['Department']
            errors.append([lname,msg])
            print(msg)
            continue
        # Choose most likely from all schedules with that last name
        schedules = choose_schedule(person,schedules)
        if not schedules:
            TAB = 20-len(lname)
            if TAB<1: TAB = 1
            kdata[umid]['Schedule'] = ''
            msg = 'Failed to choose:\t"'+lname+'"'+' '*TAB+'in department: '+\
                person['Department']
            errors.append([lname,msg])
            print(msg)
            continue
        # Not sure how to deal with multiple results right now...
        if len(schedules)>1:
            TAB = 20-len(lname)
            if TAB<1: TAB = 1
            msg = 'Multiple schedules for:\t"'+lname+'"'+' '*TAB+'in department: '+\
                person['Department']
            errors.append([lname,msg])
            continue
        # Add to output data: flatten day flags into one "MTWTHF"-style string.
        s = schedules[0]
        days = ''.join([s['M'],s['T'],s['W'],s['TH'],s['F'],s['S'],s['SU']])
        kdata[umid]['Schedule - Days'] = days
        # Track which "Schedule - *" columns exist so skipped people can be
        # given empty values for the same columns below.
        empty_cols = dict()
        empty_cols['Days'] = u''
        for col in schedules[0].keys():
            out_col = 'Schedule - '+col;
            kdata[umid][out_col] = schedules[0][col]
            empty_cols[out_col] = u''
    # Add empty entries for people that have been skipped
    for umid in kdata:
        if not 'Schedule - Course Title' in kdata[umid]:
            kdata[umid].update(empty_cols)
    # Don't output anyone we don't have schedule info for
    kdata_filtered = kt.filterdata(
        kdata,
        lambda person: kt.selectors.column_is_empty(person,'Schedule - Course Title')
    )
    print('Number of failures: '+str(len(errors)))
    kt.writecsv_summary(errors, ERROR_FILE)
    return kdata_filtered
def choose_schedule(person,schedules):
    """
    Given a list of possible schedules, and a person from knack,
    return the subset of schedules that are most likely for that person.
    """
    # NOTE(review): debug dump; an empty department will still raise a
    # KeyError on the lookup below.
    if not person['Department']:
        print(person)
    dept_codes = kt.DEPT_TO_DEPTCODES[ person['Department'] ]
    # Just naively check the department for now
    out = []
    for s in schedules:
        if s['Component'] == 'IND':
            # Assume indep. studies are professors;
            # filters out some duplicate names without incurring
            # too many false negatives (hopefully)
            continue
        # 'Subject' looks like "Elec Eng & CS (EECS)"; extract the code.
        dcode = s['Subject'].split(' (')[1][:-1]
        if dcode in dept_codes:
            out.append(s)
    return out
def make_human_readable(schedules):
    """Format each schedule dict as a one-line summary string of the form:
    'EECS527 DIS: MWF 3-430PM @ 1012 EECS'
    """
    def fmt(s):
        # 'Subject' looks like "Elec Eng & CS (EECS)"; keep the parenthesized code.
        course = s['Subject'].split(' (')[1][:-1] + s['Catalog Nbr']
        days = ''.join(s[d] for d in ('M', 'T', 'W', 'TH', 'F', 'S', 'SU'))
        return '{} {}: {} {} @ {}'.format(
            course, s['Component'], days, s['Time'], s['Location'])
    return [fmt(s) for s in schedules]
| geo3550/theknack | scripts/get_schedules_for_import.py | get_schedules_for_import.py | py | 6,073 | python | en | code | 0 | github-code | 13 |
5188601156 | from typing import Optional, Tuple, Sequence, Mapping, Callable
from mdp_rl_tabular import MDPForRLTabular
from TD_zero import TD0
from helper_func import S, SAf, get_rv_gen_func_single, get_expected_action_value, get_epsilon_greedy_action
class TD_control(TD0):
    """Tabular TD control: choice == 0 runs a SARSA-style update (using the
    epsilon-greedy expected action value), otherwise Q-learning (max)."""
    def __init__(
        self,
        mdp_for_rl: MDPForRLTabular,
        epsilon: float,
        epsilon_half_life: float,
        learning_rate: float,
        learning_rate_decay: float,
        num_episodes: int,
        max_steps: int,
        choice: bool
    ) -> None:
        super().__init__(
            mdp_for_rl,
            epsilon,
            epsilon_half_life,
            learning_rate,
            learning_rate_decay,
            num_episodes,
            max_steps)
        # 0 -> SARSA-style target, anything else -> Q-learning target.
        self.choice = choice
    # get both for Sarsa and Q-learning depends on the choice
    def get_qv_func_dict(self) -> SAf:
        """Estimate Q(s, a) for every state/action pair by sampling episodes."""
        sa_dict = self.mdp_rep.state_action_dict
        # Initialize all action values to zero.
        qf_dict = {s: {a: 0.0 for a in v} for s, v in sa_dict.items()}
        episodes = 0
        updates = 0
        while episodes < self.num_episodes:
            state = self.mdp_rep.init_state_gen()
            action = get_epsilon_greedy_action(qf_dict[state], self.epsilon)
            steps = 0
            terminate = False
            while not terminate:
                next_state, reward = self.mdp_rep.state_reward_gen_dict[state][action]()
                # Sarsa
                if self.choice == 0:
                    # Expected-SARSA-style target under the epsilon-greedy policy.
                    next_qv = get_expected_action_value(qf_dict[next_state], \
                                                        self.epsilon)
                    next_action = get_epsilon_greedy_action(qf_dict[next_state], \
                                                            self.epsilon)
                # Q-learning
                else:
                    # NOTE(review): this first next_qv is immediately
                    # overwritten by the max() two lines below.
                    next_qv = max(qf_dict[next_state][a] for a in
                                  qf_dict[next_state])
                    # Greedy action and its value (off-policy target).
                    next_action, next_qv = max(qf_dict[next_state].items(), \
                                               key = lambda l:l[1])
                # TD update with a decaying step size ~ 1/sqrt(updates).
                qf_dict[state][action] += self.learning_rate *(updates / self.learning_rate_decay + 1) ** -0.5 *\
                                          (reward + self.mdp_rep.gamma * next_qv -
                                           qf_dict[state][action])
                updates += 1
                steps += 1
                # NOTE(review): terminal check uses the pre-transition state;
                # the episode ends one step after a terminal state is entered.
                terminate = steps >= self.max_steps or \
                            state in self.mdp_rep.terminal_states
                state = next_state
                action = next_action
            episodes += 1
        return qf_dict
# Print a right-aligned pyramid of '*' with `number` rows read from stdin.
number=int(input())
text=""
for x in range(number):
    gap = " " * number  # shrinking left margin right-aligns each row
    number -= 1
    text=text+"*"
    print(gap,text)  # note: print() adds one extra space between gap and text
    text=text+"*"  # rows widen by two stars: 1, 3, 5, ...
# Someone else's example (as I said, there are many ways to write a program)
"""
number = int(input("กรอกตัวเลข : "))
print("จำนวน",number,"แถว")
for i in range(number):
    print(" "*(number-i),"*"*(((i+1)*2)-1))
"""
"""
n = int(input())
space = n-1
star = 1
for i in range(n):
    print(" "*space,end="")
    print("*"*star)
    star+=2
    space-=1
"""
14412218600 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
import abc
import itertools
from typing import Any, Dict, FrozenSet, Optional
from ... import helpers
class PlasmaModifierProperties(bpy.types.PropertyGroup):
    """Base PropertyGroup for all Plasma modifiers attached to a Blender Object."""
    @property
    def allowed(self) -> bool:
        """Returns if this modifier is allowed to be enabled on the owning Object"""
        allowed_page_types = getattr(self, "pl_page_types", {"room"})
        allowed_object_types = getattr(self, "bl_object_types", set())
        page_name = self.id_data.plasma_object.page
        if not allowed_object_types or self.id_data.type in allowed_object_types:
            if helpers.get_page_type(page_name) in allowed_page_types:
                return True
        return False
    @property
    def copy_material(self):
        """Materials MUST be single-user"""
        return False
    def created(self):
        # Hook: called when the modifier is first enabled.
        pass
    def destroyed(self):
        # Hook: called when the modifier is disabled.
        pass
    @property
    def draw_opaque(self):
        """Render geometry before the avatar"""
        return False
    @property
    def draw_framebuf(self):
        """Render geometry after the avatar but before other blended geometry"""
        return False
    @property
    def draw_no_defer(self):
        """Disallow geometry being sorted into a blending span"""
        return False
    @property
    def draw_late(self):
        # Render geometry after all other blended geometry.
        return False
    @property
    def enabled(self) -> bool:
        # A modifier is "on" iff it has a display slot and is legal here.
        return self.display_order >= 0 and self.allowed
    @enabled.setter
    def enabled(self, value: bool) -> None:
        plmods = self.id_data.plasma_modifiers
        if value and self.enabled is False:
            self.display_order = plmods.determine_next_id()
            self.created()
            # Determine if this modifier has any dependencies and make sure they're enabled
            for dep in getattr(self, "pl_depends", set()):
                getattr(plmods, dep).enabled = True
        elif not value and self.enabled is True:
            # Disabling also disables everything that depends on this modifier,
            # then compacts the remaining display_order values.
            mods_to_delete = frozenset(itertools.chain([self.pl_id], self.get_dependents()))
            enabled_mods = sorted(self.id_data.plasma_modifiers.modifiers, key=lambda x: x.display_order)
            subtract = 0
            for modifier in enabled_mods:
                if modifier.pl_id in mods_to_delete:
                    modifier.display_order = -1
                    modifier.destroyed()
                    subtract += 1
                else:
                    modifier.display_order -= subtract
    def export(self, exporter, bo, so):
        """This is the main phase of the modifier export where most, if not all, PRP objects should
           be generated. No new Blender objects should be created unless their lifespan is constrained
           to the duration of this method.
        """
        pass
    # Commented out to prevent conflicts with TranslationMixin overload.
    """
    def export_localization(self, exporter):
        '''This is an auxiliary export phase that should only convert localization data. PRP objects
           are in an undefined state and therefore should not be used.
        '''
        pass
    """
    @property
    def face_sort(self):
        """Indicates that the geometry's faces should be sorted by the engine"""
        return False
    @classmethod
    def get_dependents(cls) -> FrozenSet[str]:
        """Returns the set of modifiers that depend on this modifier being active."""
        deps = set()
        # Walk all modifier subclasses, recursing into transitive dependents.
        for i in PlasmaModifierProperties.__subclasses__():
            if cls.pl_id in getattr(i, "pl_depends", []):
                deps.add(i.pl_id)
                deps.update(i.get_dependents())
        return frozenset(deps)
    def harvest_actors(self):
        # Hook: names of objects this modifier forces to be actors.
        return ()
    @property
    def key_name(self):
        return self.id_data.name
    @property
    def no_face_sort(self):
        """Indicates that the geometry's faces should never be sorted by the engine"""
        return False
    @property
    def no_span_sort(self):
        """Indicates that the geometry's Spans should never be sorted with those from other
           Drawables that will render in the same pass"""
        return False
    # This is temporarily commented out to prevent MRO failure. Revisit in Python 3.7
    '''
    def pre_export(self, exporter, bo: bpy.types.Object) -> Generator:
        """This is the first phase of the modifier export; allowing modifiers to create additonal
           objects or logic nodes to be used by the exporter. To do so, overload this method
           and yield any Blender ID from your method. That ID will then be exported and deleted
           when the export completes. PRP objects should generally not be exported in this phase.
        """
        yield
    '''
    @property
    def requires_actor(self):
        """Indicates if this modifier requires the object to be a movable actor"""
        return False
    # Guess what?
    # You can't register properties on a base class--Blender isn't smart enough to do inheritance,
    # you see... So, we'll store our definitions in a dict and make those properties on each subclass
    # at runtime. What joy. Python FTW. See register() in __init__.py
    _subprops = {
        "display_order": (IntProperty, {"name": "INTERNAL: Display Ordering",
                                        "description": "Position in the list of buttons",
                                        "default": -1,
                                        "options": {"HIDDEN"}}),
        "show_expanded": (BoolProperty, {"name": "INTERNAL: Actually draw the modifier",
                                         "default": True,
                                         "options": {"HIDDEN"}}),
        "current_version": (IntProperty, {"name": "INTERNAL: Modifier version",
                                          "default": 1,
                                          "options": {"HIDDEN"}}),
    }
class PlasmaModifierLogicWiz:
    """Mixin for modifiers that generate a Plasma logic node tree at export time."""
    def convert_logic(self, bo, **kwargs):
        """Creates, converts, and returns an unmanaged NodeTree for this logic wizard. If the wizard
           fails during conversion, the temporary tree is deleted for you. However, on success, you
           are responsible for removing the tree from Blender, if applicable."""
        name = kwargs.pop("name", self.key_name)
        assert not "tree" in kwargs
        tree = bpy.data.node_groups.new(name, "PlasmaNodeTree")
        kwargs["tree"] = tree
        try:
            self.logicwiz(bo, **kwargs)
        except:
            # Don't leak the half-built tree on failure.
            bpy.data.node_groups.remove(tree)
            raise
        else:
            return tree
    def _create_python_file_node(self, tree, filename: str, attributes: Dict[str, Any]) -> bpy.types.Node:
        """Create a PFM node for *filename* and declare its attribute slots."""
        pfm_node = tree.nodes.new("PlasmaPythonFileNode")
        # NoUpdate() defers node updates until all attributes are in place.
        with pfm_node.NoUpdate():
            pfm_node.filename = filename
            for attr in attributes:
                new_attr = pfm_node.attributes.add()
                new_attr.attribute_id = attr["id"]
                new_attr.attribute_type = attr["type"]
                new_attr.attribute_name = attr["name"]
        pfm_node.update()
        return pfm_node
    def _create_python_attribute(self, pfm_node, attribute_name: str, attribute_type: Optional[str] = None, **kwargs):
        """Creates and links a Python Attribute Node to the Python File Node given by `pfm_node`.
           This will automatically handle simple attribute types such as numbers and strings, however,
           for object linkage, you should specify the optional `attribute_type` to ensure the proper
           attribute type is found. For attribute nodes that require multiple values, the `value` may
           be set to None and handled in your code."""
        from ...nodes.node_python import PlasmaAttribute, PlasmaAttribNodeBase
        if attribute_type is None:
            assert len(kwargs) == 1 and "value" in kwargs, \
                "In order to deduce the attribute_type, exactly one attribute value must be passed as a kw named `value`"
            attribute_type = PlasmaAttribute.type_LUT.get(kwargs["value"].__class__)
        # Find the node class that can represent this attribute type.
        node_cls = next((i for i in PlasmaAttribNodeBase.__subclasses__() if attribute_type in i.pl_attrib), None)
        assert node_cls is not None, "'{}': Unable to find attribute node type for '{}' ('{}')".format(
            self.id_data.name, attribute_name, attribute_type
        )
        node = pfm_node.id_data.nodes.new(node_cls.bl_idname)
        node.link_output(pfm_node, "pfm", attribute_name)
        for i, j in kwargs.items():
            setattr(node, i, j)
        return node
    @abc.abstractmethod
    def logicwiz(self, bo, tree):
        # Subclasses populate *tree* with the nodes this modifier needs.
        pass
    def pre_export(self, exporter, bo):
        """Default implementation of the pre_export phase for logic wizards that simply triggers
           the logic nodes to be created and for their export to be scheduled."""
        yield self.convert_logic(bo)
class PlasmaModifierUpgradable:
    """Mixin for modifiers whose stored data may need a version upgrade."""

    @property
    @abc.abstractmethod
    def latest_version(self):
        """The most recent version of this modifier's data layout."""
        raise NotImplementedError()

    @property
    def requires_upgrade(self):
        """Whether the stored version lags behind the latest version."""
        stored, latest = self.current_version, self.latest_version
        assert stored <= latest
        return stored < latest

    @abc.abstractmethod
    def upgrade(self):
        """Migrate this modifier's stored data to the latest version."""
        raise NotImplementedError()
@bpy.app.handlers.persistent
def _restore_properties(dummy):
    """load_pre handler: re-register deprecated properties before a blend loads."""
    # When Blender opens, it loads the default blend. The post load handler
    # below is executed and deprecated properties are unregistered. When the
    # user goes to load a new blend file, the handler below tries to execute
    # again and BOOM--there are no deprecated properties available. Therefore,
    # we reregister them here.
    for mod_cls in PlasmaModifierUpgradable.__subclasses__():
        for prop_name in mod_cls.deprecated_properties:
            # Unregistered propertes are a sequence of (property function,
            # property keyword arguments). Interesting design decision :)
            prop_cb, prop_kwargs = getattr(mod_cls, prop_name)
            del prop_kwargs["attr"] # Prevents proper registration
            setattr(mod_cls, prop_name, prop_cb(**prop_kwargs))
bpy.app.handlers.load_pre.append(_restore_properties)
@bpy.app.handlers.persistent
def _upgrade_modifiers(dummy):
    """load_post handler: migrate every object's modifiers to the latest version."""
    # First, run all the upgrades
    for i in bpy.data.objects:
        for mod_cls in PlasmaModifierUpgradable.__subclasses__():
            mod = getattr(i.plasma_modifiers, mod_cls.pl_id)
            if mod.requires_upgrade:
                mod.upgrade()
    # Now that everything is upgraded, forcibly remove all properties
    # from the modifiers to prevent sneaky zombie-data type export bugs
    for mod_cls in PlasmaModifierUpgradable.__subclasses__():
        for prop in mod_cls.deprecated_properties:
            RemoveProperty(mod_cls, attr=prop)
bpy.app.handlers.load_post.append(_upgrade_modifiers)
| H-uru/korman | korman/properties/modifiers/base.py | base.py | py | 11,666 | python | en | code | 31 | github-code | 13 |
21092599387 | import requests
import sqlite3
from flask import jsonify
import shutil
# RAWG API listing: games released in 2019, most popular first ("-added" ordering).
URL = "https://api.rawg.io/api/games?dates=2019-01-01,2019-12-31&ordering=-added" # page url
def getJson():
    """Fetch the most-popular-games listing from the RAWG API and return the parsed JSON."""
    response = requests.get(url=URL)
    print(response)  # quick visual check of the HTTP status
    return response.json()
def downloadImage(id):
    """Download the background image of the game with database id `id`.

    Looks up the image URL and game name in the ``mostPopular`` table, then
    streams the JPEG to ``<name>.jpeg`` (spaces/colons replaced by dashes).
    Prints an error and returns early if the game is not found or the DB
    lookup fails.
    """
    sqliteConnection = None
    fetch = None
    try:
        sqliteConnection = sqlite3.connect('gamesDB.db')
        cursor = sqliteConnection.cursor()
        # Parameterized query: never interpolate values into SQL text.
        cursor.execute("SELECT background_image,nome FROM mostPopular WHERE id = ?", (id,))
        fetch = cursor.fetchall()
        print(f"---->{fetch}")
    except sqlite3.Error as error:
        # str(error): concatenating the exception object itself raises TypeError.
        print("eccezione --> " + str(error))
    finally:
        if sqliteConnection:
            print('chiusura connessione con database')
            sqliteConnection.close()
    if not fetch:
        # Nothing to download (bad id or DB error above).
        print("immagine non scaricata")
        return
    img_link = fetch[0][0] # split data in name and link
    name = fetch[0][1]
    filename = f"{name}.jpeg" # format the name with dashes
    filename = filename.replace(" ", "-").replace(":", "-")
    r = requests.get(url=img_link, stream=True)
    if r.status_code == 200:
        r.raw.decode_content = True
        # `with` closes the file even if the copy fails.
        with open(filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        print("immagine non scaricata") # if the request doesn't work, print an error
def writeOnDB(data):
    """Replace the contents of the ``mostPopular`` table with the games in `data`.

    `data` is the parsed RAWG JSON (a dict with a ``results`` list). The old
    rows are deleted first; all inserts happen in one transaction.
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect('gamesDB.db')
        cursor = sqliteConnection.cursor()
        cursor.execute("DELETE FROM mostPopular;")
        for entry in data['results']:
            # Parameterized INSERT: the old f-string version both allowed SQL
            # injection (names containing quotes) and inserted the release
            # date unquoted, so '2019-01-01' was evaluated as arithmetic.
            cursor.execute(
                "INSERT INTO mostPopular (nome,dataRilascio,rating,rating_top,"
                "ratings_count,reviews_text_count,added,suggestions_count,id,"
                "reviews_count,slug,background_image) "
                "VALUES (?,?,?,?,?,?,?,?,?,?,?,?);",
                (entry['name'], entry['released'], entry['rating'],
                 entry['rating_top'], entry['ratings_count'],
                 entry['reviews_text_count'], entry['added'],
                 entry['suggestions_count'], entry['id'],
                 entry['reviews_count'], entry['slug'],
                 entry['background_image']))
        sqliteConnection.commit()
    except sqlite3.Error as error:
        # str(error): concatenating the exception object itself raises TypeError.
        print("eccezione --> " + str(error))
    finally:
        if sqliteConnection:
            print('chiusura connessione con database')
            sqliteConnection.close()
def GetDataFromDB():
    """Return (nome, dataRilascio, rating, id) tuples for every game in ``mostPopular``.

    Returns an empty list when the query fails instead of raising NameError
    on the unbound result variable, as the previous version did.
    """
    sqliteConnection = None
    ranking = []
    try:
        sqliteConnection = sqlite3.connect('gamesDB.db')
        cursor = sqliteConnection.cursor()
        cursor.execute("SELECT nome,dataRilascio,rating,id FROM mostPopular")
        ranking = cursor.fetchall()
    except sqlite3.Error as error:
        # str(error): concatenating the exception object itself raises TypeError.
        print("eccezione --> " + str(error))
    finally:
        if sqliteConnection:
            print('Lettura eseguita: chiusura connessione con database\n')
            sqliteConnection.close()
    return ranking
def main():
    """Interactive console menu.

    1 -> refresh the database from the RAWG API
    2 -> print the popularity ranking
    3 -> download a game's background image by id
    4 -> quit
    """
    while True:
        selection = int(input("1 -> carica DB\n2->ottieni classifica\n3->ottieni immagine con id\n4->Esci\n>>>"))
        if selection == 1:
            writeOnDB(getJson())
        if selection == 2:
            ranking = GetDataFromDB()
            for i,val in enumerate(ranking):
                print(f"{i+1} posto --> nome: {val[0]}, rilasciato nel {val[1]}, rating: {val[2]}, ID = {val[3]}\n")
        if selection == 3:
            downloadImage(int(input("\n\ninserisci id del gioco da cercare: ")))
        if selection == 4:
            break
# Runs the menu loop at import time (the original has no __main__ guard).
main()
| MatteoAllemandi/School | TPSIT/RequestGameApi/api_clientRequest.py | api_clientRequest.py | py | 4,503 | python | en | code | 0 | github-code | 13 |
17057833604 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PaymentSchedule(object):
    """One installment of a loan repayment plan (Alipay API domain model).

    Serialization follows the generated-SDK convention: only truthy fields
    are emitted, and nested API objects are serialized via their own
    ``to_alipay_dict``. The per-field copy/paste in the original
    ``to_alipay_dict``/``from_alipay_dict`` is replaced by a single field
    table, eliminating ~90 duplicated lines while producing identical dicts.
    """

    # Field names in serialization order; dict keys match attribute names.
    FIELD_NAMES = (
        "date",
        "repaid_interest_total",
        "repaid_penalty_total",
        "repaid_principal_total",
        "start_date",
        "term",
        "unpaid_interest_total",
        "unpaid_penalty_total",
        "unpaid_principal_total",
    )

    def __init__(self):
        self._date = None
        self._repaid_interest_total = None
        self._repaid_penalty_total = None
        self._repaid_principal_total = None
        self._start_date = None
        self._term = None
        self._unpaid_interest_total = None
        self._unpaid_penalty_total = None
        self._unpaid_principal_total = None

    @property
    def date(self):
        return self._date
    @date.setter
    def date(self, value):
        self._date = value
    @property
    def repaid_interest_total(self):
        return self._repaid_interest_total
    @repaid_interest_total.setter
    def repaid_interest_total(self, value):
        self._repaid_interest_total = value
    @property
    def repaid_penalty_total(self):
        return self._repaid_penalty_total
    @repaid_penalty_total.setter
    def repaid_penalty_total(self, value):
        self._repaid_penalty_total = value
    @property
    def repaid_principal_total(self):
        return self._repaid_principal_total
    @repaid_principal_total.setter
    def repaid_principal_total(self, value):
        self._repaid_principal_total = value
    @property
    def start_date(self):
        return self._start_date
    @start_date.setter
    def start_date(self, value):
        self._start_date = value
    @property
    def term(self):
        return self._term
    @term.setter
    def term(self, value):
        self._term = value
    @property
    def unpaid_interest_total(self):
        return self._unpaid_interest_total
    @unpaid_interest_total.setter
    def unpaid_interest_total(self, value):
        self._unpaid_interest_total = value
    @property
    def unpaid_penalty_total(self):
        return self._unpaid_penalty_total
    @unpaid_penalty_total.setter
    def unpaid_penalty_total(self, value):
        self._unpaid_penalty_total = value
    @property
    def unpaid_principal_total(self):
        return self._unpaid_principal_total
    @unpaid_principal_total.setter
    def unpaid_principal_total(self, value):
        self._unpaid_principal_total = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, recursing into nested API objects."""
        params = dict()
        for name in self.FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, 0, "") are omitted, as in the generated SDK.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PaymentSchedule from a response dict; returns None for empty input."""
        if not d:
            return None
        o = PaymentSchedule()
        for name in PaymentSchedule.FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PaymentSchedule.py | PaymentSchedule.py | py | 5,689 | python | en | code | 241 | github-code | 13 |
33568386288 | import boto3
import logging
import json
import io
import pandas as pd
from datetime import datetime
import pytz
bucket = "kmk-practice"
file_name = "KRX_holiday_calendar.csv"

# Load the KRX holiday calendar from S3 once, at Lambda cold start.
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucket, Key=file_name)
df = pd.read_csv(obj['Body'])
# '일자 및 요일' = "date and weekday" column. Renamed from `list`, which
# shadowed the builtin of the same name.
holiday_list = list(df['일자 및 요일'])

# Today's date in the Seoul timezone, formatted like the calendar entries.
tz = pytz.timezone('Asia/Seoul')
raw_dates = datetime.now(tz)
today = raw_dates.strftime('%Y-%m-%d')

# setup simple logging for INFO
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# define the connection
ec2 = boto3.resource('ec2', region_name='ap-northeast-2')


def lambda_handler(event, context):
    """Start every stopped EC2 instance tagged AutoStart=True, unless today is a KRX holiday."""
    # Select stopped instances that opted in via the AutoStart tag.
    filters = [{
        'Name': 'tag:AutoStart',
        'Values': ['True']
        },
        {
            'Name': 'instance-state-name',
            'Values': ['stopped']
        }
    ]
    # filter the instances
    instances = ec2.instances.filter(Filters=filters)
    # Renamed from `RunningInstances`: these are the *stopped* instance ids.
    stopped_instance_ids = [instance.id for instance in instances]
    if today not in holiday_list:  # market open today -> start the machines
        if len(stopped_instance_ids) > 0:
            # perform the startup
            ec2.instances.filter(InstanceIds=stopped_instance_ids).start()
            print("AutoStarting")
        else:
            print("Nothing to see here")
| data04190/AWS_KRX_AutoCrawler | Lambda/EC2_AutoStart.py | EC2_AutoStart.py | py | 1,408 | python | en | code | 0 | github-code | 13 |
30918123073 | # import package:numpy、pandas
import numpy as np
import datetime
import sys
import random as rand
def semi_km(ori, label, label_data, label_2, max_iter):
    """Semi-supervised k-means.

    Centroids are seeded from the labeled samples (one cluster per distinct
    label in ``label_2``), then refined by alternating assign/update steps on
    the unlabeled data ``ori``. Prints accuracy (clusters are mapped to labels
    in seeding order) and SSE, and returns the final SSE.

    ori        -- unlabeled samples (2-D array-like)
    label      -- ground-truth labels of ``ori`` (for accuracy reporting only)
    label_data -- labeled samples used for seeding and centroid updates
    label_2    -- labels of ``label_data``
    max_iter   -- maximum number of assign/update iterations
    """
    attr_num = len(ori[0]) # number of attributes
    tup_num = len(ori) # number of tuples
    labeled_num = len(label_data) # number of labeled samples
    label_uni = list(set(label_2)) # total classes
    n_clusters = len(label_uni) # k = number of classes
    # NOTE(review): seeded with [tup_num], so after the fill loop below this
    # list has tup_num + 1 entries; assign() rebuilds it at length tup_num,
    # so the stray leading element only skews the first change count.
    clu_arr = [tup_num] # save the cluster of each tuple
    mean_arr = [] # save the mean position
    label_arr = [] # save the clusters of labeled data
    change_num = tup_num + 1 # number of changed points in each iter
    # dist_arr = [n_clusters]
    # count_arr = [] # save the total number of each cluster
    # =========================== initial: centroids = per-class means ==========================
    for j in range(0, labeled_num):
        label_arr.append(-1)
    for i in range(0, n_clusters):
        sum_label = []
        sum_num = 0
        for i2 in range(0, attr_num):
            sum_label.append(0)
        for j in range(0, labeled_num):
            if label_arr[j] == -1:
                if label_2[j] == label_uni[i]:
                    sum_label = np.add(sum_label, label_data[j])
                    label_arr[j] = i
                    sum_num += 1
        mean_arr.append(sum_label/sum_num)
    # print(mean_arr)
    for i in range(0, tup_num):
        clu_arr.append(0)
    # ========================= iteration: stop at max_iter or when <=1 point moves ================================
    iter_num = 0
    # NOTE(review): change_con is computed but never used as a stopping threshold.
    change_con = int(tup_num*0.001)
    while iter_num < max_iter and change_num > 1:
        clu_arr, change_num = assign(mean_arr, ori, clu_arr)
        # update
        mean_arr = update(ori, clu_arr, n_clusters, label_data, label_arr)
        iter_num += 1
    # print("iter_num")
    # print(change_num)
    # print(iter_num)
    SSE2 = 0
    for i in range(0, tup_num):
        SSE2 += np.square(np.linalg.norm((ori[i] - mean_arr[clu_arr[i]])))
    # print(SSE2)
    # ========================= accuracy: cluster i maps to label_uni[i] ================================
    clu_class = label_uni
    print(clu_class)
    # compute the accuracy; every labeled sample counts as correct by construction
    num_t = 0
    for i in range(0, tup_num):
        if clu_class[clu_arr[i]] == label[i]:
            num_t +=1
    for j in range(0, labeled_num):
        num_t += 1
    accuracy = num_t/(tup_num + labeled_num)
    print("Accuracy is : " )
    print(accuracy)
    # ========================= SSE over the unlabeled data ================================
    SSE = 0
    for i in range(0, tup_num):
        SSE += np.square(np.linalg.norm((ori[i] - mean_arr[clu_arr[i]])))
    print("SSE is:")
    print(SSE)
    return SSE
# =========================assign================================
def assign(mean_arr, ori, pre_clu):
    """Assign every tuple in ``ori`` to its nearest centroid (L1 distance).

    mean_arr -- current centroids
    ori      -- samples to assign (2-D array-like)
    pre_clu  -- previous assignment, used to count how many points moved

    Returns (clu_arr, change_num): the new cluster index per tuple and the
    number of tuples whose assignment differs from ``pre_clu``.
    """
    tup_num = len(ori)
    n_clusters = len(mean_arr)
    clu_arr = [0] * tup_num  # new cluster of each tuple
    change_num = 0  # points that changed cluster this iteration
    for i in range(tup_num):
        min_in = 0
        # float("inf") instead of the old 999999 sentinel: the sentinel
        # silently left min_in == -1 whenever every distance was >= 999999.
        min_dist = float("inf")
        for j in range(n_clusters):
            dist = np.linalg.norm(ori[i] - mean_arr[j], ord=1)
            if dist < min_dist:
                min_in = j
                min_dist = dist
        clu_arr[i] = min_in
        if clu_arr[i] != pre_clu[i]:
            change_num += 1
    return clu_arr, change_num
# =========================update================================
def update(ori, clu_arr, n_clusters, label_data, label_arr):
    """Recompute each centroid as the mean of its unlabeled and labeled members.

    ori / clu_arr            -- unlabeled samples and their cluster indices
    label_data / label_arr   -- labeled samples and their cluster indices
    n_clusters               -- number of centroids to produce
    """
    centroids = []
    for cid in range(n_clusters):
        total = np.zeros(len(ori[0]))
        count = 0
        # Accumulate the unlabeled members of this cluster.
        for vec, assigned in zip(ori, clu_arr):
            if assigned == cid:
                total = np.add(total, vec)
                count += 1
        # Labeled samples keep contributing to their seeded cluster.
        for vec, assigned in zip(label_data, label_arr):
            if assigned == cid:
                total = np.add(total, vec)
                count += 1
        centroids.append(total / count)
    return centroids
def main(argv):
    """CLI entry point: load unlabeled/labeled CSVs, run semi_km, report timing.

    Usage: script unlabeled_data labeled_data max_iteration
    The iris file keeps features in columns 0-3 and the label in column 4;
    every other dataset is assumed to have the label in column 0 and
    features in columns 1-7.
    """
    if len(argv) < 4:
        print("請按照以下格式輸入: unlabeled_data labeled_data max_iteration")
        return
    if argv[1] == 'iris_unlabeled.csv':
        data = np.loadtxt(argv[1], delimiter=",", skiprows=1, usecols=(0, 1, 2, 3))
        labels = np.loadtxt(argv[1], delimiter=",", dtype=str, usecols=(4))
        labeled_data = np.loadtxt(argv[2], delimiter=",", skiprows=1, usecols=(0, 1, 2, 3))
        labeled = np.loadtxt(argv[2], delimiter=",", dtype=str, usecols=(4))
    else:
        data = np.loadtxt(argv[1], delimiter=",", skiprows=1, usecols=(1, 2, 3, 4, 5, 6, 7))
        labels = np.loadtxt(argv[1], delimiter=",", dtype=str, usecols=(0))
        labeled_data = np.loadtxt(argv[2], delimiter=",", skiprows=1, usecols=(1, 2, 3, 4, 5, 6, 7))
        labeled = np.loadtxt(argv[2], delimiter=",", dtype=str, usecols=(0))
    iteration = int(argv[3]) # maximum number of iterations
    start_time = datetime.datetime.now()
    model = semi_km(data, labels, labeled_data, labeled, iteration)
    end_time = datetime.datetime.now()
    print("time cost is:")
    print(end_time - start_time)
if __name__=='__main__':
    main(sys.argv)
| zkxshg/Test_of_machine_learning | Cluster/Cluster_semi_k_means_path.py | Cluster_semi_k_means_path.py | py | 5,867 | python | en | code | 0 | github-code | 13 |
24268125556 | import tensorflow as tf
import numpy as np
from models.unsupervised.autoencoders import dense_encoder,dense_decoder
from models.supervised.applications import ConditioningLayer
import matplotlib.pyplot as plt
input_shape=(224,224,3)
latent_dim=15
class CVAE(tf.keras.Model):
    """Variational autoencoder with dense encoder/decoder, trained with ELBO or MMD."""
    def __init__(self,latent_dim,input_shape,loss="elbo",layers_decoder=5,l2=0.0005,weights=None):
        super(CVAE,self).__init__()
        self.latent_dim=latent_dim
        self.loss=loss
        # Under ELBO the encoder emits mean and log-variance, hence 2 * latent_dim outputs.
        self.inference_net=dense_encoder(input_shape,
                                         latent_dim=(latent_dim+latent_dim) if loss=="elbo" else latent_dim,
                                         l2=l2,weights=weights)
        self.generative_net=dense_decoder(input_shape,latent_dim=latent_dim,layers=layers_decoder,l2=l2,activation='tanh')
    def call(self,inputs,training=False):
        """Full reconstruction pass: encode to a latent code, then decode."""
        z=self.embed(inputs,training=training)
        return self.decode(z,training=training)
    def sample(self,eps=None):
        """Decode latent codes; draws 100 standard-normal codes when eps is None."""
        if eps is None:
            eps=tf.random.normal(shape=(100,self.latent_dim))
        return self.decode(eps)
    def encode(self,x,training=False):
        """Return (mean, logvar) under ELBO, or the raw latent code otherwise."""
        if self.loss=="elbo":
            mean,logvar=tf.split(self.inference_net(x,training=training),num_or_size_splits=2,axis=1)
            return mean,logvar
        else:
            return self.inference_net(x,training=training)
    def reparameterize(self,mean,logvar):
        """Reparameterization trick: z = mean + sigma * eps, eps ~ N(0, I)."""
        eps=tf.random.normal(shape=mean.shape)
        return eps*tf.exp(logvar * .5) + mean
    def embed(self,x,training=False):
        """Map inputs to latent codes (stochastic sample under ELBO)."""
        if self.loss=="elbo":
            mean, logvar = self.encode(x, training=training)
            return self.reparameterize(mean,logvar)
        else:
            return self.encode(x,training=training)
    def reconstruct(self,x):
        """Inference-mode encode/decode round trip."""
        z=self.embed(x,training=False)
        return self.decode(z,training=False)
    def decode(self,z,training=False):
        """Decode latent codes into image-shaped outputs."""
        logits=self.generative_net(z,training=training)
        return logits
class Classifier(tf.keras.Model):
    """Softmax classification head on top of a CVAE encoder's convolutional trunk."""
    def __init__(self,model,n_out=5,l2=0.0005,
                 # dropout=0.05
                 ):
        super(Classifier,self).__init__()
        self.loss=model.loss
        # Keep a handle on the full encoder; the truncated copy below is what runs.
        self.inference_net_=model.inference_net
        inf_net_input=model.inference_net.input
        inf_net_output=model.inference_net.get_layer("ConvnetGlobalAveragePooling").output
        # Truncate the encoder at global average pooling to use it as a feature extractor.
        self.inference_net=tf.keras.Model(inputs=inf_net_input,outputs=inf_net_output)
        self.DenseLayer=tf.keras.layers.Dense(n_out,name="predictions",activation= "softmax" ,
                                              kernel_regularizer=tf.keras.regularizers.l2(l2),
                                              kernel_initializer=tf.keras.initializers.he_uniform(),
                                              )
    def call(self,inputs, *args, **kwargs):
        """Features -> class probabilities."""
        x=self.inference_net(inputs)
        x=self.DenseLayer(x)
        return x
    def split_sample(self,inputs):
        """Split a (mean, logvar) tensor and draw one reparameterized sample."""
        mean, logvar = tf.split(inputs, num_or_size_splits=2, axis=1)
        eps = tf.random.normal(tf.shape(mean))
        x = eps * tf.exp(logvar * .5) + mean
        return x
    def freeze_conv_layers(self):
        """Freeze the feature extractor for head-only fine-tuning."""
        for l in self.inference_net.layers:
            l.trainable=False
    def unfreeze_conv_layers(self,last=None):
        """Unfreeze all feature-extractor layers (``last`` is currently ignored)."""
        for l in self.inference_net.layers:
            l.trainable=True
class ClassifierEnsemble(tf.keras.Model):
    """Classifier conditioned on a learned per-rater embedding.

    Takes (image, rater-id) pairs so the same image can be scored differently
    per rater; image features are fused with the rater embedding through a
    ConditioningLayer before the softmax head.
    """
    def __init__(self,model,n_out=5,n_raters=18,n_embedding=16,l2=0.0005,l2_embeddings=0.0005,
                 # dropout=0.05,
                 conditioning="multiply",
                 activation_conditioning="sigmoid"
                 ):
        super(ClassifierEnsemble,self).__init__()
        self.loss=model.loss
        self.inference_net_=model.inference_net
        inf_net_input = model.inference_net.input
        inf_net_output = model.inference_net.get_layer("ConvnetGlobalAveragePooling").output
        # Truncated encoder reused as the image feature extractor.
        self.inference_net=tf.keras.Model(inputs=inf_net_input,outputs=inf_net_output)
        # One learned embedding vector per rater.
        self.embedding_layer = tf.keras.layers.Embedding(input_dim=n_raters, output_dim=n_embedding,
                                                         input_length=None,
                                                         embeddings_initializer=tf.keras.initializers.he_uniform(),
                                                         # embeddings_initializer=tf.keras.initializers.RandomNormal(
                                                         #     stddev=0.01),
                                                         embeddings_regularizer=tf.keras.regularizers.l2(l2_embeddings),
                                                         )
        self.reshape_layer = tf.keras.layers.Flatten()
        self.conditioning_layer=ConditioningLayer(conditioning=conditioning,l2=l2_embeddings,name="Conditioning_Layer",
                                                  activation=activation_conditioning)
        self.logits = tf.keras.layers.Dense(n_out, activation='softmax',
                                            kernel_regularizer=tf.keras.regularizers.l2(l2),
                                            kernel_initializer=tf.keras.initializers.he_uniform(),
                                            name="logits")
    def call(self,inputs, *args, **kwargs):
        """(image batch, rater-id batch) -> class probabilities."""
        im, emb = inputs
        x1 = self.inference_net(im)
        x2 = self.embedding_layer(emb)
        x2 = self.reshape_layer(x2)
        x=self.conditioning_layer([x1,x2])
        x = self.logits(x)
        return x
    def embed_images(self,images):
        """Return the image feature vectors."""
        return self.inference_net(images)
    def embed_reviewer(self,rev):
        """Return the embedding vector(s) for rater id(s) ``rev``."""
        return self.embedding_layer(rev)
class PerceptionLoss:
    """Perceptual loss using shallow (block1_pool) VGG16 features, frozen ImageNet weights."""
    def __init__(self):
        model=tf.keras.applications.vgg16.VGG16(input_shape=(224,224,3),weights='imagenet',include_top=False)
        model.trainable=False
        for layer in model.layers:
            layer.trainable=False
        # Only the first pooling block is compared: cheap and texture-sensitive.
        self.feature_model=tf.keras.Model(inputs=model.input,outputs=model.get_layer("block1_pool").output)
    def extract_features(self,x):
        """Return the block1_pool feature maps for a batch of images."""
        return self.feature_model(x)
    def loss(self,x,y):
        """Root of the batch-mean summed squared feature difference between x and y."""
        f_x=self.extract_features(x)
        f_y=self.extract_features(y)
        diff=tf.square(f_x-f_y)
        diff2=tf.reduce_sum(diff,axis=[1,2,3])
        l=tf.sqrt(tf.reduce_mean(diff2))
        return l
perceptual_loss=PerceptionLoss()
def compute_kernel(x, y):
    """Pairwise RBF kernel matrix between the rows of x and y.

    Bandwidth scales with the feature dimension (mean squared difference
    divided by dim), the common choice for MMD on latent codes.
    """
    x_size = tf.shape(x)[0]
    y_size = tf.shape(y)[0]
    dim = tf.shape(x)[1]
    # Broadcast both sets to (x_size, y_size, dim) to form all pairs.
    tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
    tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
    return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y):
    """Maximum mean discrepancy between samples x and y under the RBF kernel."""
    x_kernel = compute_kernel(x, x)
    y_kernel = compute_kernel(y, y)
    xy_kernel = compute_kernel(x, y)
    return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
def compute_kl(mean,logvar):
    """KL divergence of N(mean, exp(logvar)) from N(0, I), summed over all elements."""
    return 0.5*tf.reduce_sum(
        tf.exp(logvar)+tf.square(mean)-1-logvar
    )
def log_normal_pdf(sample,mean,logvar,raxis=1):
    """Log density of ``sample`` under a diagonal Gaussian, summed over axis ``raxis``."""
    log2pi=tf.math.log(2. * np.pi)
    return tf.reduce_sum(
        -.5*((sample-mean)**2 * tf.exp(-logvar)+ logvar + log2pi),
        axis=raxis
    )
def compute_loss(model,x,beta=1,sigmoid=False,training=False,per_pixel=True):
    """Total VAE loss for a batch: reconstruction + beta * (KL or MMD).

    sigmoid    -- treat decoder outputs as logits vs. binarized input (BCE);
                  otherwise use per-pixel MSE.
    per_pixel  -- when False, add the VGG16 perceptual loss to the pixel loss.
    beta       -- weight of the regularizer (KL for "elbo", MMD otherwise).
    """
    if model.loss=="elbo":
        mean,logvar=model.encode(x,training=training)
        z=model.reparameterize(mean,logvar)
    else:
        z=model.embed(x,training=training)
    x_hat=model.decode(z,training=training)
    if sigmoid:
        x_binary = tf.cast(x > 0.5, tf.float32)
        recon_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_hat, labels=x_binary)
        # model.losses adds the layers' L2 regularization terms.
        logpx_z = tf.reduce_sum(recon_loss, axis=[1, 2, 3]) + tf.add_n(model.losses)
    else:
        recon_loss = tf.losses.mean_squared_error(x, x_hat)
        logpx_z = tf.reduce_sum(recon_loss, axis=[1, 2, ]) + tf.add_n(model.losses)
    if not per_pixel:
        perc_loss=perceptual_loss.loss(x,x_hat)
        logpx_z=logpx_z+perc_loss
    if model.loss == "elbo":
        loss=tf.reduce_mean(logpx_z)+beta*compute_kl(mean,logvar)
    elif model.loss=="mmd":
        # MMD between the aggregate posterior and the standard-normal prior.
        true_samples=tf.random.normal(z.shape)
        mmd_loss=compute_mmd(true_samples,z)
        loss=tf.reduce_mean(logpx_z)+beta*mmd_loss
    return loss
def compute_gradients(model,x,beta=1,sigmoid=False,training=True,per_pixel=True):
    """Return (gradients, loss) of compute_loss w.r.t. the model's trainable variables."""
    with tf.GradientTape() as tape:
        loss=compute_loss(model,x,beta=beta,sigmoid=sigmoid,training=training,per_pixel=per_pixel)
    return tape.gradient(loss,model.trainable_variables),loss
def apply_gradients(optimizer, gradients, variables):
    """Apply one optimizer step, pairing each gradient with its variable."""
    paired = zip(gradients, variables)
    optimizer.apply_gradients(paired)
def train_one_step(model,x,optimizer,norm,beta=1,sigmoid=False,training=True,per_pixel=True):
    """One optimization step with per-tensor gradient-norm clipping.

    Returns (raw gradients, clipped gradients, loss) for monitoring.
    """
    grad,loss=compute_gradients(model,x,beta=beta,sigmoid=sigmoid,training=training,per_pixel=per_pixel)
    grad_norm=[tf.clip_by_norm(t,norm) for t in grad]
    apply_gradients(optimizer,grad_norm,model.trainable_variables)
    return grad,grad_norm,loss
def generate_and_save_images(model,test_input,display=True,filename=None,transform_fun=None):
    """Decode ``test_input`` latent codes into a 4x4 grayscale grid; optionally save/show.

    transform_fun, if given, post-processes the decoded batch before plotting.
    Returns the matplotlib figure.
    """
    predictions=model.sample(test_input)
    if transform_fun is not None:
        predictions=transform_fun(predictions)
    fig=plt.figure(figsize=(7,7))
    for i in range(predictions.shape[0]):
        plt.subplot(4,4,i+1)
        plt.imshow(predictions[i,:,:,0],cmap="gray")
        plt.axis("off")
    if filename is not None: plt.savefig(filename)
    if display: plt.show()
    return fig
def plot_reconstruction(model,data,n_images=5,replications=6,shape=(224,224,1),transform_fun=None,show=True):
    """Plot originals (first column) next to several stochastic reconstructions.

    Each of ``n_images`` rows shows the input image followed by
    ``replications`` independent reconstructions (stochastic under ELBO).
    Returns the matplotlib figure.
    """
    h,w,_=shape
    # One mosaic image: rows = inputs, columns = original + reconstructions.
    output=np.zeros((n_images*h,(replications+1)*w))
    for i,image in enumerate(data.take(n_images).batch(1)):
        output[i*h:(i+1)*h,0:w]=image.numpy()[0,:,:,0]
        for j in range(1,replications+1):
            dec_image = model.reconstruct(image)
            output[i*h:(i+1)*h,j*w:(j+1)*w]=dec_image[0,:,:,0]
    fig=plt.figure(figsize=(12,12))
    if transform_fun: output=transform_fun(output)
    plt.imshow(output,cmap="gray",vmin=0,vmax=1)
    plt.axis("off")
    if show: plt.show()
    return fig
#
| pmwaniki/perch-analysis | models/unsupervised/vae.py | vae.py | py | 10,330 | python | en | code | 0 | github-code | 13 |
37792705995 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from functools import partial
from itertools import groupby
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import AccessError, UserError, ValidationError, Warning
from odoo.tools.misc import formatLang, get_lang
from odoo.osv import expression
from odoo.tools import float_is_zero, float_compare
class PurchaseOrderForStudents(models.Model):
    """Purchase-order extension applying withholding-tax (WHT) brackets.

    The applicable bracket depends on the partner's accumulated untaxed
    purchases in the current fiscal year (or their yearly forecast) and is
    attached to every order line on create/write. The bracket-selection
    loop, previously copy-pasted four times, lives in ``_get_wht_tax_rate``.
    """
    _inherit = "purchase.order"

    acumulated_amount = fields.Monetary(string='Acumulated amount', readonly=True, compute="_compute_acumulated_amount")
    partner_yearly_wht_forecast_amount = fields.Monetary(related="partner_id.yearly_wht_forecast_amount")
    exempt_upto = fields.Monetary(string='Exempt upto', readonly=True, compute="_compute_exempt_upto")
    exempt_upto_tax_percent = fields.Float(string='Exempt upto', readonly=True, compute="_compute_exempt_upto_tax_percent")

    def _get_wht_tax_rate(self, amount_to_compare):
        """Return the partner's WHT tax-rate record applicable to ``amount_to_compare``.

        Rates are filtered by the partner's WHT status and scanned in
        descending ``exempt_upto`` order, keeping the last bracket whose
        threshold does not exceed the amount. Returns False when none match.
        """
        self.ensure_one()
        tax_rate_ids = self.partner_id.tax_category_id.tax_rate_ids.filtered(
            lambda rate: self.partner_id.status_wht_id == rate.status).sorted("exempt_upto", True)
        tax_id = False
        for tax_aux_id in tax_rate_ids:
            if amount_to_compare < tax_aux_id.exempt_upto:
                continue
            tax_id = tax_aux_id
        return tax_id

    def _get_wht_amount_for_display(self):
        """Amount used by the compute methods: forecast whenever positive.

        NOTE(review): create/write instead use the forecast only when it
        exceeds the accumulated amount -- confirm which rule is intended.
        """
        self.ensure_one()
        yearly_wht_forecast_amount = self.partner_id.yearly_wht_forecast_amount
        if yearly_wht_forecast_amount > 0:
            return yearly_wht_forecast_amount
        return self.acumulated_amount

    def _compute_exempt_upto_tax_percent(self):
        """WHT percentage of the bracket matching the comparison amount (0.0 if none)."""
        for purchase in self:
            tax_id = purchase._get_wht_tax_rate(purchase._get_wht_amount_for_display())
            purchase.exempt_upto_tax_percent = tax_id.tax_id.amount if tax_id else 0.0

    def _compute_exempt_upto(self):
        """Exemption threshold of the bracket matching the comparison amount (0.0 if none)."""
        for purchase in self:
            tax_id = purchase._get_wht_tax_rate(purchase._get_wht_amount_for_display())
            purchase.exempt_upto = tax_id.exempt_upto if tax_id else 0.0

    def _compute_acumulated_amount(self):
        """Fiscal-year untaxed vendor-bill total for the partner plus this order's untaxed amount."""
        for purchase in self:
            company_id = self.env["res.company"].browse(self._context.get("allowed_company_ids"))
            fiscal_year_range = company_id.compute_fiscalyear_dates(datetime.now())
            all_partner_bills = self.env["account.move"].search([
                ("type", "in", ["in_invoice", "in_refund"]),
                ("partner_id", "=", purchase.partner_id.id),
                ("state", "in", ["draft", "posted"]),
                ("invoice_date", ">=", fiscal_year_range["date_from"]),
                ("invoice_date", "<=", fiscal_year_range["date_to"]),
                ("company_id", "=", company_id.id),
            ])
            # Vendor bills carry negative signed amounts, hence the sign flip.
            purchase.acumulated_amount = (
                -sum(record.amount_untaxed_signed for record in all_partner_bills)
                + purchase.amount_untaxed
            )

    def _apply_wht_tax(self):
        """Attach the applicable WHT tax to every line of this single order."""
        self.ensure_one()
        amount_to_compare = self.acumulated_amount
        yearly_wht_forecast_amount = self.partner_id.yearly_wht_forecast_amount
        # NOTE(review): intentionally different from the display rule above.
        if yearly_wht_forecast_amount > amount_to_compare:
            amount_to_compare = yearly_wht_forecast_amount
        if not amount_to_compare:
            return
        tax_id = self._get_wht_tax_rate(amount_to_compare)
        if tax_id:
            self.order_line.write({"taxes_id": [(4, tax_id.tax_id.id, 0)]})

    @api.model
    def create(self, vals):
        """Create the order(s), then attach the applicable WHT tax to their lines."""
        purchase_ids = super().create(vals)
        for purchase in purchase_ids:
            purchase._apply_wht_tax()
        return purchase_ids

    def write(self, vals):
        """Write the values, then re-apply the WHT tax on every affected order."""
        res = super().write(vals)
        for purchase in self:
            purchase._apply_wht_tax()
        return res
| MattPedrosa/haque-13 | pakistan_wht/models/purchase_order.py | purchase_order.py | py | 6,664 | python | en | code | 0 | github-code | 13 |
31277851485 |
import logging
import db_api as db
from swapper import swap
import configparser
from telegram import KeyboardButton, ReplyKeyboardMarkup, Update
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
filters,
)
# Console logging for the whole bot.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# Bot settings (token, file-path prefix) come from config.ini.
config = configparser.ConfigParser()
config.read("config.ini")
# Conversation state id. NOTE(review): this is range(1) -- a range object,
# not the int 0; it works because the same object is used as the state key.
RES_IMG = range(1)
TOKEN = config.get("telegram_bot", "token")
# Database field names for the stored image paths.
IMG_TARGET = "img_target"
IMG_RES = "img_res"
IMG_RESULT = "img_result"
# Directory prefix under which downloaded/generated images are written.
PREFIX = config.get("files", "prefix")
async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /start: register the user in the DB and show the command keyboard."""
    db.add_user(str(update.message.chat_id))
    keyboard = [
        [
            KeyboardButton("/start", callback_data="1"),
            KeyboardButton("/cancel", callback_data="2"),
        ],
    ]
    reply_markup = ReplyKeyboardMarkup(keyboard)
    # Asks the user (in Russian) to send the photo to cut the face from.
    await update.message.reply_text(
        "Просто пришли мне фото, откуда мы будем вырезать лицо.",
        reply_markup=reply_markup
    )
async def target_img(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Save the received target photo (face donor) and ask for the source photo."""
    user = update.message.from_user  # NOTE(review): unused
    photo_file = await update.message.photo[-1].get_file()
    chat_id = update.message.chat_id
    # One target file per chat, overwritten on every new photo.
    img_url = PREFIX+"target_"+str(update.message.chat_id)+".jpg"
    await photo_file.download(img_url)
    db.update_img(str(chat_id), IMG_TARGET, img_url)
    # Asks (in Russian) for the photo whose face should be replaced.
    await update.message.reply_text(
        "Отлично! Теперь пришли мне фото, где нужно заменить лицо."
    )
    return RES_IMG
async def res_img(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """Save the source photo, run the face swap, and send the result back."""
    user = update.message.from_user  # NOTE(review): unused
    photo_file = await update.message.photo[-1].get_file()
    chat_id = update.message.chat_id
    img_url = PREFIX+"res_"+str(update.message.chat_id)+".jpg"
    await photo_file.download(img_url)
    db.update_img(str(chat_id), IMG_RES, img_url)
    # "Done! Here is the result:" (in Russian).
    await update.message.reply_text(
        "Готово! Вот Результат:"
    )
    print(db.get_target_img(str(chat_id)))
    # swap() writes the composited image to PREFIX + "result_<chat_id>.jpg".
    img_url = swap(db.get_target_img(str(chat_id)), img_url, str(chat_id))
    await context.bot.send_photo(chat_id, photo=open(PREFIX+"result_"+str(chat_id)+".jpg", 'rb'))
    return ConversationHandler.END
async def cancel(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
    """End the conversation on /cancel."""
    user = update.message.from_user  # NOTE(review): unused
    await update.message.reply_text(
        "Отменено..."
    )
    return ConversationHandler.END
def main() -> None:
    """Start the bot and poll for updates."""
    app = Application.builder().token(TOKEN).build()
    # Any photo starts the two-step conversation: target image, then source.
    conversation = ConversationHandler(
        entry_points=[MessageHandler(filters.PHOTO, target_img)],
        states={RES_IMG: [MessageHandler(filters.PHOTO, res_img)]},
        fallbacks=[CommandHandler("cancel", cancel), CommandHandler("start", start)],
    )
    app.add_handler(conversation)
    app.add_handler(CommandHandler("start", start))
    app.run_polling()
if __name__ == "__main__":
main() | maratsher/FaceSwapperBot | bot.py | bot.py | py | 3,737 | python | ru | code | 1 | github-code | 13 |
20082270452 | # TRS-80 MC-10 Micro Color Computer
# This code is part of the process to convert a .vb file to .wav 'cassette' file for the MC-10
# Step 1: C10Builder.py: Convert .vb code to .C10 format
# Step 2: C10ToWav.py: Convert .C10 code to .WAV format
# This file covers step 1
# Albert M Thalheim
# January 2021
# Description from the MC-10 Service Manual
# The standard MC-10 tape is composed of the following items:
# 1. A leader consisting of 128 bytes of hex 55
# 2. A Namefile block (21 bytes)
# 3. A blank section of tape approximately equal to 0.5 seconds in length; this allows BASIC time to evaluate the Namefile.
# 4. A second leader of 128 bytes of Hex 55
# 5. One or more Data blocks
# 6. An End of File block (6 bytes)
# ==============================================================
# The 'C10' file contains all the above EXCEPT the blank section
# Conversion to 'WAV' file SHOULD INSERT the blank section.
# ==============================================================
# (Description continues)
# The block format for Data, Namefile or EndOfFile blocks is as follows:
# 1. One leader byte - 55H
# 2. One sync byte - 3CH
# 3. One block type byte:
# 00H - Namefile
# 01H = Data
# FFH =End of File
# 4. One block length byte - 00H to FFH
# 5. Data - 0 to 255 bytes
# 6. One checksum byte - the sum of all the data plus block type and block length
# 7. One leader byte - 55H
# The Namefile block is a standard block with a length of 15 bytes (0FH) and the block type equals 00H.
# The 15 bytes of data provide information to BASIC and are employed as described below:
# 1. Eight bytes for the program name
# 2. One file type byte:
# 00H = BASIC
# 01H = Data
# 02H = Machine Language
# 3. One ASCII flag byte:
# 00H = Binary
# FFH =ASCII
# 4. One Gap flag byte:
# 01H = Continuous
# FFH= Gaps
# 5. Two bytes for the start address of a machine language program
# 6. Two bytes for the load address of a machine language program
#The End of File block is a standard block with a length of 0 and the block type equal to FFH.
# (Description ends)
# Global variables
# Program's name
programName = ''
# Output file path
c10Filepath = ''
# Previous code line number, to ensure a logical order is maintained
previousLineNo = -1
# According to the MC10 memory map, x4346 (17222) is the usual start of BASIC programs:
memoryAddress = 17222
# Conversion table from text to byte code
mc10Codes = {}
# VB code scanned, validated and converted to a byte array
C10CodeBytes = bytearray()
def main():
    """Drive the .vb -> .C10 conversion: pick a file, tokenize it, export."""
    global c10Filepath
    global programName
    getMC10VbCodes()
    # Select .vb file
    from tkinter.filedialog import askopenfilename
    vbFilepath = askopenfilename()
    if (vbFilepath == ''):
        from tkinter import messagebox
        messagebox.showinfo('Error', 'No file selected.')
        exit()
    # Set C10 filepath (same directory); rindex raises if there is no extension
    lastIndex = vbFilepath.rindex('.')
    extension = vbFilepath[lastIndex:]
    if (extension.upper() != '.VB'):
        from tkinter import messagebox
        messagebox.showinfo('Error', 'Expected format is .vb\nWas provided with ' + extension)
        exit()
    vbFileRoot = vbFilepath[:lastIndex]
    c10Filepath = vbFileRoot + '.C10'
    # Set C10 program filename: last path segment, truncated to 8 chars, upper-cased
    programName = vbFileRoot.rsplit('/', 1).pop()
    programName = programName[:8]
    programName = programName.upper()
    getCodeLines(vbFilepath)
    # End of code delimitation: two zero bytes terminate the BASIC program
    C10CodeBytes.extend(b'\x00')
    C10CodeBytes.extend(b'\x00')
    buildAndExportC10Bytes()
    from tkinter import messagebox
    messagebox.showinfo('Done', 'Conversion complete.')
# End of main code
#==========================================================
# Step 1: Format code lines (lineNo_space_code) to (memAddress_lineNo_code) into a byte array
# 1.a) Process all code lines
def getCodeLines(txtFilepath):
    """Tokenize every line of the .vb source into the global C10CodeBytes.

    Each emitted line is [next-line address hi/lo, lineNo hi/lo,
    tokens..., 0x00].  Blank lines and '#' comment lines are skipped.
    """
    global C10CodeBytes
    global memoryAddress
    # Process file text one line at a time
    with open(txtFilepath, 'r', encoding='ascii') as f:
        codeLines = f.readlines()
        for codeLine in codeLines:
            codeLine = codeLine.strip()
            if codeLine == '':
                # Skip empty lines
                codeLine = ''
            elif codeLine.startswith('#'):
                # Skip comments
                codeLine = ''
            else:
                codeFragment = buildByteLine(codeLine)
                # Next line start address
                # NOTE(review): the address advances by the fragment length
                # BEFORE the two pointer bytes are prepended below; confirm
                # this matches the MC-10 next-line pointer convention.
                memoryAddress += len(codeFragment)
                first = memoryAddress // 256
                last = memoryAddress % 256
                codeFragment.insert(0, first)
                codeFragment.insert(1, last)
                # Add code bytes to global array
                C10CodeBytes.extend(codeFragment)
# 1.b) Process single code line
def buildByteLine(codeLine):
    """Tokenize one BASIC source line into its C10 byte form.

    Input is "lineNo <space> code"; output is a bytearray of
    [lineNo-hi, lineNo-lo, tokenized code..., 0x00].  Shows a message box
    and exits when line numbers are not strictly increasing.
    """
    global previousLineNo
    # Step 1: Get and validate code line number
    # a) Get line number:
    firstSpaceIndex = codeLine.index(' ')
    lineNo = int(codeLine[:firstSpaceIndex])
    # b) Remove line number from codeLine:
    codeLine = codeLine[firstSpaceIndex:]
    codeLine = codeLine.strip()
    # c) Validate number: exit when invalid
    if (lineNo <= previousLineNo):
        from tkinter import messagebox
        messagebox.showinfo('Error', 'LineNo ' + str(lineNo) + ' follows lineNo ' + str(previousLineNo))
        exit()
    # d) Keep reference of last line No
    previousLineNo = lineNo
    # Start process
    codeFragment = bytearray()
    # Line number, big-endian 16-bit
    first = lineNo // 256
    last = lineNo % 256
    codeFragment.extend(first.to_bytes(1, 'big'))
    codeFragment.extend(last.to_bytes(1, 'big'))
    # Code
    while codeLine != '':
        if (codeLine.startswith('"')):
            # String literal: copied verbatim (quotes included), never tokenized
            lastIndex = codeLine.index('"',1)
            codeFragment.extend(str.encode(codeLine[:lastIndex + 1]))
            codeLine = codeLine[lastIndex + 1:]
        else:
            codeWord = getCodeWord(codeLine)
            if (codeWord is None):
                # Not a keyword: emit the single character as-is
                codeFragment.extend(str.encode(codeLine[:1]))
                codeLine = codeLine[1:]
            else:
                # Keyword: emit its token byte(s) from the code table
                codeFragment.extend(mc10Codes[codeWord])
                if len(codeWord) == len(codeLine):
                    codeLine = ''
                else:
                    codeLine = codeLine[len(codeWord):]
        codeLine = codeLine.strip()
    # End of code line
    codeFragment.extend(b'\x00')
    # return fragment
    return codeFragment
def getMC10VbCodes():
    """Load the token table mapping BASIC keywords to MC-10 byte codes.

    Populates the module-level ``mc10Codes`` dict from MC10-Codes.txt,
    where each non-empty, non-comment line is "<hex><TAB><keyword>".
    """
    global mc10Codes
    # Get MC10 Codes: (keyword: binary value)
    mc10Codes = {}
    # Raw string avoids the invalid "\M" escape warning in the Windows path.
    with open(r'.\MC10-Codes.txt', 'r') as f:
        for line in f.readlines():
            line = line.strip()
            if line == '' or line.startswith('#'):
                # Skip blank lines and comments
                continue
            values = line.split('\t')
            # values[0] = hex byte code, values[1] = keyword text
            mc10Codes[values[1]] = bytes.fromhex(values[0])
# 1.c) scan code line (at start position) for codeWord
def getCodeWord(codeLine):
    """Return the first token-table keyword that prefixes codeLine
    (case-insensitive match), or None when no keyword matches."""
    normalized = codeLine.upper()
    return next(
        (keyword for keyword in mc10Codes if normalized.startswith(keyword)),
        None,
    )
#==========================================================
# Step 2: Build C10 data
# Cut data in 255 bytes chunks
# Build a data block from each chunk
# Append each chunk to the dataBytes array
def buildC10Data():
    """Split the tokenized program into <=255-byte chunks and frame each
    one as a data block (type 0x01)."""
    framed = bytearray()
    total = len(C10CodeBytes)
    for start in range(0, total, 255):
        chunk = C10CodeBytes[start:start + 255]
        block = bytearray(b'\x01')           # block type: data
        block.append(len(chunk))             # block length (0-255)
        block.extend(chunk)                  # payload
        framed.extend(buildBlock(block))     # leader/sync/checksum framing
    return framed
#==========================================================
# Step 3: Export C10 data
# 3.a)
def buildAndExportC10Bytes():
    """Assemble the full C10 image (leaders, namefile, data, EOF) and
    write it to c10Filepath."""
    image = bytearray()
    image.extend(buildLeaderOf55s())               # first 128-byte leader
    image.extend(buildC10Header())                 # namefile block
    image.extend(buildLeaderOf55s())               # second leader
    image.extend(buildC10Data())                   # program data blocks
    image.extend(buildBlock(bytes([0xff, 0x00])))  # end-of-file block
    with open(c10Filepath, 'w+b') as f:
        f.write(image)
# 3.b)
def buildLeaderOf55s():
    """Return the standard 128-byte tape leader of 0x55 bytes."""
    # Single multiplication instead of 128 one-byte extend() calls.
    return bytearray(b'\x55' * 128)
# 3.c)
def buildC10Header():
    """Build the 15-byte Namefile block describing the BASIC program."""
    header = bytearray(b'\x00')    # block type: Namefile
    header.append(0x0f)            # block length: 15 bytes
    # Program name, space-padded to exactly 8 characters.
    header.extend(str.encode(programName))
    padding = 8 - len(programName)
    if padding > 0:
        header.extend(b'\x20' * padding)
    header.append(0x00)            # file type: BASIC
    header.append(0x00)            # ASCII flag: binary
    header.append(0x00)            # gap flag
    # Start address of a machine language program (unused for BASIC).
    header.extend(b'\x00\x00')
    # Load address bytes (second byte kept at 0x14, as in the original).
    header.extend(b'\x00\x14')
    return buildBlock(header)
#==========================================================
# 4: Build Block
def buildBlock(data):
    """Frame a block: 0x55 leader, 0x3C sync, data, checksum, 0x55 trailer.

    The checksum is the low byte of the sum of all data bytes (the data
    already includes the block type and length bytes).
    """
    dataBlock = bytearray()
    # Block start
    dataBlock.extend(b'\x55')
    # Block sync byte
    dataBlock.extend(b'\x3c')
    # Block Data
    dataBlock.extend(data)
    # Block Checksum — use the builtin sum() instead of shadowing it.
    checksum = sum(data) % 256
    dataBlock.extend(checksum.to_bytes(1, 'big'))
    # Block end byte
    dataBlock.extend(b'\x55')
    return dataBlock
#==========================================================
# Call the main routine
main()
# EOF -\\-
| athalheim/TRS-80-MC-10 | vbToC10.py | vbToC10.py | py | 10,312 | python | en | code | 1 | github-code | 13 |
31070979639 | # -*- encoding: utf-8 -*-
"""
PyCharm main
2022年07月11日
by littlefean
"""
from typing import *
class Position:
    """2-D point using __slots__ (instances get no per-instance __dict__)."""
    # The slot list is consumed when the class body executes; editing it
    # afterwards has no effect on existing slot descriptors.
    __slots__ = ["x", "y"]
    def __init__(self, x, y):
        self.x = x
        self.y = y
def main():
    # Demonstrates that __slots__ is fixed at class creation time: mutating
    # the __slots__ list afterwards does NOT create a new slot descriptor,
    # so the final assignment raises AttributeError.
    p = Position(1, 3)
    p.__class__.__slots__.append("z")
    p.__slots__.append("z")
    p.z = 15  # AttributeError: slot "z" was never materialized
    return None
if __name__ == "__main__":
main()
| Littlefean/SmartPython | 043 面向对象-slots/main.py | main.py | py | 386 | python | en | code | 173 | github-code | 13 |
12003406903 | import copy
import itertools
import sys
def run(program):
    """Run an Intcode program as a coroutine-style generator.

    Yields at every input instruction (opcode 3); the caller primes with
    next() and supplies each input via send().  Each yield produces the
    most recent output value (None before any output).  The final yield,
    at opcode 99, carries the last output.

    Raises ValueError on an unrecognized opcode.
    """
    pc = 0
    in_value = None
    out_value = None

    def parameter(index):
        # Digit (index+1) of the instruction selects the mode:
        # non-zero = immediate (the operand itself), 0 = position (a pointer).
        return program[pc + index] \
            if program[pc] // (10 ** (index + 1)) % 10 \
            else program[program[pc + index]]

    while True:
        opcode = program[pc] % 100
        if opcode == 1:      # add
            program[program[pc + 3]] = parameter(1) + parameter(2)
            pc += 4
        elif opcode == 2:    # multiply
            program[program[pc + 3]] = parameter(1) * parameter(2)
            pc += 4
        elif opcode == 3:    # input
            in_value = yield out_value
            program[program[pc + 1]] = in_value
            pc += 2
        elif opcode == 4:    # output
            out_value = parameter(1)
            pc += 2
        elif opcode == 5:    # jump-if-true
            pc = parameter(2) if parameter(1) else pc + 3
        elif opcode == 6:    # jump-if-false
            pc = parameter(2) if not parameter(1) else pc + 3
        elif opcode == 7:    # less-than
            program[program[pc + 3]] = 1 if parameter(1) < parameter(2) else 0
            pc += 4
        elif opcode == 8:    # equals
            program[program[pc + 3]] = 1 if parameter(1) == parameter(2) else 0
            pc += 4
        elif opcode == 99:   # halt: expose the final output, then stop
            yield out_value
            return
        else:
            # raise (not assert): assert is stripped under "python -O"
            raise ValueError("Unrecognized opcode: %d" % opcode)
def run_amplifiers(ordering, program):
    """Chain one Intcode amplifier per phase and loop the signal through
    them until the active amplifier halts; return the last signal."""
    amps = [run(copy.copy(program)) for _ in ordering]
    # Prime each generator, then feed it its phase setting.
    for amp, phase in zip(amps, ordering):
        next(amp)
        amp.send(phase)
    signal = 0
    position = 0
    while True:
        try:
            signal = amps[position].send(signal)
        except StopIteration:
            return signal
        position = (position + 1) % len(amps)
def day07(filename):
    """Solve both parts of AoC 2019 day 7 for the given input file."""
    with open(filename) as handle:
        program = [int(tok) for tok in handle.read().rstrip().split(",")]
    # Part 1: single pass through amplifiers with phases 0-4.
    print(max(run_amplifiers(p, program) for p in itertools.permutations(range(5))))
    # Part 2: feedback loop with phases 5-9.
    print(max(run_amplifiers(p, program) for p in itertools.permutations(range(5, 10))))
if __name__ == "__main__":
day07(sys.argv[1])
| tkieft/adventofcode-2019 | day07/day07.py | day07.py | py | 2,111 | python | en | code | 0 | github-code | 13 |
import tweepy

name_list = []
save_to = open('name_list.txt', 'a')

# API keys that you saved earlier (requires a Twitter developer account)
api_key = "x"
api_secrets = "x"
access_token = "x"
access_secret = "x"

# Authenticate to Twitter
auth = tweepy.OAuthHandler(api_key, api_secrets)
auth.set_access_token(access_token, access_secret)

# Create API object
api = tweepy.API(auth, wait_on_rate_limit=True)

try:
    api.verify_credentials()
    print('Successful Authentication')
except Exception:
    # Catch Exception (not a bare except) so Ctrl-C still interrupts.
    print('Failed authentication')

# the screen_name of the targeted user (the one you want to get the followers of)
screen_name1 = "x"

counter = 0

# get the followers of the targeted user
# the items() count is the number of followers of the targeted user
for follower in tweepy.Cursor(api.followers, screen_name1).items(2476):
    print(follower.screen_name)
    save_to.write(follower.screen_name + '\n')
    counter += 1
    print(counter)

# reminder that there is a time limit for each API call. Check the twitter dev docs for more info.
save_to.close()  # flush the collected names to disk
70427120657 | from solutions import BaseSolution
class Solution(BaseSolution):
    input_file = '16.txt'

    def __str__(self):
        return 'Day 16: Permutation Promenade'

    def _move(self, programs, m, i):
        """Apply one dance move: s=spin, x=exchange (positions), p=partner (names)."""
        l = len(programs)
        if m == 's':
            # Spin: move the last r programs to the front.  Slicing with
            # l - r (instead of -r) also handles r == 0, where
            # programs[-0:] would duplicate the whole list.
            r = int(i)
            return programs[l - r:] + programs[:l - r]
        if m == 'x':
            # Exchange: swap the programs at positions x and y.
            x, y = (int(s) for s in i.split('/'))
            programs[x], programs[y] = programs[y], programs[x]
            return programs
        if m == 'p':
            # Partner: swap the programs named xp and yp.
            xp, yp = i.split('/')
            x = programs.index(xp)
            y = programs.index(yp)
            programs[x], programs[y] = programs[y], programs[x]
            return programs

    def _dance(self, programs, moves):
        """Apply a full move sequence; moves look like 's3', 'x3/4', 'pe/b'."""
        for m in moves:
            programs = self._move(programs, m[0], m[1:])
        return programs

    def solve(self, puzzle_input, n=16):
        """Part 1: order of the n programs after one dance."""
        programs = [chr(c) for c in range(97, 97 + n)]
        moves = puzzle_input.split(',')
        return ''.join(self._dance(programs, moves))

    def solve_again(self, puzzle_input, n=16):
        """Part 2: order after one billion dances, using cycle detection."""
        moves = puzzle_input.split(',')
        initial = [chr(c) for c in range(97, 97 + n)]
        # Forward n to solve() (the original dropped it, always using 16).
        programs = list(self.solve(puzzle_input, n))
        dances = 1
        # Find the cycle length: dance until the initial order reappears.
        while not programs == initial:
            programs = self._dance(programs, moves)
            dances += 1
        # Only 10**9 mod cycle-length dances actually change the result.
        for _ in range(10 ** 9 % dances):
            programs = self._dance(programs, moves)
        return ''.join(programs)
if __name__ == '__main__':
solution = Solution()
solution.show_results()
| madr/julkalendern | 2017-python/solutions/day_16.py | day_16.py | py | 1,635 | python | en | code | 3 | github-code | 13 |
42638229896 | from __future__ import print_function, division, absolute_import
from threading import Thread
import scipy.ndimage
import numpy as np
from torch.multiprocessing import Pool, Process
import pdb
import os
import torch
def worker_distance_transform(args):
    """Pool worker: Euclidean distance transform for one image.

    args is [batch_index, image_tensor, return_indices_flag]; returns the
    nearest-zero-pixel coordinate indices as a LongTensor.
    """
    bid = args[0]
    image = args[1]
    return_indices = args[2]
    # print('{} , {} '.format(bid, os.getpid()))
    image = image.numpy()
    # NOTE(review): the tuple unpack below assumes return_indices is True;
    # with False, distance_transform_edt returns only the distances and
    # this unpack would fail — confirm callers always pass True.
    dist, dist_indices = scipy.ndimage.distance_transform_edt(
        image, return_indices=return_indices)
    dist_indices = torch.LongTensor(dist_indices)
    return dist_indices
class BatchedSignedDistance:
    '''
    A batched version of computing signed distance in parallel, using a
    multiprocessing pool of worker_distance_transform workers.
    '''

    def __init__(self, num_workers=4, return_indices=True):
        # num_workers: pool size used per forward() call.
        # return_indices: forwarded to scipy's distance_transform_edt.
        self.num_workers = num_workers
        self.return_indices = return_indices
        return

    def forward(self, images, ):
        """Run the distance transform over every image in the batch and
        stack the per-image results into one tensor."""
        parameters = []
        pool = Pool(self.num_workers)
        for bx in range(len(images)):
            # Honor the configured flag instead of hard-coding True, so the
            # return_indices constructor argument is actually used.
            bx_params = [bx, images[bx], self.return_indices]
            parameters.append(bx_params, )
        predictions = pool.map(
            worker_distance_transform, parameters)
        predictions = torch.stack(predictions)
        pool.close()
        pool.join()
        return predictions
if __name__ == "__main__":
batchedSignedDist = BatchedSignedDistance(num_workers=1)
a = np.array(([0, 1, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 0, 0]))
data = torch.FloatTensor(np.stack([1-a, 1-a, 1-a, 1-a], axis=0))
outputs = batchedSignedDist.forward(data)
outputs = batchedSignedDist.forward(data)
pdb.set_trace()
print ('Done')
| nileshkulkarni/acsm | acsm/nnutils/signed_distance.py | signed_distance.py | py | 1,780 | python | en | code | 64 | github-code | 13 |
73853359696 | import requests
def get_intelligence(hero_name):
    """Return the intelligence powerstat for a hero, or None on any failure.

    Performs two superheroapi.com calls: a name search to resolve the hero
    id, then a powerstats lookup.  Returns None when a request fails, the
    search has no results, or the stat is not numeric.
    """
    token = "2619421814940190"
    request_one = f"https://www.superheroapi.com/api.php/{token}/search/{hero_name}"
    resp = requests.get(request_one)
    if resp.status_code != 200 or resp.headers['content-type'] != "application/json":
        return None
    d = resp.json()
    results = d.get('results') or []
    if not results:
        # The search succeeded but matched no hero.
        return None
    id = results[0]['id']
    request_two = f"https://www.superheroapi.com/api.php/{token}/{id}/powerstats"
    resp = requests.get(request_two)
    if resp.status_code != 200 or resp.headers['content-type'] != "application/json":
        return None
    d = resp.json()
    intel = d['intelligence']
    try:
        return int(intel)
    except (TypeError, ValueError):
        # The API reports "null" for unknown stats; treat it as missing.
        return None
names = ["Hulk", "Captain america", "Thanos"]
hero_intels = []
for hero_name in names:
hero_intel = get_intelligence(hero_name)
d = {"name": hero_name, "intelligence": hero_intel}
hero_intels.append(d)
hero_intels.sort(key=lambda x: x['intelligence'])
print('the most intelligent superhero:')
print(hero_intels[-1])
| mariabimatova/home-work | request/request_hw_task1.py | request_hw_task1.py | py | 993 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def minWindow(self, s, t):
        """Return the smallest substring of s containing every char of t
        (with multiplicity), or '' if none exists.  Sliding window, O(len(s)).
        """
        count = collections.defaultdict(int)
        for c in t:
            count[c] += 1
        total_cnt = len(t)
        # Sentinel: any real window is at most len(s) long.
        min_len = len(s) + 1
        start_idx = res_idx = idx = 0
        while idx < len(s):
            c = s[idx]
            if count[c] > 0:
                total_cnt -= 1
            count[c] -= 1
            # While the window [start_idx, idx] is valid, record and shrink it.
            while start_idx <= idx and total_cnt == 0:
                # Window length is idx - start_idx + 1 (both ends inclusive);
                # the original off-by-one dropped the window's last char.
                if idx - start_idx + 1 < min_len:
                    min_len = idx - start_idx + 1
                    res_idx = start_idx
                if count[s[start_idx]] == 0:
                    total_cnt += 1
                count[s[start_idx]] += 1
                start_idx += 1
            idx += 1
        return s[res_idx:res_idx + min_len] if min_len <= len(s) else ''
37621969000 | #!/usr/bin/env python
# -*- coding: utf8 -*-
from gimpfu import *
def mattefade( img, draw, useColor, colorScheme, orientation, flipColors, colorOpacity, colorOffset, overExposure, oeAmount, addVignette, sharpAmount):
    """Apply a matte faded look to the image.

    Builds the effect from stacked layers: a curves adjustment, optional
    masked sharpening, optional vignette, optional color-gradient screen
    and optional over-exposure highlight.  Saves and restores the GIMP
    context foreground/background colors.
    """
    current_f=pdb.gimp_context_get_foreground()
    current_b=pdb.gimp_context_get_background()
    #clean start
    img.disable_undo()
    pdb.gimp_context_push()
    #get height and width
    # Bug fix: gimp_selection_all was referenced without being called (a
    # no-op attribute access); select the whole image before taking bounds.
    pdb.gimp_selection_all(img)
    sel_size=pdb.gimp_selection_bounds(img)
    w=sel_size[3]-sel_size[1]
    h=sel_size[4]-sel_size[2]
    if orientation == 0:
        #vertical gradient
        #set color gradient start point
        startX = w/2
        startY = 0
        #set color gradient end points
        endX = w/2
        endY = h
    else:
        #horizontal gradient
        #set color gradient start point
        startX = 0
        startY = h/2
        #set color gradient end points
        endX = w
        endY = h/2
    #set image center and corner points
    centerX = w/2
    centerY = h/2
    cornerX = w
    cornerY = h
    ###
    ### Adjust curves
    ###
    #layer copy from background and use this as the starting point for processing
    copyLayer1=pdb.gimp_layer_new_from_visible(img, img, "AdjustCurves")
    pdb.gimp_image_insert_layer(img, copyLayer1, None, -1)
    #adjust curves to lift the shadows (the classic matte "faded black")
    curveArray = [0, 0.22846441947565543, 0.28650137741046827, 0.348314606741573, 1, 1]
    pdb.gimp_drawable_curves_spline(copyLayer1, HISTOGRAM_VALUE, 6, curveArray)
    ###
    ### Add sharpening
    ###
    if sharpAmount > 0:
        #set other contexts for sharpen gradient.
        pdb.gimp_context_set_opacity(70)
        pdb.gimp_context_set_paint_mode(LAYER_MODE_NORMAL)
        pdb.gimp_context_set_gradient_fg_bg_rgb()
        pdb.gimp_context_set_gradient_blend_color_space(1)
        pdb.gimp_context_set_gradient_reverse(FALSE)
        #set colors to black and white
        pdb.gimp_context_set_foreground((255, 255, 255))
        pdb.gimp_context_set_background((0, 0, 0))
        #copy the visible image for sharpening
        copyLayer6=pdb.gimp_layer_new_from_visible(img, img, "Sharpen")
        pdb.gimp_image_insert_layer(img, copyLayer6, None, -1)
        #unsharp mask settings
        sharpRadius = 2
        sharpThreshold = 0
        sharpOffset = 50
        #add unsharp mask
        pdb.plug_in_unsharp_mask(img, copyLayer6, sharpRadius, sharpAmount, sharpThreshold)
        #add layer mask with black fill
        layerMask6 = copyLayer6.create_mask(1)
        copyLayer6.add_mask(layerMask6)
        #apply a blend to the layer mask that fades out sharpening away from the center of the image
        pdb.gimp_drawable_edit_gradient_fill(layerMask6, 2, sharpOffset, FALSE, 1, 0, TRUE, centerX, centerY, cornerX, cornerY)
    ###
    ### Add vignette
    ###
    if addVignette == TRUE:
        #set other contexts for vignette gradient.
        pdb.gimp_context_set_opacity(35)
        pdb.gimp_context_set_paint_mode(LAYER_MODE_NORMAL)
        pdb.gimp_context_set_gradient_fg_transparent()
        pdb.gimp_context_set_gradient_blend_color_space(1)
        pdb.gimp_context_set_gradient_reverse(TRUE)
        #set foreground color
        pdb.gimp_context_set_foreground((0, 0, 0))
        #add a new layer for vignette
        copyLayer5=pdb.gimp_layer_new(img, w, h, 1, "Vignette", 100.0, 23)
        pdb.gimp_image_insert_layer(img, copyLayer5, None, -2)
        pdb.gimp_drawable_fill(copyLayer5, 3)
        #add radial gradient w/start point in center of image
        vignetteOffset = 80
        pdb.gimp_drawable_edit_gradient_fill(copyLayer5, 2, vignetteOffset, FALSE, 1, 0, TRUE, centerX, centerY, cornerX, cornerY)
    ###
    ### Add color overlay
    ###
    if useColor == TRUE:
        #set contexts for gradient
        pdb.gimp_context_set_opacity(colorOpacity)
        pdb.gimp_context_set_paint_mode(LAYER_MODE_ADDITION)
        pdb.gimp_context_set_gradient_fg_bg_rgb()
        pdb.gimp_context_set_gradient_blend_color_space(1)
        pdb.gimp_context_set_gradient_reverse(flipColors)
        #set color contexts per the chosen scheme
        if colorScheme == 0:
            #warm colors to violet and orange
            pdb.gimp_context_set_foreground((198, 4, 198))
            pdb.gimp_context_set_background((227, 145, 3))
        elif colorScheme == 1:
            #cool colors to purple and teal
            pdb.gimp_context_set_foreground((124, 63, 156))
            pdb.gimp_context_set_background((10, 139, 166))
        elif colorScheme == 2:
            #neutral colors to purple and neutral gray
            pdb.gimp_context_set_foreground((162, 77, 189))
            pdb.gimp_context_set_background((189, 181, 149))
        elif colorScheme == 3:
            #green to transparent
            pdb.gimp_context_set_gradient_fg_transparent()
            pdb.gimp_context_set_foreground((16, 230, 3))
        elif colorScheme == 4:
            #bright orange to transparent
            pdb.gimp_context_set_gradient_fg_transparent()
            pdb.gimp_context_set_foreground((236, 180, 102))
        else:
            #dark orange to transparent
            pdb.gimp_context_set_gradient_fg_transparent()
            pdb.gimp_context_set_foreground((178, 77, 0))
        #create new layer and fill with transparency
        copyLayer2=pdb.gimp_layer_new(img, w, h, 1, "ColorScreen", 100.0, 23)
        pdb.gimp_image_insert_layer(img, copyLayer2, None, -1)
        pdb.gimp_drawable_fill(copyLayer2, 3)
        #add gradient w/start point in top center of image finish in bottom center
        pdb.gimp_drawable_edit_gradient_fill(copyLayer2, 0, colorOffset, FALSE, 1, 0, TRUE, startX, startY, endX, endY )
    ###
    ### Add overexposure
    ###
    if overExposure == TRUE:
        #set other contexts for overexposure gradient.
        pdb.gimp_context_set_opacity(oeAmount)
        pdb.gimp_context_set_paint_mode(LAYER_MODE_ADDITION)
        pdb.gimp_context_set_gradient_fg_transparent()
        pdb.gimp_context_set_gradient_blend_color_space(1)
        pdb.gimp_context_set_gradient_reverse(FALSE)
        #set colors to white and black
        pdb.gimp_context_set_foreground((255, 255, 255))
        pdb.gimp_context_set_background((0, 0, 0))
        #set gradient offset to fixed amount
        oeOffset = 0
        copyLayer4=pdb.gimp_layer_new(img, w, h, 1, "OverExposure", 100.0, 23)
        pdb.gimp_image_insert_layer(img, copyLayer4, None, -1)
        pdb.gimp_drawable_fill(copyLayer4, 3)
        #add radial gradient w/start point in center of image finish in bottom center or center right edge
        pdb.gimp_drawable_edit_gradient_fill(copyLayer4, 2, oeOffset, FALSE, 1, 0, TRUE, centerX, centerY, endX, endY )
    #clean up: flush displays, restore context and saved colors
    pdb.gimp_displays_flush()
    pdb.gimp_context_pop()
    img.enable_undo()
    pdb.gimp_context_set_foreground(current_f)
    pdb.gimp_context_set_background(current_b)
register( "gimp_matte_fade",
"Add matte faded effect",
"Add matte faded effect",
"Simon Bland",
"(©) 2023 Simon Bland",
"2023-01-25",
"<Image>/Filters/Matte Fade",
'RGB*',
[
(PF_TOGGLE, "useColor", "Use colors", 1),
(PF_OPTION, "colorScheme", " Color scheme", 0, (['Violet/Yellow', 'Purple/Teal', 'Purple/Neutral' , 'Green/Transp.', 'Br.Orange/Transp.', 'Dk.Orange/Transp.'])),
(PF_OPTION, "orientation", " Orientation", 0, (['Vertical', 'Horizontal'])),
(PF_TOGGLE, "flipColors", " Flip colors", 0),
(PF_SLIDER, "colorOpacity", " Opacity", 25, (0, 100, 5)),
(PF_SLIDER, "colorOffset", " Offset", 20, (0, 100, 5)),
(PF_TOGGLE, "overExposure", "Over expose", 1),
(PF_SLIDER, "oeAmount", " Over exposure amt", 20, (0, 100, 5)),
(PF_TOGGLE, "addVignette", "Add vignette", 1),
(PF_SLIDER, "sharpAmount", "Sharpen amount", 1.0, (0, 5.0, 0.1))
],
'',
mattefade)
main()
| Nikkinoodl/Matte-Fade | gimp_matte_fade.py | gimp_matte_fade.py | py | 7,010 | python | en | code | 1 | github-code | 13 |
24634439184 | import torchaudio
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from scipy import signal
import multiprocessing
import pandas as pd
from paths import *
from preproc_mfccTransform import MFCCTransform
from misc_progress_bar import draw_progress_bar
transformer = MFCCTransform()
def process_files(src_dir, tgt_dir, files, save_name):
    """Resample each wav in `files` to 4240 samples, extract MFCC features
    and save the stacked (N, 25, 39) tensor as <save_name>.mfcc in tgt_dir.

    Files that fail to load are skipped (best effort); the error is printed.
    """
    feats = []
    for file in files:
        print(f"-----------------{file}------------------")
        try:
            wave, sr = torchaudio.load(os.path.join(src_dir, file))
            resampled_wave = torch.tensor(signal.resample(wave, 4240, axis=1))
            feats.append(transformer(resampled_wave).unsqueeze(0))
        except Exception as e:
            print(e)
    # Accumulate in a list and concatenate once: torch.cat inside the loop
    # copies the accumulator every iteration (quadratic in file count).
    if feats:
        mfcc_feats = torch.cat(feats, dim=0)
    else:
        mfcc_feats = torch.empty(0, 25, 39)
    torch.save(mfcc_feats, os.path.join(tgt_dir, f"{save_name}.mfcc"))
    print(save_name)
def generate_dict(csv_path):
    """Map each 'rec' value in the CSV to the sorted list of its 'idx' values."""
    frame = pd.read_csv(csv_path)
    indices = frame["idx"].tolist()
    # groupby(...).groups maps each rec to the row labels of its rows;
    # with the default RangeIndex those labels are list positions.
    grouped = frame.groupby('rec').groups
    return {
        rec: sorted(indices[pos] for pos in positions)
        for rec, positions in grouped.items()
    }
def divide_work(worklist, n):
    """Split worklist into consecutive chunks of at most n items."""
    return [worklist[start:start + n] for start in range(0, len(worklist), n)]
if __name__ == '__main__':
    # Batch driver: group annotated segments by recording, then process the
    # recordings in parallel, one pool per chunk of cpu_count() recordings.
    src_ = phone_seg_anno_rec_path
    tgt_ = phone_seg_anno_path
    workmap = generate_dict(os.path.join(phone_seg_anno_log_path, "log.csv"))
    worklist = sorted(workmap.keys())
    divided_worklist = divide_work(worklist, multiprocessing.cpu_count())
    for workchunk in divided_worklist:
        pool = multiprocessing.Pool(processes=16)
        for i, rec in enumerate(workchunk):
            print(f"Start {rec}")
            files = workmap[rec]
            # Segment file names are "<rec>_<8-digit zero-padded idx>.wav"
            filelist = [f"{rec}_{str(idx).zfill(8)}.wav" for idx in files]
            result = pool.apply_async(process_files, args=(src_, tgt_, filelist, rec))
        pool.close()
        pool.join()
38271380643 | #!/usr/bin/env python
# coding: utf-8
import plotly as py
# import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
import pandas as pd
pio.templates.default = "plotly_white"
pylt = py.offline.plot
def weekly_and_monthly(file_path, sheet_names, view_path):
    """Render the weekly/monthly execution-time trend chart to an HTML file.

    Reads two sheets (weekly, monthly) from the Excel workbook, plots both
    traces and adds a Week/Month toggle.  Returns the value of plotly's
    offline plot (the written file path).
    """
    def _hms(seconds):
        # Format a duration in seconds as "HH:MM:SS" for hover text.
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m, s)

    # Weekly sheet (the unused rounded-hours list was removed).
    weekly = pd.read_excel(file_path, sheet_name=sheet_names[0])
    week_dates = weekly["date_day"].values.tolist()
    week_times = weekly["Daily_execution_time"].values.tolist()
    week_labels = [_hms(t) for t in week_times]

    # Monthly sheet.
    monthly = pd.read_excel(file_path, sheet_name=sheet_names[1])
    month_dates = monthly["date_day"].values.tolist()
    month_times = monthly["Daily_execution_time"].values.tolist()
    month_labels = [_hms(t) for t in month_times]

    week_fig = go.Scatter(x=week_dates, y=week_times, text=week_labels,
                          line_shape='spline', marker_size=10, marker_color="rgb(177,195,210)")
    month_fig = go.Scatter(x=month_dates, y=month_times, text=month_labels,
                           line_shape='linear', marker_size=10, marker_color="rgb(177,195,210)")
    # Week/Month toggle buttons above the chart.
    layoutnew = list(
        [dict(
            direction="right"
            , active=0
            , x=0.6
            , y=1.134
            , buttons=list(
                [dict(label="Week"
                      , method="update"
                      , args=[{"visible": [True, False]}
                              , {'showlegend': False}]),
                 dict(label="Month"
                      , method="update"
                      , args=[{"visible": [False, True]}
                              , {'showlegend': False}])
                 ])
        )]
    )
    layout = dict(title=dict(text="Time Trend", x=0.5), showlegend=False, updatemenus=layoutnew)
    fignew = go.Figure(data=[week_fig, month_fig], layout=layout)
    return pylt(fignew, filename=view_path + 'weekly_and_monthly.html', auto_open=False)
def Top_Counts(file_path, sheet_names, view_path):
    """Render the "Total Counts TopN" bar chart (counts in millions) to HTML."""
    sheet = pd.read_excel(file_path, sheet_name=sheet_names)
    package_names = sheet["PACKAGE_NAME"].values.tolist()
    raw_counts = sheet["COUNTS"].values.tolist()
    # Convert absolute counts to millions for readable bar labels.
    millions = [round(c / 1000000, 2) for c in raw_counts]
    layout4 = go.Layout(title=dict(text="Total Counts Top%d" % (len(package_names)), x=0.5),
                        xaxis=dict(title='Package Name'),
                        yaxis=dict(title='Counts(million)'))
    fig4 = go.Figure(go.Bar(x=package_names, y=millions, text=millions,
                            textposition="outside", marker_color="#9AC0CD"),
                     layout=layout4)
    return pylt(fig4, filename=view_path + 'Top_Counts.html', auto_open=False)
def Top_Time(file_path, sheet_names, view_path):
    """Render the "Total Time Consuming TopN" bar chart to an HTML file.

    Bars show total seconds; labels show the duration as HH:MM:SS.
    """
    sheet = pd.read_excel(file_path, sheet_name=sheet_names)
    names = sheet["PACKAGE_NAME"].values.tolist()
    times = sheet["Total_Times"].values.tolist()
    # HH:MM:SS labels for each bar (the unused rounded-hours list and the
    # commented-out debug prints were removed).
    labels = []
    for seconds in times:
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        labels.append("%02d:%02d:%02d" % (h, m, s))
    layout5 = go.Layout(title=dict(text="Total Time Consuming Top%d" % (len(names)), x=0.5),
                        xaxis=dict(title='Package Name'),
                        yaxis=dict(title='Time Consuming(S)'))
    fig5 = go.Figure(go.Bar(x=names, y=times, text=labels, marker_color="#9AC0CD",
                            textposition="outside"),
                     layout=layout5, )
    return pylt(fig5, filename=view_path + 'Top_Time.html', auto_open=False)
| zystudent/zystudent | DWH_BI/view_first_half.py | view_first_half.py | py | 4,153 | python | en | code | 0 | github-code | 13 |
34346159442 | import asyncio
import logging
import ssl
import time
from collections import ChainMap
from time import monotonic
from types import MappingProxyType
from typing import (
Any,
AnyStr,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from async_timeout import timeout as atimeout
from aioredis_cluster.abc import AbcChannel, AbcCluster, AbcPool
from aioredis_cluster.aioredis import Redis, create_pool
from aioredis_cluster.aioredis.errors import (
ConnectionForcedCloseError,
ProtocolError,
ReplyError,
)
from aioredis_cluster.command_exec import ExecuteContext, ExecuteFailProps, ExecuteProps
from aioredis_cluster.command_info import CommandInfo, extract_keys
from aioredis_cluster.commands import RedisCluster
from aioredis_cluster.crc import key_slot
from aioredis_cluster.errors import (
AskError,
ClusterClosedError,
ClusterDownError,
ConnectTimeoutError,
LoadingError,
MovedError,
RedisClusterError,
TryAgainError,
UncoveredSlotError,
closed_errors,
network_errors,
)
from aioredis_cluster.manager import ClusterManager, ClusterState
from aioredis_cluster.pool import ConnectionsPool
from aioredis_cluster.pooler import Pooler
from aioredis_cluster.structs import Address, ClusterNode
from aioredis_cluster.typedef import (
AioredisAddress,
BytesOrStr,
CommandsFactory,
PubsubResponse,
)
from aioredis_cluster.util import ensure_bytes, iter_ensure_bytes, retry_backoff
__all__ = (
"AbcCluster",
"Cluster",
)
logger = logging.getLogger(__name__)
class Cluster(AbcCluster):
_connection_errors = network_errors + closed_errors + (asyncio.TimeoutError,)
POOL_MINSIZE = 1
POOL_MAXSIZE = 10
MAX_ATTEMPTS = 10
RETRY_MIN_DELAY = 0.05 # 50ms
RETRY_MAX_DELAY = 1.0
ATTEMPT_TIMEOUT = 5.0
CONNECT_TIMEOUT = 1.0
def __init__(
self,
startup_nodes: Sequence[Address],
*,
# failover options
retry_min_delay: Optional[float] = None,
retry_max_delay: Optional[float] = None,
max_attempts: Optional[int] = None,
attempt_timeout: Optional[float] = None,
# manager options
state_reload_interval: Optional[float] = None,
follow_cluster: Optional[bool] = None,
# pool options
idle_connection_timeout: Optional[float] = None,
# node client options
username: Optional[str] = None,
password: Optional[str] = None,
encoding: Optional[str] = None,
pool_minsize: Optional[int] = None,
pool_maxsize: Optional[int] = None,
commands_factory: Optional[CommandsFactory] = None,
connect_timeout: Optional[float] = None,
pool_cls: Optional[Type[AbcPool]] = None,
ssl: Optional[Union[bool, ssl.SSLContext]] = None,
) -> None:
if len(startup_nodes) < 1:
raise ValueError("startup_nodes must be one at least")
if retry_min_delay is None:
retry_min_delay = self.RETRY_MIN_DELAY
elif retry_min_delay < 0:
raise ValueError("min_delay value is negative")
self._retry_min_delay = retry_min_delay
if retry_max_delay is None:
retry_max_delay = self.RETRY_MAX_DELAY
elif retry_max_delay < 0:
raise ValueError("max_delay value is negative")
elif retry_max_delay < retry_min_delay:
logger.warning(
"retry_max_delay < retry_min_delay: %s < %s", retry_max_delay, retry_min_delay
)
retry_max_delay = retry_min_delay
self._retry_max_delay = retry_max_delay
if max_attempts is None:
max_attempts = self.MAX_ATTEMPTS
elif max_attempts == 0:
# probably infinity (CAUTION!!!)
max_attempts = (1 << 64) - 1
elif max_attempts < 0:
raise ValueError("max_attempts must be >= 0")
self._max_attempts = max_attempts
if attempt_timeout is None:
attempt_timeout = self.ATTEMPT_TIMEOUT
elif attempt_timeout <= 0:
raise ValueError("attempt_timeout must be > 0")
self._attempt_timeout = float(attempt_timeout)
self._username = username
self._password = password
self._encoding = encoding
if pool_minsize is None:
pool_minsize = self.POOL_MINSIZE
if pool_maxsize is None:
pool_maxsize = self.POOL_MAXSIZE
if pool_minsize < 1 or pool_maxsize < 1:
raise ValueError("pool_minsize and pool_maxsize must be greater than 1")
if pool_maxsize < pool_minsize:
logger.warning(
"pool_maxsize less than pool_minsize: %s < %s", pool_maxsize, pool_minsize
)
pool_maxsize = pool_minsize
self._pool_minsize = pool_minsize
self._pool_maxsize = pool_maxsize
if commands_factory is None:
commands_factory = RedisCluster
self._commands_factory = commands_factory
if connect_timeout is None:
connect_timeout = self.CONNECT_TIMEOUT
self._connect_timeout = connect_timeout
if pool_cls is None:
pool_cls = ConnectionsPool
self._pool_cls = pool_cls
self._pool_ssl = ssl
self._idle_connection_timeout = idle_connection_timeout
self._pooler = Pooler(self._create_default_pool, reap_frequency=idle_connection_timeout)
self._manager = ClusterManager(
startup_nodes,
self._pooler,
state_reload_interval=state_reload_interval,
follow_cluster=follow_cluster,
execute_timeout=self._attempt_timeout,
)
self._loop = asyncio.get_running_loop()
self._closing: Optional[asyncio.Task] = None
self._closing_event = asyncio.Event()
self._closed = False
def __repr__(self) -> str:
state_stats = ""
if self._manager._state:
state_stats = self._manager._state.repr_stats()
return f"<{type(self).__name__} {state_stats}>"
    async def execute(self, *args, **kwargs) -> Any:
        """Execute redis command.

        Picks a target node from the current cluster state (by key slot when
        the command has keys) and retries on failure up to ``ctx.max_attempts``
        times; failure classification, state-reload marking and retry slowdown
        happen in ``_on_execute_fail``, which re-raises fatal errors.
        """
        ctx = self._make_exec_context(args, kwargs)
        keys = self._extract_command_keys(ctx.cmd_info, ctx.cmd)
        if keys:
            ctx.slot = self.determine_slot(*keys)
        exec_fail_props: Optional[ExecuteFailProps] = None
        while ctx.attempt < ctx.max_attempts:
            self._check_closed()
            ctx.attempt += 1
            state = await self._manager.get_state()
            # the previous failure (if any) influences node selection
            exec_props = self._make_execute_props(state, ctx, exec_fail_props)
            if exec_props.reload_state_required:
                self._manager.require_reload_state()
            node_addr = exec_props.node_addr
            # reset previous execute fail properties
            prev_exec_fail_props = exec_fail_props
            exec_fail_props = None
            try:
                result = await self._try_execute(ctx, exec_props, prev_exec_fail_props)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                exec_fail_props = ExecuteFailProps(
                    node_addr=node_addr,
                    error=e,
                )
            if exec_fail_props:
                # either prepares the next attempt or re-raises the error
                await self._on_execute_fail(ctx, exec_fail_props)
                continue
            break
        return result
async def execute_pubsub(self, *args, **kwargs) -> List[PubsubResponse]:
"""Execute Redis (p)subscribe/(p)unsubscribe commands."""
ctx = self._make_exec_context(args, kwargs)
if len(ctx.cmd) != 2:
raise ValueError("Only one channel supported in cluster mode")
# get first pattern and calculate slot
channel_name = ctx.cmd[1]
is_pattern = ctx.cmd_name in {"PSUBSCRIBE", "PUNSUBSCRIBE"}
is_unsubscribe = ctx.cmd_name in {"UNSUBSCRIBE", "PUNSUBSCRIBE", "SUNSUBSCRIBE"}
ctx.slot = self.determine_slot(channel_name)
exec_fail_props: Optional[ExecuteFailProps] = None
result: List[List]
while ctx.attempt < ctx.max_attempts:
self._check_closed()
ctx.attempt += 1
state = await self._manager.get_state()
if exec_fail_props is None:
ready_node_addr = self._pooler.get_pubsub_addr(
channel_name,
is_pattern=is_pattern,
is_sharded=ctx.is_sharded_pubsub,
)
# if unsuscribe command and no node found for pattern
# probably pubsub connection is already close
if is_unsubscribe and ready_node_addr is None:
result = [[ctx.cmd[0], channel_name, 0]]
break
exec_props = self._make_execute_props(
state,
ctx,
exec_fail_props,
ready_node_addr=ready_node_addr,
)
ready_node_addr = None
if exec_props.reload_state_required:
self._manager.require_reload_state()
exec_fail_props = None
try:
pool = await self._pooler.ensure_pool(exec_props.node_addr)
async with atimeout(self._attempt_timeout):
result = await pool.execute_pubsub(*ctx.cmd, **ctx.kwargs)
except asyncio.CancelledError:
raise
except Exception as e:
exec_fail_props = ExecuteFailProps(
node_addr=exec_props.node_addr,
error=e,
)
if exec_fail_props:
if is_unsubscribe:
self._pooler.remove_pubsub_channel(
channel_name,
is_pattern=is_pattern,
is_sharded=ctx.is_sharded_pubsub,
)
await self._on_execute_fail(ctx, exec_fail_props)
continue
self._pooler.add_pubsub_channel(
exec_props.node_addr,
channel_name,
is_pattern=is_pattern,
is_sharded=ctx.is_sharded_pubsub,
)
break
return [(cmd, name, count) for cmd, name, count in result]
def close(self) -> None:
"""Perform connection(s) close and resources cleanup."""
self._closed = True
if self._closing is None:
self._closing = self._loop.create_task(self._do_close())
self._closing.add_done_callback(lambda f: self._closing_event.set())
async def wait_closed(self) -> None:
"""
Coroutine waiting until all resources are closed/released/cleaned up.
"""
await self._closing_event.wait()
@property
def closed(self) -> bool:
"""Flag indicating if connection is closing or already closed."""
return self._closed
@property
def db(self) -> int:
"""Current selected DB index. Always 0 for cluster"""
return 0
@property
def encoding(self) -> Optional[str]:
"""Current set connection codec."""
return self._encoding
@property
def in_pubsub(self) -> int:
"""Returns number of subscribed channels.
Can be tested as bool indicating Pub/Sub mode state.
"""
return sum(p.in_pubsub for p in self._pooler.pools())
@property
def pubsub_channels(self) -> Mapping[str, AbcChannel]:
"""Read-only channels dict."""
chain_map = ChainMap(*(p.pubsub_channels for p in self._pooler.pools()))
return MappingProxyType(chain_map)
@property
def sharded_pubsub_channels(self) -> Mapping[str, AbcChannel]:
"""Read-only channels dict."""
chain_map = ChainMap(
*(p.sharded_pubsub_channels for p in self._pooler.pools()), # type: ignore
)
return MappingProxyType(chain_map)
@property
def pubsub_patterns(self) -> Mapping[str, AbcChannel]:
"""Read-only patterns dict."""
return MappingProxyType(ChainMap(*(p.pubsub_patterns for p in self._pooler.pools())))
@property
def address(self) -> Tuple[str, int]:
"""Connection address."""
addr = self._manager._startup_nodes[0]
return addr.host, addr.port
async def auth(self, password: str) -> None:
self._check_closed()
self._password = password
async def authorize(pool) -> None:
nonlocal password
await pool.auth(password)
await self._pooler.batch_op(authorize)
async def auth_with_username(self, username: str, password: str) -> None:
self._check_closed()
self._username = username
self._password = password
async def authorize(pool) -> None:
nonlocal username
nonlocal password
await pool.auth_with_username(username, password)
await self._pooler.batch_op(authorize)
def determine_slot(self, first_key: bytes, *keys: bytes) -> int:
slot: int = key_slot(first_key)
for k in keys:
if slot != key_slot(k):
raise RedisClusterError("all keys must map to the same key slot")
return slot
async def all_masters(self) -> List[Redis]:
ctx = self._make_exec_context((b"PING",), {})
exec_fail_props: Optional[ExecuteFailProps] = None
pools: List[Redis] = []
while ctx.attempt < ctx.max_attempts:
self._check_closed()
ctx.attempt += 1
state = await self._manager.get_state()
exec_fail_props = None
execute_timeout = self._attempt_timeout
pools = []
try:
for node in state._data.masters:
pool = await self._pooler.ensure_pool(node.addr)
start_exec_t = monotonic()
await self._pool_execute(pool, ctx.cmd, ctx.kwargs, timeout=execute_timeout)
execute_timeout = max(0, execute_timeout - (monotonic() - start_exec_t))
pools.append(self._commands_factory(pool))
except asyncio.CancelledError:
raise
except Exception as e:
exec_fail_props = ExecuteFailProps(
node_addr=node.addr,
error=e,
)
if exec_fail_props:
await self._on_execute_fail(ctx, exec_fail_props)
continue
break
return pools
async def keys_master(self, key: AnyStr, *keys: AnyStr) -> RedisCluster:
self._check_closed()
slot = self.determine_slot(ensure_bytes(key), *iter_ensure_bytes(keys))
ctx = self._make_exec_context((b"EXISTS", key), {})
exec_fail_props: Optional[ExecuteFailProps] = None
while ctx.attempt < ctx.max_attempts:
self._check_closed()
ctx.attempt += 1
state = await self._manager.get_state()
try:
node = state.slot_master(slot)
except UncoveredSlotError:
logger.warning("No master node found by slot %d", slot)
self._manager.require_reload_state()
raise
exec_fail_props = None
try:
pool = await self._pooler.ensure_pool(node.addr)
await self._pool_execute(pool, ctx.cmd, ctx.kwargs, timeout=self._attempt_timeout)
except asyncio.CancelledError:
raise
except Exception as e:
exec_fail_props = ExecuteFailProps(
node_addr=node.addr,
error=e,
)
if exec_fail_props:
await self._on_execute_fail(ctx, exec_fail_props)
continue
break
return self._commands_factory(pool)
async def get_master_node_by_keys(self, key: AnyStr, *keys: AnyStr) -> ClusterNode:
slot = self.determine_slot(ensure_bytes(key), *iter_ensure_bytes(keys))
state = await self._manager.get_state()
try:
node = state.slot_master(slot)
except UncoveredSlotError:
logger.warning("No master node found by slot %d", slot)
self._manager.require_reload_state()
raise
return node
async def create_pool_by_addr(
self,
addr: Address,
*,
minsize: int = None,
maxsize: int = None,
) -> RedisCluster:
state = await self._manager.get_state()
if state.has_addr(addr) is False:
raise ValueError(f"Unknown node address {addr}")
opts: Dict[str, Any] = {}
if minsize is not None:
opts["minsize"] = minsize
if maxsize is not None:
opts["maxsize"] = maxsize
pool = await self._create_pool((addr.host, addr.port), opts)
return self._commands_factory(pool)
async def get_cluster_state(self) -> ClusterState:
return await self._manager.get_state()
def extract_keys(self, command_seq: Sequence[BytesOrStr]) -> List[bytes]:
if len(command_seq) < 1:
raise ValueError("No command")
command_seq_bytes = tuple(iter_ensure_bytes(command_seq))
cmd_info = self._manager.commands.get_info(command_seq_bytes[0])
keys = extract_keys(cmd_info, command_seq_bytes)
return keys
async def _init(self) -> None:
await self._manager._init()
async def _do_close(self) -> None:
await self._manager.close()
await self._pooler.close()
def _check_closed(self) -> None:
if self._closed:
raise ClusterClosedError()
def _make_exec_context(self, args: Sequence, kwargs) -> ExecuteContext:
cmd_info = self._manager.commands.get_info(args[0])
if cmd_info.is_unknown():
logger.warning("No info found for command %r", cmd_info.name)
ctx = ExecuteContext(
cmd=tuple(iter_ensure_bytes(args)),
cmd_info=cmd_info,
kwargs=kwargs,
max_attempts=self._max_attempts,
start_time=self._loop.time(),
)
return ctx
def _extract_command_keys(
self,
cmd_info: CommandInfo,
command_seq: Sequence[bytes],
) -> List[bytes]:
if cmd_info.is_unknown():
keys = []
else:
keys = extract_keys(cmd_info, command_seq)
return keys
async def _execute_retry_slowdown(self, attempt: int, max_attempts: int) -> None:
if attempt < 1:
return
delay = retry_backoff(attempt - 1, self._retry_min_delay, self._retry_max_delay)
logger.info("[%d/%d] Retry was slowed down by %.02fms", attempt, max_attempts, delay * 1000)
await asyncio.sleep(delay)
def _make_execute_props(
self,
state: ClusterState,
ctx: ExecuteContext,
fail_props: Optional[ExecuteFailProps] = None,
*,
ready_node_addr: Optional[Address] = None,
) -> ExecuteProps:
exec_props = ExecuteProps()
node_addr: Address
if fail_props:
# reraise exception for simplify classification
# instead of many isinstance conditions
exc = fail_props.error
if isinstance(exc, self._connection_errors):
if ctx.attempt <= 2 and ctx.slot != -1:
replica = state.random_slot_replica(ctx.slot)
if replica is not None:
node_addr = replica.addr
else:
node_addr = state.random_node().addr
else:
node_addr = state.random_node().addr
elif isinstance(exc, MovedError):
node_addr = Address(exc.info.host, exc.info.port)
elif isinstance(exc, AskError):
node_addr = Address(exc.info.host, exc.info.port)
exec_props.asking = exc.info.ask
elif isinstance(exc, TryAgainError):
node_addr = fail_props.node_addr
elif isinstance(exc, (ClusterDownError, LoadingError, ProtocolError)):
node_addr = state.random_node().addr
elif isinstance(exc, Exception):
# usualy never be done here
logger.exception("Uncaught exception on execute: %r", exc, exc_info=exc)
raise exc
if node_addr != fail_props.node_addr:
logger.info(
"Change node to execute %s on slot %s: %s->%s",
ctx.cmd_name,
ctx.slot,
fail_props.node_addr,
node_addr,
)
else:
if ready_node_addr is not None:
node_addr = ready_node_addr
elif ctx.slot != -1:
try:
node = state.slot_master(ctx.slot)
except UncoveredSlotError:
logger.warning("No node found by slot %d", ctx.slot)
# probably cluster is corrupted and
# we need try to recover cluster state
exec_props.reload_state_required = True
node = state.random_master()
node_addr = node.addr
else:
node_addr = state.random_master().addr
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Command %s properties: slot=%s, node=%s, repr=%r",
ctx.cmd_name,
ctx.slot,
node_addr,
ctx.cmd_for_repr(),
)
exec_props.node_addr = node_addr
return exec_props
async def _try_execute(
self, ctx: ExecuteContext, props: ExecuteProps, fail_props: Optional[ExecuteFailProps]
) -> Any:
node_addr = props.node_addr
attempt_log_prefix = ""
if ctx.attempt > 1:
attempt_log_prefix = f"[{ctx.attempt}/{ctx.max_attempts}] "
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"%sExecute %r on %s (slot:%s)",
attempt_log_prefix,
ctx.cmd_for_repr(),
node_addr,
ctx.slot,
)
pool = await self._pooler.ensure_pool(node_addr)
if props.asking:
logger.debug("Send ASKING to %s for command %r", node_addr, ctx.cmd_name)
result = await self._conn_execute(
pool,
ctx.cmd,
ctx.kwargs,
timeout=self._attempt_timeout,
asking=True,
)
else:
if ctx.cmd_info.is_blocking():
result = await self._conn_execute(
pool,
ctx.cmd,
ctx.kwargs,
timeout=self._attempt_timeout,
)
else:
result = await self._pool_execute(
pool,
ctx.cmd,
ctx.kwargs,
timeout=self._attempt_timeout,
)
return result
async def _on_execute_fail(self, ctx: ExecuteContext, fail_props: ExecuteFailProps) -> None:
# classify error for logging and
# set mark to reload cluster state if needed
exc = fail_props.error
if isinstance(exc, network_errors):
logger.warning("Connection problem with %s: %r", fail_props.node_addr, exc)
self._manager.require_reload_state()
elif isinstance(exc, ConnectionForcedCloseError):
logger.warning("Connection is force closed")
self._check_closed()
elif isinstance(exc, closed_errors):
logger.warning("Connection is closed: %r", exc)
self._manager.require_reload_state()
elif isinstance(exc, ConnectTimeoutError):
logger.warning("Connect to node is timed out: %s", exc)
self._manager.require_reload_state()
elif isinstance(exc, ClusterDownError):
logger.warning("Cluster is down: %s", exc)
self._manager.require_reload_state()
elif isinstance(exc, TryAgainError):
logger.warning("Try again error: %s", exc)
elif isinstance(exc, MovedError):
logger.info("MOVED reply: %s", exc)
self._manager.require_reload_state()
elif isinstance(exc, AskError):
logger.info("ASK reply: %s", exc)
elif isinstance(exc, LoadingError):
logger.warning("Cluster node %s is loading: %s", fail_props.node_addr, exc)
self._manager.require_reload_state()
elif isinstance(exc, ProtocolError):
logger.warning("Redis protocol error: %s", exc)
self._manager.require_reload_state()
elif isinstance(exc, ReplyError):
# all other reply error we must propagate to caller
logger.warning("Reply error: %s", exc)
raise exc
elif isinstance(exc, asyncio.TimeoutError):
is_readonly = ctx.cmd_info.is_readonly()
if is_readonly:
logger.warning(
"Read-Only command %s to %s is timed out",
ctx.cmd_name,
fail_props.node_addr,
)
else:
logger.warning(
"Non-idempotent command %s to %s is timed out. Abort command",
ctx.cmd_name,
fail_props.node_addr,
)
# node probably down
self._manager.require_reload_state()
# abort non-idempotent commands
if not is_readonly:
raise exc
elif isinstance(exc, Exception):
logger.exception("Unexpected error: %r", exc, exc_info=exc)
raise exc
if ctx.attempt >= ctx.max_attempts:
logger.warning(
"Command %s failed after %d attempts and %.03f sec",
ctx.cmd_name,
ctx.attempt,
self._loop.time() - ctx.start_time,
)
raise exc
# slowdown retry calls
await self._execute_retry_slowdown(ctx.attempt, ctx.max_attempts)
async def _create_default_pool(self, addr: AioredisAddress) -> AbcPool:
return await self._create_pool(addr)
async def _create_pool(
self,
addr: AioredisAddress,
opts: Dict[str, Any] = None,
) -> AbcPool:
if opts is None:
opts = {}
default_opts: Dict[str, Any] = dict(
pool_cls=self._pool_cls,
username=self._username,
password=self._password,
encoding=self._encoding,
minsize=self._pool_minsize,
maxsize=self._pool_maxsize,
create_connection_timeout=self._connect_timeout,
ssl=self._pool_ssl,
idle_connection_timeout=self._idle_connection_timeout,
)
pool = await create_pool(addr, **{**default_opts, **opts})
return pool
    async def _conn_execute(
        self,
        pool: AbcPool,
        args: Sequence,
        kwargs: Dict,
        *,
        timeout: Optional[float] = None,
        asking: bool = False,
    ) -> Any:
        """Execute a command on a single acquired connection of *pool*.

        The *timeout* budget covers both acquiring the connection and running
        the command. With ``asking=True`` an ``ASKING`` command is sent on the
        same connection right before the real command (ASK redirect handling).
        """
        result: Any
        tail_timeout = timeout
        acquire_start_t = time.monotonic()
        try:
            async with atimeout(tail_timeout):
                conn = await pool.acquire()
        except asyncio.TimeoutError:
            logger.warning(
                "Acquire connection from pool %s is timed out while processing command %s",
                pool.address,
                args[0],
            )
            raise
        # spend only the remaining part of the timeout budget on the command
        if tail_timeout is not None:
            tail_timeout -= time.monotonic() - acquire_start_t
        try:
            async with atimeout(tail_timeout):
                if asking:
                    # emulate command pipeline
                    results = await asyncio.gather(
                        conn.execute(b"ASKING"),
                        conn.execute(*args, **kwargs),
                        return_exceptions=True,
                    )
                    # raise first error
                    for result in results:
                        if isinstance(result, BaseException):
                            raise result
                    result = results[1]
                else:
                    result = await conn.execute(*args, **kwargs)
        except asyncio.TimeoutError:
            logger.warning(
                "Execute command %s on %s is timed out. Closing connection",
                args[0],
                pool.address,
            )
            # a reply may still be in flight on this connection -> drop it
            conn.close()
            await conn.wait_closed()
            raise
        finally:
            pool.release(conn)
        return result
    async def _pool_execute(
        self,
        pool: AbcPool,
        args: Sequence,
        kwargs: Dict,
        *,
        timeout: Optional[float] = None,
    ) -> Any:
        """Execute a command on any free connection of *pool*, bounded by *timeout*."""
        async with atimeout(timeout):
            result = await pool.execute(*args, **kwargs)
        return result
| DriverX/aioredis-cluster | src/aioredis_cluster/cluster.py | cluster.py | py | 29,490 | python | en | code | 24 | github-code | 13 |
29222301431 | import dataclasses
import logging
from typing import TYPE_CHECKING
import algosdk.transaction
from algosdk.account import address_from_private_key
from algosdk.atomic_transaction_composer import AccountTransactionSigner
from algosdk.transaction import AssetTransferTxn, PaymentTxn, SuggestedParams
from algokit_utils.models import Account
if TYPE_CHECKING:
from algosdk.v2client.algod import AlgodClient
__all__ = ["TransferParameters", "transfer", "TransferAssetParameters", "transfer_asset"]
logger = logging.getLogger(__name__)
@dataclasses.dataclass(kw_only=True)
class TransferParametersBase:
"""Parameters for transferring µALGOs between accounts
Args:
from_account (Account | AccountTransactionSigner): The account (with private key) or signer that will send
the µALGOs
to_address (str): The account address that will receive the µALGOs
suggested_params (SuggestedParams | None): (optional) transaction parameters
note (str | bytes | None): (optional) transaction note
fee_micro_algos (int | None): (optional) The flat fee you want to pay, useful for covering extra fees in a
transaction group or app call
max_fee_micro_algos (int | None): (optional) The maximum fee that you are happy to pay (default: unbounded)
- if this is set it's possible the transaction could get rejected during network congestion
"""
from_account: Account | AccountTransactionSigner
to_address: str
suggested_params: SuggestedParams | None = None
note: str | bytes | None = None
fee_micro_algos: int | None = None
max_fee_micro_algos: int | None = None
@dataclasses.dataclass(kw_only=True)
class TransferParameters(TransferParametersBase):
    """Parameters for transferring µALGOs between accounts"""

    # amount of µALGOs to send
    micro_algos: int
@dataclasses.dataclass(kw_only=True)
class TransferAssetParameters(TransferParametersBase):
"""Parameters for transferring assets between accounts
Args:
asset_id (int): The asset id that will be transfered
amount (int): The amount to send
clawback_from (str | None): An address of a target account from which to perform a clawback operation. Please
note, in such cases senderAccount must be equal to clawback field on ASA metadata.
"""
asset_id: int
amount: int
clawback_from: str | None = None
def _check_fee(transaction: PaymentTxn | AssetTransferTxn, max_fee: int | None) -> None:
    """Abort when the computed transaction fee exceeds *max_fee*; warn on congestion.

    Once a transaction has been constructed by algosdk, ``transaction.fee``
    is the total fee based on the current suggested fee-per-byte value.
    """
    if max_fee is None:
        return
    fee = transaction.fee
    if fee > max_fee:
        raise Exception(
            f"Cancelled transaction due to high network congestion fees. "
            f"Algorand suggested fees would cause this transaction to cost {fee} µALGOs. "
            f"Cap for this transaction is {max_fee} µALGOs."
        )
    if fee > algosdk.constants.MIN_TXN_FEE:
        logger.warning(
            f"Algorand network congestion fees are in effect. "
            f"This transaction will incur a fee of {fee} µALGOs."
        )
def transfer(client: "AlgodClient", parameters: TransferParameters) -> PaymentTxn:
    """Transfer µALGOs between accounts"""
    params = parameters
    # fetch suggested params lazily so callers may omit them
    params.suggested_params = parameters.suggested_params or client.suggested_params()
    from_account = params.from_account
    sender = address_from_private_key(from_account.private_key)  # type: ignore[no-untyped-call]
    # str notes are encoded to bytes; bytes notes pass through unchanged
    transaction = PaymentTxn(
        sender=sender,
        receiver=params.to_address,
        amt=params.micro_algos,
        note=params.note.encode("utf-8") if isinstance(params.note, str) else params.note,
        sp=params.suggested_params,
    )  # type: ignore[no-untyped-call]
    result = _send_transaction(client=client, transaction=transaction, parameters=params)
    assert isinstance(result, PaymentTxn)
    return result
def transfer_asset(client: "AlgodClient", parameters: TransferAssetParameters) -> AssetTransferTxn:
    """Transfer assets between accounts.

    Args:
        client: algod client used to fetch suggested params and send the txn.
        parameters: transfer details; see :class:`TransferAssetParameters`.

    Returns:
        The submitted :class:`AssetTransferTxn`.
    """
    params = parameters
    # fetch suggested params lazily so callers may omit them (done once;
    # the original recomputed this value redundantly)
    params.suggested_params = parameters.suggested_params or client.suggested_params()
    sender = address_from_private_key(params.from_account.private_key)  # type: ignore[no-untyped-call]
    xfer_txn = AssetTransferTxn(
        sp=params.suggested_params,
        sender=sender,
        receiver=params.to_address,
        close_assets_to=None,
        revocation_target=params.clawback_from,
        amt=params.amount,
        # encode str notes to bytes, consistent with transfer()
        note=params.note.encode("utf-8") if isinstance(params.note, str) else params.note,
        index=params.asset_id,
        rekey_to=None,
    )  # type: ignore[no-untyped-call]
    result = _send_transaction(client=client, transaction=xfer_txn, parameters=params)
    assert isinstance(result, AssetTransferTxn)
    return result
def _send_transaction(
    client: "AlgodClient",
    transaction: PaymentTxn | AssetTransferTxn,
    parameters: TransferAssetParameters | TransferParameters,
) -> PaymentTxn | AssetTransferTxn:
    """Apply fee settings, sign with the sender's key and submit *transaction*."""
    # an explicit flat fee overrides whatever algosdk computed
    if parameters.fee_micro_algos:
        transaction.fee = parameters.fee_micro_algos
    # only guard against congestion fees when the fee is byte-based
    if parameters.suggested_params is not None and not parameters.suggested_params.flat_fee:
        _check_fee(transaction, parameters.max_fee_micro_algos)
    signed_transaction = transaction.sign(parameters.from_account.private_key)  # type: ignore[no-untyped-call]
    client.send_transaction(signed_transaction)
    txid = transaction.get_txid()  # type: ignore[no-untyped-call]
    logger.debug(
        f"Sent transaction {txid} type={transaction.type} from "
        f"{address_from_private_key(parameters.from_account.private_key)}"  # type: ignore[no-untyped-call]
    )
    return transaction
| algorandfoundation/algokit-utils-py | src/algokit_utils/_transfer.py | _transfer.py | py | 5,947 | python | en | code | 4 | github-code | 13 |
40104755522 | """The module wallet.accounting.balance_sheet test the BalanceSheet implementation."""
from datetime import datetime
from zeppelin_cash.accounting.america import usd
from zeppelin_cash.accounting.balance_sheet import BalanceSheet
from zeppelin_cash.accounting.money import Money
def test_balance_sheet_init() -> None:
    """Test a BalanceSheet instance can be created."""
    fresh_sheet = BalanceSheet(datetime.now())
    # a new sheet starts with zero cash and zero current assets
    assert fresh_sheet.cash.quantity() == 0
    assert fresh_sheet.current_assets().quantity() == 0
def test_current_assets() -> None:
"""Test that current assets can be calculated."""
sheet = BalanceSheet(datetime.now())
sheet.cash = Money(100, usd())
sheet.accounts_receivable = Money(15, usd())
sheet.inventory = Money(10, usd())
sheet.prepaid_expenses = Money(11, usd())
assert sheet.current_assets().quantity() == 136
def test_net_fixed_assets() -> None:
"""Test that net fixed assets can be calculated."""
sheet = BalanceSheet(datetime.now())
sheet.fixed_assets_at_cost = Money(100, usd())
sheet.accumulated_depreciation = Money(10, usd())
assert sheet.net_fixed_assets().quantity() == 90
def test_total_assets() -> None:
"""Test that total assets can be calculated."""
sheet = BalanceSheet(datetime.now())
# current assets
sheet.cash = Money(100, usd())
sheet.accounts_receivable = Money(15, usd())
sheet.inventory = Money(10, usd())
sheet.prepaid_expenses = Money(11, usd())
# net fixed assets
sheet.fixed_assets_at_cost = Money(100, usd())
sheet.accumulated_depreciation = Money(10, usd())
# other assets
sheet.other_assets = Money(12, usd())
assert sheet.total_assets().quantity() == 238
def test_current_liabilities() -> None:
"""Test that the current liabilities can be calculated."""
sheet = BalanceSheet(datetime.now())
sheet.accounts_payable = Money(100, usd())
sheet.accrued_expenses = Money(50, usd())
sheet.current_portion_of_debt = Money(25, usd())
sheet.income_taxes_payable = Money(10, usd())
assert sheet.current_liabilities().quantity() == 185
def test_shareholders_equity() -> None:
"""Test that shareholders equity can be calculated."""
sheet = BalanceSheet(datetime.now())
sheet.capital_stock = Money(10, usd())
sheet.retained_earnings = Money(15, usd())
assert sheet.shareholders_equity().quantity() == 25
def test_total_liabilities_and_equity() -> None:
"""Test that total liabilities and equity can be calculated."""
sheet = BalanceSheet(datetime.now())
# current liabilities
sheet.accounts_payable = Money(100, usd())
sheet.accrued_expenses = Money(50, usd())
sheet.current_portion_of_debt = Money(25, usd())
sheet.income_taxes_payable = Money(10, usd())
# long term debt
sheet.long_term_debt = Money(22, usd())
# shareholder's equity
sheet.capital_stock = Money(10, usd())
sheet.retained_earnings = Money(15, usd())
assert sheet.total_liabilities_and_equity().quantity() == 232
def test_capital_employed() -> None:
"""Test that capital employed can be calculated."""
sheet = BalanceSheet(datetime.now())
# current assets
sheet.cash = Money(100, usd())
sheet.accounts_receivable = Money(15, usd())
sheet.inventory = Money(10, usd())
sheet.prepaid_expenses = Money(11, usd())
# current liabilities
sheet.accounts_payable = Money(100, usd())
sheet.accrued_expenses = Money(50, usd())
sheet.current_portion_of_debt = Money(25, usd())
sheet.income_taxes_payable = Money(10, usd())
assert sheet.capital_employed().quantity() == -49
def test_adding_balance_sheet() -> None:
    """Test that balance sheet instances can be added."""
    timestamp = datetime.now()
    left = BalanceSheet(timestamp)
    right = BalanceSheet(timestamp)
    left.cash = Money(100, usd())
    right.inventory = Money(500, usd())
    combined = left + right
    # 100 cash + 500 inventory
    assert combined.total_assets().quantity() == 600
def test_balance_sheet_as_string() -> None:
"""Test that balance sheet can be represented as a string."""
sheet = BalanceSheet(datetime.now())
sheet.cash = Money(100, usd())
sheet.inventory = Money(150, usd())
assert "Cash: {}".format(sheet.cash) in str(sheet)
assert "Inventory: {}".format(sheet.inventory) in str(sheet)
| GeorgeSaussy/zeplin_cash | src/zeppelin_cash/accounting/balance_sheet_test.py | balance_sheet_test.py | py | 4,319 | python | en | code | 0 | github-code | 13 |
4512128210 | #
# @lc app=leetcode.cn id=84 lang=python
#
# [84] 柱状图中最大的矩形
#
# https://leetcode-cn.com/problems/largest-rectangle-in-histogram/description/
#
# algorithms
# Hard (42.74%)
# Likes: 1213
# Dislikes: 0
# Total Accepted: 126.1K
# Total Submissions: 294.1K
# Testcase Example: '[2,1,5,6,2,3]'
#
# 给定 n 个非负整数,用来表示柱状图中各个柱子的高度。每个柱子彼此相邻,且宽度为 1 。
#
# 求在该柱状图中,能够勾勒出来的矩形的最大面积。
#
#
#
#
#
# 以上是柱状图的示例,其中每个柱子的宽度为 1,给定的高度为 [2,1,5,6,2,3]。
#
#
#
#
#
# 图中阴影部分为所能勾勒出的最大矩形面积,其面积为 10 个单位。
#
#
#
# 示例:
#
# 输入: [2,1,5,6,2,3]
# 输出: 10
#
#
# @lc code=start
class Solution(object):
    # LeetCode 84: largest rectangle in a histogram.
    #
    # getRangeMaxArea keeps the original divide-and-conquer implementation
    # for backward compatibility; largestRectangleArea now uses the classic
    # monotonic-stack algorithm, which runs in O(n) instead of the
    # divide-and-conquer's O(n^2) worst case on sorted input.

    def getRangeMaxArea(self, heights, b, e):
        """Return the largest rectangle area within the half-open range [b, e).

        Divide and conquer: the best rectangle either spans the whole range
        (its height is then the range minimum) or lies entirely on one side
        of the minimum bar.
        """
        # empty range
        if b >= e:
            return 0
        # a single bar: area equals its height (width is 1)
        if b + 1 == e:
            return heights[b]
        # find the minimum bar; on ties prefer the index closest to the
        # middle so the two recursive subranges stay balanced
        mid = b + ((e - b) >> 1)
        minIndex = b
        for i in range(b + 1, e):
            if heights[i] < heights[minIndex]:
                minIndex = i
            elif heights[i] == heights[minIndex]:
                if abs(mid - i) < abs(mid - minIndex):
                    minIndex = i
        # rectangle spanning the whole range at the minimum height
        useMinIndexArea = heights[minIndex] * (e - b)
        # best rectangles strictly left / right of the minimum bar
        leftMaxArea = self.getRangeMaxArea(heights, b, minIndex)
        rightMaxArea = self.getRangeMaxArea(heights, minIndex + 1, e)
        return max(useMinIndexArea, max(leftMaxArea, rightMaxArea))

    def largestRectangleArea(self, A):
        """Return the largest rectangle area using a monotonic stack, O(n)."""
        if not A:
            return 0
        best = 0
        stack = []  # (start index, height) pairs with non-decreasing heights
        for i, h in enumerate(A):
            start = i
            # every bar taller than the current one cannot extend past
            # position i, so its best rectangle ends here
            while stack and stack[-1][1] > h:
                idx, height = stack.pop()
                best = max(best, height * (i - idx))
                # the current bar extends left to where the popped bar began
                start = idx
            stack.append((start, h))
        n = len(A)
        # bars still on the stack extend to the end of the histogram
        for idx, height in stack:
            best = max(best, height * (n - idx))
        return best
# @lc code=end
| lagoueduCol/Algorithm-Dryad | 16.Rectangle/84.柱状图中最大的矩形.dq.py | 84.柱状图中最大的矩形.dq.py | py | 2,250 | python | zh | code | 134 | github-code | 13 |
74174163539 | import socket
import numpy as np
# UDP socket bound to the receiver address; the transmitter address is the
# only peer whose datagrams are processed.
socketUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
transmissor = ("127.0.0.1", 2020)
receptor = ("127.0.0.1", 3030)
socketUDP.bind(receptor)
# maximum datagram size accepted per recvfrom call
buff_size = 10000
# expected sequence number for the stop-and-wait protocol (starts at 0)
sequence = 0
def rdt_rcv():
    """Receive one packet from the transmitter (stop-and-wait RDT receiver)
    and return its payload as a uint16 numpy array.

    NOTE(review): several calls below are made on *source*, which is the
    (host, port) address tuple returned by recvfrom — tuples have no
    is_not_corrupt()/extract() methods, so those lines raise AttributeError
    at runtime.  They presumably belong on a packet/protocol object; confirm
    against the transmitter-side implementation.
    """
    while True:
        message, source = socketUDP.recvfrom(buff_size)
        if source == transmissor:
            print("Pacote recebido, checando se é corrupto...")
            # NOTE(review): AttributeError — 'source' is an address tuple.
            if source.is_not_corrupt(source):
                print("não é corrupto")
                # NOTE(review): same problem; this also rebinds 'source'.
                sequence, source = source.extract(message)
                # NOTE(review): 'sequence == sequence' is always True and
                # 'sequence ^= sequence' always yields 0 — the intent was
                # probably to compare against (and toggle) the module-level
                # expected sequence number.
                if sequence == sequence:
                    print("Numero de sequencia esperado, Reenviando pacotes..", sequence)
                    sequence ^= sequence
                    return np.frombuffer(message, dtype=np.uint16)
                else:
                    print("Pacote duplicado")
                    # NOTE(review): 'transmissor' is a tuple, not callable;
                    # sendto's (data, address) arguments look swapped.
                    socketUDP.sendto(transmissor(b"ACK"),buff_size)
            else:
                print("Pacote corrupto")
                # NOTE(review): same swapped/callable-tuple problem as above.
                socketUDP.sendto(transmissor(b"NAK"), buff_size)
if __name__ == "__main__":
    # Receive the first packet (a numpy array of uint16 payload words).
    rcvpkt = rdt_rcv()
    while True:
        # NOTE(review): rdt_rcv() returns a numpy array, which has no
        # rdt_rcv attribute — this raises AttributeError on the first
        # iteration; the loop probably meant to call rdt_rcv() again and
        # rebind rcvpkt.
        rcvpkt.rdt_rcv()
        print(f"Dados recebidos {rcvpkt}")
| gabrigabe/pythonredes | receiver.py | receiver.py | py | 1,207 | python | en | code | 0 | github-code | 13 |
def homework_4(Str):
    """Recursively decide whether Str is a palindrome.

    Short strings are checked one character pair at a time; strings of 100+
    characters are checked 100-character chunks at a time (compare the first
    100 characters with the reversed last 100, then recurse on the middle)
    to keep the recursion depth manageable.
    """
    if len(Str) >= 100:
        # Chunked comparison: head of 100 vs reversed tail of 100.
        reversed_tail = ''.join(reversed(Str[-100:]))
        if Str[0:100] != reversed_tail:
            return False
        return homework_4(Str[100:-100])
    # Zero or one character left: trivially a palindrome.
    if len(Str) < 2:
        return True
    # First and last characters must match ...
    if Str[0] != Str[-1]:
        return False
    # ... then the interior must itself be a palindrome.
    return homework_4(Str[1:-1])
if __name__ == '__main__':
    # Manual smoke test: "abba" is a palindrome, so this prints True.
    Str = "abba"
    print(homework_4(Str))
| daniel880423/Member_System | file/hw4/1100415/hw4_s1100415_3.py | hw4_s1100415_3.py | py | 740 | python | zh | code | 0 | github-code | 13 |
29825019954 | import copy
import math
import os
import pickle
import random
import re
import time
import zipfile
import requests
import torch
import unicodedata
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def download_dataset(url, save_file_name, save_folder):
    """Fetch a zip archive from *url* (unless it is already on disk),
    unpack it into *save_folder*, then delete the archive file."""
    if not os.path.isfile(save_file_name):
        # Only download when the archive is not already present.
        response = requests.get(url)
        with open(save_file_name, "wb") as target:
            target.write(response.content)
    with zipfile.ZipFile(save_file_name, 'r') as archive:
        archive.extractall(save_folder)
    os.remove(save_file_name)
class Lang:
    """Vocabulary for one language: word <-> index maps plus word counts.

    Indices 0 and 1 are reserved for the SOS and EOS markers.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        """Register every whitespace-separated token of *sentence*."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            index = self.n_words
            self.word2index[word] = index
            self.word2count[word] = 1
            self.index2word[index] = word
            self.n_words = index + 1
def unicodeToAscii(s):
    """Strip diacritics: decompose to NFD and drop combining marks ('Mn').

    (Technique from https://stackoverflow.com/a/518232/2809427.)
    """
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def normalizeString(s):
    """Lowercase and trim *s*, put a space before sentence punctuation, and
    collapse any other non-letter run into a single space."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)
    return re.sub(r"[^a-zA-Z.!?]+", r" ", text)
def readLangs(lang1, lang2, file_path, reverse=False):
    """Read the '<lang1>-<lang2>.txt' parallel corpus and build Lang objects.

    Returns (input_lang, output_lang, pairs) where each pair is a list of
    normalized sentences; with reverse=True the translation direction is
    flipped.
    """
    # FIX: use a context manager so the corpus file is always closed
    # (the original left the handle open).
    with open(file_path + '%s-%s.txt' % (lang1, lang2), encoding='utf-8') as f:
        lines = f.read().strip().split('\n')

    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs
def filterPair(p, MAX_LENGTH, eng_prefixes):
    """True when both sentences are shorter than MAX_LENGTH tokens and the
    target side starts with one of *eng_prefixes*."""
    src_short = len(p[0].split(' ')) < MAX_LENGTH
    tgt_short = len(p[1].split(' ')) < MAX_LENGTH
    return src_short and tgt_short and p[1].startswith(eng_prefixes)
def filterPairs(pairs, MAX_LENGTH, eng_prefixes):
    """Keep only the pairs accepted by filterPair."""
    return [p for p in pairs if filterPair(p, MAX_LENGTH, eng_prefixes)]
def prepareData(lang1, lang2, MAX_LENGTH, eng_prefixes, file_path, reverse=False):
    """Corpus pipeline: read and normalize the pair file, filter the pairs
    by length/prefix, then fill both vocabularies from the survivors."""
    input_lang, output_lang, pairs = readLangs(lang1, lang2, file_path, reverse)
    pairs = filterPairs(pairs, MAX_LENGTH, eng_prefixes)
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
    """Map each whitespace-separated token of *sentence* to its index in *lang*."""
    return [lang.word2index[token] for token in sentence.split(' ')]
def tensorFromSentence(lang, sentence, EOS_token, device):
    """Encode *sentence* as a column tensor of word indices ending in EOS."""
    indices = indexesFromSentence(lang, sentence) + [EOS_token]
    return torch.tensor(indices, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair, input_lang, output_lang, EOS_token, device):
    """Encode a (source, target) sentence pair as two index tensors."""
    return (tensorFromSentence(input_lang, pair[0], EOS_token, device),
            tensorFromSentence(output_lang, pair[1], EOS_token, device))
def asMinutes(s):
    """Format a duration of *s* seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def timeSince(since, percent):
    """Elapsed time since *since* plus the projected remaining time, given
    the fraction *percent* of the work already done."""
    elapsed = time.time() - since
    projected_total = elapsed / (percent)
    remaining = projected_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length,
          device, SOS_token, EOS_token, teacher_forcing_ratio, saved_model_device,
          encoder_path, decoder_path):
    """Run one training step on a single sentence pair; return loss/token.

    The source sentence is encoded token by token, then the decoder runs
    either with teacher forcing (ground-truth token fed back) or free-running
    (its own argmax prediction fed back), chosen at random each call.

    NOTE(review): saved_model_device, encoder_path and decoder_path are not
    used here (checkpointing happens in trainIters) — confirm they can be
    dropped from the signature.
    NOTE(review): the decoder receives encoder_hidden at every step and its
    returned hidden state is discarded — confirm the decoder keeps its own
    recurrent state internally, otherwise state is lost between steps.
    """
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)
    # One encoder-output slot per possible source position.
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
    loss = 0
    # Encode the source sentence one token at a time.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei])
        encoder_outputs[ei] = encoder_output[0, 0]
    # Decoding starts from the start-of-sentence marker.
    decoder_input = torch.tensor([[SOS_token]], device=device)
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, _, decoder_attention = decoder(decoder_input, encoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, _, decoder_attention = decoder(decoder_input, encoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # Average the accumulated loss over the target length.
    return loss.item() / target_length
def trainIters(encoder, decoder, n_iters, input_lang, output_lang, SOS_token, EOS_token, device, encoder_optimizer,
               decoder_optimizer, criterion, teacher_forcing_ratio, max_length, pairs, saved_model_device,
               encoder_path, decoder_path, print_every=1000, plot_every=100):
    """Training driver: run `train` on n_iters randomly chosen pairs.

    Keeps a running loss for console reports (every print_every iterations)
    and for the loss curve (every plot_every iterations).  Whenever the
    printed average loss is the best so far, deep copies of the models are
    moved to saved_model_device and pickled to encoder_path / decoder_path.
    """
    # 1. Start a timer
    # 2. Initialize optimizers and criterion
    # 3. Create set of training pairs
    # 4. Start empty losses array for plotting
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    least_loss_avg = np.inf
    # The *_optimizer arguments are factories; they are instantiated here
    # with the corresponding model's parameters.
    encoder_optimizer = encoder_optimizer(encoder.parameters())
    decoder_optimizer = decoder_optimizer(decoder.parameters())
    # Pre-sample every training pair (with replacement) up front.
    training_pairs = [tensorsFromPair(random.choice(pairs), input_lang, output_lang, EOS_token, device)
                      for i in range(n_iters)]
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]
        loss = train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
                     max_length, device, SOS_token, EOS_token, teacher_forcing_ratio, saved_model_device,
                     encoder_path, decoder_path)
        print_loss_total += loss
        plot_loss_total += loss
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))
            # Save best model
            if print_loss_avg <= least_loss_avg:
                least_loss_avg = print_loss_avg
                print("Pickling in progress ...")
                # Deep-copy so training can continue on `device` while the
                # copies are moved to saved_model_device for serialization.
                best_decoder = copy.deepcopy(decoder)
                best_encoder = copy.deepcopy(encoder)
                best_decoder.to(saved_model_device)
                best_encoder.to(saved_model_device)
                with open(decoder_path, "wb") as f:
                    pickle.dump(best_decoder, f)
                with open(encoder_path, "wb") as f:
                    pickle.dump(best_encoder, f)
                print("Pickling Done!")
        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0
    showPlot(plot_losses)
def showPlot(points):
    """Draw the loss curve with y-axis ticks every 0.2 units.

    NOTE(review): plt.figure() opens a figure that plt.subplots() then
    replaces — the first call looks redundant; confirm before removing.
    """
    plt.figure()
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
def evaluate(encoder, decoder, input_lang, output_lang, EOS_token, SOS_token, device, sentence, max_length):
    """Greedily translate *sentence*; return (decoded_words, attentions).

    Decoding stops at EOS (appending the literal '<EOS>' marker) or after
    max_length tokens; `attentions` has one row per emitted token.

    NOTE(review): as in train(), encoder_hidden is passed to the decoder at
    every step and the decoder's returned hidden state is discarded.
    """
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence, EOS_token, device)
        input_length = input_tensor.size()[0]
        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
        # Encode the input one token at a time.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei])
            encoder_outputs[ei] += encoder_output[0, 0]
        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS
        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)
        for di in range(max_length):
            decoder_output, _, decoder_attention = decoder(decoder_input, encoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            # Greedy decoding: take the single most likely token.
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])
            decoder_input = topi.squeeze().detach()
        # Only the rows actually filled (up to and including step di).
        return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, pairs, input_lang, output_lang, EOS_token, SOS_token, device, max_length, n=10):
    """Translate *n* randomly chosen pairs and print source ('>'),
    reference ('=') and model output ('<') for manual inspection."""
    for _ in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        words, _attentions = evaluate(encoder, decoder, input_lang, output_lang, EOS_token, SOS_token,
                                      device, pair[0], max_length)
        print('<', ' '.join(words))
        print('')
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix as a heatmap: input tokens along the
    x-axis, emitted tokens along the y-axis."""
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)
    # Set up axes (leading '' aligns labels with matshow's tick offsets)
    ax.set_xticklabels([''] + input_sentence.split(' ') +
                       ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)
    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def evaluateAndShowAttention(input_lang, encoder1, attn_decoder1, output_lang, EOS_token, SOS_token, device,
                             sentence, max_length):
    """Translate *sentence*, print input/output, and plot the attention."""
    words, attentions = evaluate(encoder1, attn_decoder1, input_lang, output_lang, EOS_token, SOS_token, device,
                                 sentence, max_length)
    print('input =', sentence)
    print('output =', ' '.join(words))
    showAttention(sentence, words, attentions)
| anthony-chukwuemeka-nwachukwu/Translation | preprocess.py | preprocess.py | py | 11,717 | python | en | code | 0 | github-code | 13 |
70527727057 | import re, string, unicodedata
import nltk
import contractions
import inflect
from bs4 import BeautifulSoup
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
def strip_html(text):
    """Return the visible text of an HTML document (markup removed)."""
    return BeautifulSoup(text, "html.parser").get_text()
def remove_between_square_brackets(text):
    """Delete every '[...]' span from *text*, brackets included."""
    return re.sub(r'\[[^]]*\]', '', text)
def replace_contractions(text):
    """Replace contractions in string of text (e.g. "don't" -> "do not")
    via the third-party `contractions` package."""
    return contractions.fix(text)
def denoise_text(text):
    """Noise-removal pipeline: strip HTML markup, drop [bracketed] spans,
    then expand contractions."""
    cleaned = strip_html(text)
    cleaned = remove_between_square_brackets(cleaned)
    return replace_contractions(cleaned)
def removeStopword(str):
    """Drop stopwords (NLTK 'stopwords_id' corpus) from *str* and rejoin
    the remaining tokens with single spaces."""
    stop_words = set(stopwords.words('stopwords_id'))
    kept = [token for token in word_tokenize(str) if token not in stop_words]
    return ' '.join(kept)
# Lazily-built shared stemmer (see stemming below).
_sastrawi_stemmer = None


def stemming(str):
    """Stem Indonesian text with Sastrawi.

    PERF FIX: the original constructed a new StemmerFactory and stemmer on
    every call; the stemmer is now built once and cached at module level.
    # NOTE(review): assumes the Sastrawi stemmer is reusable across calls
    # (standard usage) — confirm against the Sastrawi documentation.
    """
    global _sastrawi_stemmer
    if _sastrawi_stemmer is None:
        _sastrawi_stemmer = StemmerFactory().create_stemmer()
    return _sastrawi_stemmer.stem(str)
def cleaning(str):
    """Lowercase *str* and strip non-ASCII characters, URLs, punctuation,
    digit-bearing tokens and redundant whitespace.

    (The parameter keeps its original builtin-shadowing name so existing
    keyword callers are unaffected.)
    """
    # remove non-ascii: decompose accents (NFKD), drop anything non-ASCII
    str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore').decode('utf-8', 'ignore')
    # remove URLs
    str = re.sub(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', '', str)
    # remove punctuation (and underscores, which count as \w)
    str = re.sub(r'[^\w]|_', ' ', str)
    # remove any token containing a digit, then trim the ends
    str = re.sub(r"\S*\d\S*", "", str).strip()
    # remove standalone numbers. BUG FIX: the original "\b\d+\b" was not a
    # raw string, so \b meant the backspace character rather than a word
    # boundary and the substitution could never match normal text.  (The
    # previous step already removes digit tokens, so this is belt-and-braces.)
    str = re.sub(r"\b\d+\b", " ", str)
    # to lowercase
    str = str.lower()
    # collapse whitespace runs into single spaces
    str = re.sub(r'[\s]+', ' ', str)
    return str
def preprocessing(str):
    """Full text pipeline: denoise, clean, drop stopwords, then stem."""
    result = denoise_text(str)
    result = cleaning(result)
    result = removeStopword(result)
    return stemming(result)
# test the code
str = "Saya membeli buku dengan Copyright © 2008 John Wiley & Sons, Ltd."
print(preprocessing(str))

if __name__ == "__main__":
    # FIX: use context managers so both file handles are closed
    # (the original opened them and never closed either one).
    with open("news.txt", "r") as fo, open("clean_data.txt", "a") as fw:
        for f in fo:
            fw.write(preprocessing(f) + '\n')
| Yuriowindiatmoko2401/tugas-text-analytics-1 | preprocessing.py | preprocessing.py | py | 2,212 | python | en | code | 0 | github-code | 13 |
# Judge-style script: T test cases; for each amount n, greedily count how
# many of each denomination are used and print the counts on one line.
T = int(input())
coin_types = [50000, 10000, 5000, 1000, 500, 100, 50, 10]
for t in range(1, T + 1):
    n = int(input())
    result = []
    for coin in coin_types:
        # divmod gives the coin count and the remaining amount in one step.
        count, n = divmod(n, coin)
        result.append(count)
    print(f'#{t}')
    for count in result:
        print(count, end=' ')
    print()
| jinho9610/py_algo | sw_academy/1970.py | 1970.py | py | 293 | python | en | code | 0 | github-code | 13 |
8452969954 | from z3 import *
def display(board):
    """Print the board, one row (as a list) per line."""
    for row in board:
        print(row)
def intialize():
    """Prompt for a board size n and return an n x n grid of zeros.

    (The misspelled name 'intialize' is kept for caller compatibility.)
    """
    n = int(input("Please input an int: "))
    # One fresh inner list per row so rows do not alias each other.
    return [[0] * n for _ in range(n)]
def main():
    """Entry point: create a Z3 solver and an n x n zero board.

    The solver is created but no constraints are added yet — the marked
    section below is the intended place for them.
    """
    mySolver = Solver()
    board = intialize()
    #################################
    #Our code goes here
    #################################

if __name__ == "__main__":
    main()
| GreyLight02/CSE-260-Project | Main.py | Main.py | py | 520 | python | en | code | 0 | github-code | 13 |
17051536314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsPeriodDTO import InsPeriodDTO
from alipay.aop.api.domain.EcomLogisticsOrderDTO import EcomLogisticsOrderDTO
from alipay.aop.api.domain.PayOrderDTO import PayOrderDTO
from alipay.aop.api.domain.EcomSubOrderDTO import EcomSubOrderDTO
def _simple_property(field_name):
    """Build a plain pass-through property reading/writing '_<field_name>'."""
    private = '_' + field_name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(_get, _set)


class EcomOrderDTO(object):
    """Alipay e-commerce order domain model.

    Behaviour-preserving rewrite of the generated SDK class: the ~400 lines
    of copy-pasted per-field boilerplate are collapsed into a field table,
    generated properties, and loop-based (de)serialization.  The public
    surface (one property per field, to_alipay_dict, from_alipay_dict) is
    unchanged.
    """

    # Field names in the order the generated SDK declared them; this is
    # also the insertion order of the dict built by to_alipay_dict().
    _FIELDS = (
        'actual_pay_fee',
        'attributes',
        'buy_amount',
        'buyer_id',
        'buyer_nick',
        'charge_duration',
        'charge_guarantee_plan_type',
        'credit_deposit_money',
        'discount_fee',
        'ext_info',
        'gmt_create',
        'item_id',
        'item_pict_url',
        'item_price',
        'item_title',
        'item_total_value',
        'logistics_order',
        'main_order_id',
        'order_fee',
        'order_id',
        'order_type',
        'pay_order',
        'post_fee',
        'seller_id',
        'seller_nick',
        'sub_order_list',
        'trade_days',
        'trade_end_time',
        'trade_start_time',
    )

    # Fields whose setters coerce plain dicts into model objects; these get
    # the explicit properties below instead of a generated one.
    _MODEL_FIELDS = ('charge_duration', 'logistics_order', 'pay_order', 'sub_order_list')

    def __init__(self):
        # Every field starts as None in its private '_<name>' slot.
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    @property
    def charge_duration(self):
        return self._charge_duration

    @charge_duration.setter
    def charge_duration(self, value):
        # Accept either a ready model object or its dict form.
        if isinstance(value, InsPeriodDTO):
            self._charge_duration = value
        else:
            self._charge_duration = InsPeriodDTO.from_alipay_dict(value)

    @property
    def logistics_order(self):
        return self._logistics_order

    @logistics_order.setter
    def logistics_order(self, value):
        if isinstance(value, EcomLogisticsOrderDTO):
            self._logistics_order = value
        else:
            self._logistics_order = EcomLogisticsOrderDTO.from_alipay_dict(value)

    @property
    def pay_order(self):
        return self._pay_order

    @pay_order.setter
    def pay_order(self, value):
        if isinstance(value, PayOrderDTO):
            self._pay_order = value
        else:
            self._pay_order = PayOrderDTO.from_alipay_dict(value)

    @property
    def sub_order_list(self):
        return self._sub_order_list

    @sub_order_list.setter
    def sub_order_list(self, value):
        # Only list inputs are accepted; each element may be a model object
        # or its dict form.  Non-list values are silently ignored, matching
        # the generated SDK's behaviour.
        if isinstance(value, list):
            converted = list()
            for element in value:
                if isinstance(element, EcomSubOrderDTO):
                    converted.append(element)
                else:
                    converted.append(EcomSubOrderDTO.from_alipay_dict(element))
            self._sub_order_list = converted

    def to_alipay_dict(self):
        """Serialize to a plain dict; falsy fields are omitted."""
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, field_name)
            if not value:
                continue
            if field_name == 'sub_order_list' and isinstance(value, list):
                # Convert stored elements to dicts *in place*, preserving
                # the original generated code's side effect on the list.
                for i, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[field_name] = value.to_alipay_dict()
            else:
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an EcomOrderDTO from its dict form (None for falsy input).

        Assignment goes through the property setters, so model-typed fields
        are coerced from their dict forms automatically.
        """
        if not d:
            return None
        o = EcomOrderDTO()
        for field_name in EcomOrderDTO._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o


# Generate the plain pass-through properties for every non-model field.
for _field in EcomOrderDTO._FIELDS:
    if _field not in EcomOrderDTO._MODEL_FIELDS:
        setattr(EcomOrderDTO, _field, _simple_property(_field))
del _field
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/EcomOrderDTO.py | EcomOrderDTO.py | py | 16,934 | python | en | code | 241 | github-code | 13 |
23152453435 | # -*- coding: utf-8 -*-
"""
Averages the results per fold over all folds and stores them in a new csv file.
Created on Tue Feb 2 10:29:48 2021
@author: lbechberger
"""
import argparse
from code.util import read_csv_results_files, write_csv_results_file
# CLI: per-fold result csv files, the number of folds, and the output path.
parser = argparse.ArgumentParser(description='Average fold results')
parser.add_argument('input_files', help = 'csv files containing the individual fold results')
parser.add_argument('folds', type = int, help = 'number of folds')
parser.add_argument('output_file', help = 'csv file for the aggregated results')
args = parser.parse_args()

# Read every fold's rows (keyed by the 'regressor' column), ready to write.
headline, content = read_csv_results_files(args.input_files, range(args.folds), ['regressor', 'targets'], 'regressor')
write_csv_results_file(args.output_file, headline, content, 'regressor') | lbechberger/LearningPsychologicalSpaces | code/ml/regression/average_folds.py | average_folds.py | py | 788 | python | en | code | 10 | github-code | 13 |
41745841225 | import pandas as pd
# Make wide DataFrames print in full instead of being truncated/wrapped.
pd.set_option("display.max_columns", 500)
pd.set_option("display.expand_frame_repr", False)
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from scipy.stats import gaussian_kde

# Make the sibling 'ionsrcopt' package importable, then pull project modules.
sys.path.insert(1, os.path.abspath("../ionsrcopt"))
import load_data as ld
from source_features import SourceFeatures
from processing_features import ProcessingFeatures
def main():
    """Load the clustered source data, keep the requested stability/cluster
    subset, robust-scale the features, and plot per-feature densities."""
    ######################
    ###### SETTINGS ######
    ######################

    clustered_data_folder = "Data_Clustered/"  # Base folder of clustered data
    filename = "JanNov2018.csv"  # The file to load

    source_stability = 1  # 1 if we want to look at a stable source, 0 else
    cluster = 51  # The cluster to plot or None if you want to plot all data

    features = [
        SourceFeatures.BIASDISCAQNV,
        SourceFeatures.GASAQN,
        SourceFeatures.OVEN1AQNP,
        SourceFeatures.SAIREM2_FORWARDPOWER,
        SourceFeatures.SOLINJ_CURRENT,
        SourceFeatures.SOLCEN_CURRENT,
        SourceFeatures.SOLEXT_CURRENT,
        SourceFeatures.SOURCEHTAQNI,
        SourceFeatures.BCT25_CURRENT,
    ]  # Features to be displayed

    normalize = True  # Do we want to standard scale the data?
    # One KDE bandwidth per feature (for unnormalized data).
    bandwidth = np.array(
        [0.014, 0.011, 0.014, 0.014, 0.014, 0.014, 0.014, 0.014, 0.014]
    )  # bandwidth for unnormalized data
    # bandwidth = 0.02

    ######################
    ######## CODE ########
    ######################

    # Load file into a data frame
    path = clustered_data_folder + filename
    df = ld.read_data_from_csv(path, None, None)
    df = ld.fill_columns(df, None, fill_nan_with_zeros=True)
    df = ld.convert_column_types(df)
    # Keep only rows with the requested source stability.
    df = df.loc[df[ProcessingFeatures.SOURCE_STABILITY] == source_stability, :].copy()
    total_duration = df[ProcessingFeatures.DATAPOINT_DURATION].sum()

    data = df[features].values
    # Each data point is weighted by how long it lasted.
    weights = df[ProcessingFeatures.DATAPOINT_DURATION].values
    if normalize:
        # data = (data - np.mean(data, axis=0)) / np.std(data, axis=0) #Standard scaling
        # data = (data - np.min(data, axis=0)) / (np.max(data, axis=0) - np.min(data, axis=0)) #MinMax scaling
        # data = data / np.max(np.absolute(data), axis=0) #Max scaling
        data = (data - np.median(data, axis=0)) / (
            np.quantile(data, q=0.9, axis=0) - np.quantile(data, q=0.1, axis=0)
        )  # Robust scaler

    if cluster is not None:
        data = data[df[ProcessingFeatures.CLUSTER] == cluster]
        weights = weights[df[ProcessingFeatures.CLUSTER] == cluster]

    resolution = 5000
    # if cluster is not None:
    #    bandwidth *= 0.2
    num_kde_samples = 40000

    # Share of the total (stable-source) duration spent in this cluster;
    # used to scale the densities so subplot areas are comparable.
    cluster_duration = np.sum(weights)
    percentage_of_values = cluster_duration / total_duration

    plot_cluster(
        data,
        weights,
        features,
        feature_ranges=None,
        median=None,
        resolution=resolution,
        bandwidth=bandwidth,
        num_kde_samples=num_kde_samples,
        cluster=cluster,
        percentage_of_values=percentage_of_values,
    )
def plot_cluster(
    data,
    weights,
    features,
    feature_ranges,
    median,
    resolution,
    bandwidth,
    num_kde_samples,
    cluster,
    percentage_of_values,
):
    """Draw one subplot per feature with the weighted density estimate of
    that feature's values, maximize the window, and show the figure.

    feature_ranges (optional) fixes the x-limits per feature; median
    (optional) draws a red vertical line at each feature's median value.
    """
    # A single scalar bandwidth is broadcast to every feature.
    if isinstance(bandwidth, float):
        bandwidth = [bandwidth for i in range(len(features))]

    fig, ax = plt.subplots(len(features), 1, sharex=False)
    for i, feature in enumerate(features):
        grid, kde = estimate_distribution(
            data,
            weights,
            i,
            resolution,
            bandwidth=bandwidth[i],
            num_kde_samples=num_kde_samples,
            percentage_of_values=percentage_of_values,
        )

        ax[i].set_title("{}".format(feature))
        ax[i].tick_params(axis="both", which="major")
        if feature_ranges:
            ax[i].set_xlim(*feature_ranges[i])
            # ax.set_ylim(*feature_ranges[i][1])
        if median is not None:
            ax[i].axvline(x=median[i], color="red")
        ax[i].grid(True)
        ax[i].plot(grid, kde)

    # NOTE(review): showMaximized() is specific to the Qt backend — confirm
    # the intended backend before running headless.
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    fig.suptitle("Densities of specified features of cluster {}".format(cluster))
    plt.subplots_adjust(
        left=0.05, bottom=0.05, right=0.95, top=0.93, wspace=None, hspace=0.4
    )
    plt.show()
def estimate_distribution(
    data,
    weights,
    current_dimension,
    num_steps,
    bandwidth=0.1,
    num_kde_samples=15000,
    percentage_of_values=1,
):
    """Estimate the 1-D density of one feature with a (weighted) Gaussian KDE.

    Parameters
    ----------
    data : 2-D array, rows are samples, columns are features.
    weights : 1-D array of per-sample weights, or None for uniform weights.
    current_dimension : column index of the feature to estimate.
    num_steps : number of evaluation points in the returned grid.
    bandwidth : absolute bandwidth in feature units.
    num_kde_samples : cap on how many samples feed the KDE.
    percentage_of_values : factor applied to the density (e.g. the cluster's
        share of the total duration), so per-cluster densities are comparable.

    Returns
    -------
    (grid, density) : two 1-D arrays of length ``num_steps``.
    """
    # Subsample with replacement to bound the KDE cost on large clusters.
    sample_size = min(num_kde_samples, len(data))
    sample = np.random.randint(0, len(data), size=sample_size)
    datapoints = data[sample, current_dimension]

    weights_sample = None
    if weights is not None:  # idiomatic form (was: `not weights is None`)
        weights_sample = weights[sample]

    min_val = np.amin(datapoints)
    max_val = np.amax(datapoints)
    grid = np.linspace(min_val, max_val, num_steps)
    # gaussian_kde's bw_method is relative to the data std, so divide to get
    # an absolute bandwidth of `bandwidth` in feature units.
    kde = gaussian_kde(
        dataset=datapoints,
        bw_method=bandwidth / np.std(datapoints, axis=0),
        weights=weights_sample,
    )
    dens = kde.evaluate(grid)
    return grid, dens * percentage_of_values
# Entry point when executed as a script (main is defined earlier in the file).
if __name__ == "__main__":
    main()
| mihailescum/ionsrcopt | visualization/cluster_distributions.py | cluster_distributions.py | py | 5,185 | python | en | code | 0 | github-code | 13 |
class Solution:
    def hIndex(self, citations: List[int]) -> int:
        """Return the h-index of an ascending-sorted citation list.

        Binary search (O(log n) time, O(1) space): move right while
        citations[mid] cannot yet cover the n - mid papers at or after mid,
        tracking the best citation count seen on the way.
        """
        n = len(citations)
        left = 0
        right = n - 1
        # Bug fix: `float(-inf)` raised NameError (name `inf` undefined);
        # float() accepts the string form '-inf'.
        maximum = float('-inf')
        while left <= right:
            mid = (right - left) // 2 + left
            if citations[mid] <= n - mid:
                # Not enough citations at mid: the h-index from this side is
                # bounded by citations[mid]; search the right half.
                left = mid + 1
                maximum = max(maximum, citations[mid])
            else:
                right = mid - 1
        # n - left papers each have at least n - left citations.
        return max(maximum, n - left)
| Matiyas1994/A2svcompitative-Programming | Camp Progress sheet/275. H-Index II.py | 275. H-Index II.py | py | 511 | python | en | code | 2 | github-code | 13 |
33251891840 | #tarjan 算法
#https://blog.csdn.net/jeryjeryjery/article/details/52829142?locationNum=4&fps=1
#求任意顶点开始的联通图 有且仅存在一个 且dfn[u] == low[u]
from collections import OrderedDict
matric = [[0,1,1,0,0,0],[0,0,0,1,0,0],[0,0,0,1,1,0],[1,0,0,0,0,1],[0,0,0,0,0,1],[0,0,0,0,0,0]]
dfn = OrderedDict()
low = OrderedDict()
flag = dict()
count = 0
n = 6
num = 0
class Stack(object):
    """A minimal LIFO stack backed by a plain Python list."""

    def __init__(self):
        # The top of the stack is the end of the list.
        self.items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def clear(self):
        """Discard every stored item."""
        self.items.clear()

    def empty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def top(self):
        """Return (without removing) the top item."""
        return self.items[-1]
s = Stack()
def tarjan(u):
    """Tarjan DFS from node *u*; prints each strongly connected component.

    Operates entirely on module-level state: the adjacency matrix `matric`,
    discovery/low-link tables `dfn`/`low`, the shared stack `s`, the
    on-stack flags `flag` and the counters `count` (DFS index) and `num`.
    """
    global s, num, n, count, flag, stack, dfn, low, matric
    count = count + 1
    dfn[u] = low[u] = count  # discovery index doubles as the initial low-link
    s.push(u)
    flag[u] = True  # u is now on the stack
    for i in range(n):
        if matric[u][i] == 0:
            continue  # no edge u -> i
        # Idiomatic truth tests below (were `... is True` / `... is False`,
        # which PEP 8 advises against); `flag` only ever holds real booleans.
        if flag.get(i, False):
            # i is on the stack: back edge, shrink u's low-link.
            if dfn[i] < low[u]:
                low[u] = dfn[i]
        else:
            tarjan(i)
            low[u] = min(low[u], low[i])
    if dfn[u] == low[u] and not s.empty():
        # u is the root of a strongly connected component: pop and print it.
        print("********连通图********")
        m = s.pop()
        flag[m] = False
        print(m + 1)
        while m != u and not s.empty():
            num = num + 1
            m = s.pop()
            flag[m] = False
            print(m + 1)
        print("*********************")
if __name__ == "__main__":
    pass
# NOTE(review): these three statements sit OUTSIDE the __main__ guard above
# (its body is just `pass`), so they also run on import — presumably the
# author meant to indent them under the guard; confirm before moving.
tarjan(3)
print("连通图数量...")
print(num)
| donydex/Saasi-dony | 3.tarjan.py | 3.tarjan.py | py | 1,677 | python | en | code | 0 | github-code | 13 |
2354729301 | from itertools import chain
import numpy as np
import Markov_Chain
def stationery_distribution_convergence(Q, nsim):
    """Print the empirical stationary distribution of the chain with
    transition matrix *Q* after 10, 100, ..., 10**nsim simulated steps.
    """
    # Renamed from `chain`, which shadowed `itertools.chain` imported at
    # module level.
    mc = Markov_Chain.Markov_Chain(Q)
    for n in range(nsim):
        steps = 10 ** (n + 1)
        stat_dist = mc.simulate(steps)
        # Typo fixed in the message: "distrubution" -> "distribution".
        print("After {} runs the stationary distribution has converged to: {}.".format(steps, stat_dist))
def once_s_always_s(Q):
    """Show that the long-run empirical distribution is (approximately) a
    left eigenvector of *Q*: printing pi alongside pi.dot(Q).
    """
    # Renamed from `chain` to avoid shadowing `itertools.chain`.
    mc = Markov_Chain.Markov_Chain(Q)
    stat_dist = mc.simulate(10**5)
    print("The stationary distribution of the chain after 10^5 runs is close: {}".format(stat_dist))
    print("We can multiply {} and {} to receive {}".format(stat_dist, Q, stat_dist.dot(Q)))
# Example two-state chain; uncomment one of the demo calls below to run it.
matrix= np.array([[1/3, 2/3], [1/2, 1/2]])
#stationery_distribution_convergence(Q=matrix, nsim=5)
#once_s_always_s(Q=matrix)
39829638470 | from rest_framework import serializers
from restaurants.models import MenuItem, Restaurant
from restaurants.serializers import RestaurantSerializer
class MenuItemSerializer(serializers.ModelSerializer):
    """Serializer for MenuItem rows.

    The related restaurant is exposed (and resolved on writes) by its
    `name` field rather than its primary key.
    """

    # Writes look the name up against all Restaurant rows, which assumes
    # restaurant names are unique — confirm against the model definition.
    restaurant = serializers.SlugRelatedField(
        slug_field='name',
        queryset=Restaurant.objects.all()
    )

    class Meta:
        model = MenuItem
        fields = [
            'id', 'name', 'description', 'price', 'restaurant', 'created_at'
        ]
        read_only_fields = ['created_at',]
27187996913 | from collections import deque
import sys
input = sys.stdin.readline
def bfs(start):
    """Flood-fill (BFS) the union of countries reachable from *start* whose
    neighbouring population difference lies in [L, R].

    Marks each visited cell in `visited` with the component id (cnt + 1) and
    accumulates the component's total population / cell count in the locals
    `population` / `tmp_cnt`; increments the global component counter `cnt`.
    """
    global cnt
    global check
    queue = deque([start])
    tmp_cnt = 1  # number of cells in this component
    population = arr[start[0]][start[1]]  # running population sum
    visited[start[0]][start[1]] = cnt + 1  # component id for this BFS
    di, dj = [0, 1, 0, -1], [1, 0, -1, 0]  # 4-neighbourhood offsets
    while queue:
        n = queue.popleft()
        for k in range(4):
            ni, nj = n[0] + di[k], n[1] + dj[k]
            if 0 <= ni < N and 0 <= nj < N and visited[ni][nj] == 0:
                # A border opens only when the population gap is within [L, R].
                if L <= abs(arr[n[0]][n[1]] - arr[ni][nj]) <= R:
                    visited[ni][nj] = cnt + 1
                    population += arr[ni][nj]
                    tmp_cnt += 1
                    queue.append([ni, nj])
    cnt += 1
def change(start):
    """Rewrite every cell of the most recent component to its average
    population (population // tmp_cnt) via a second BFS.

    NOTE(review): this function and the trailing `if tmp_cnt > 1:` block sit
    at module level yet reference bfs()'s locals (`tmp_cnt`, `population`,
    `di`, `dj`); they were presumably nested inside bfs() originally and the
    indentation was lost — confirm against the accepted submission.
    """
    queue_change = deque([start])
    new = population // tmp_cnt  # floored average population
    arr[start[0]][start[1]] = new
    tmp_visited = [[False] * N for _ in range(N)]
    tmp_visited[start[0]][start[1]] = True
    while queue_change:
        t = queue_change.popleft()
        for k in range(4):
            ni, nj = t[0] + di[k], t[1] + dj[k]
            if 0 <= ni < N and 0 <= nj < N and visited[ni][nj] == cnt and tmp_visited[ni][nj] == False:
                tmp_visited[ni][nj] = True
                arr[ni][nj] = new
                queue_change.append([ni, nj])
if tmp_cnt > 1:
    # Only components with at least two cells trigger a population move.
    change(start)
    check += 1
N, L, R = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
ans = 0
# Repeat rounds of population movement until a round merges nothing
# (check stays 0); each round flood-fills every still-unvisited cell.
while True:
    visited = [[0] * N for _ in range(N)]
    check = 0  # number of components that moved population this round
    cnt = 0    # component id counter for this round
    for i in range(N):
        for j in range(N):
            if visited[i][j] == 0:
                bfs([i, j])
    if check == 0:
        break
    else:
        ans += 1
print(ans)
22102595042 | import os
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, LinearLR
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torch.utils.tensorboard import SummaryWriter
from executors.epoch_manager import EpochManager
from configs import Config, Resnet50Config
from models import Resnet
from metrics import BalancedAccuracy
from datasets import MixUpDecorator, OverfitModeDecorator
from transforms import LabelSmoothing
from utils import split_params4weight_decay, zero_gamma_resnet, LinearStochasticDepth
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATASET_ROOT = os.path.join(ROOT, 'datasets', 'data')

# Experiment configuration: CPU run, batch size 1, overfit mode enabled.
cfg = Config(ROOT_DIR=ROOT, DATASET_DIR=DATASET_ROOT,
             dataset_name='AlmostCifar', out_features=12,
             model_name='Resnet50_tricks', device='cpu',
             batch_size=1, lr=5e-4, weight_decay=5e-4, momentum=0.9,
             debug=True, show_each=100,
             overfit=True, seed=None)

# model
model_cfg = Resnet50Config(in_channels=3, out_features=cfg.out_features)
model = Resnet(model_cfg, stochastic_depth=LinearStochasticDepth).to(cfg.device)

keys = train_key, valid_key = 'train', 'valid'

# Seed all RNGs when reproducibility is requested.
if cfg.seed is not None:
    np.random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)

jitter_param = (0.6, 1.4)
# NOTE(review): the customary ImageNet mean is [0.485, 0.456, 0.406];
# 0.405 here looks like a typo — confirm before changing.
norm = [transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.405],
                             std=[0.229, 0.224, 0.225])]
# Train: random crop / flip / color jitter; valid: deterministic resize+crop.
image_transforms = {train_key: transforms.Compose([transforms.RandomResizedCrop(224),
                                                   transforms.RandomHorizontalFlip(),
                                                   transforms.ColorJitter(brightness=jitter_param,
                                                                          saturation=jitter_param,
                                                                          hue=(-.25, .25)),
                                                   *norm]),
                    valid_key: transforms.Compose([transforms.Resize(256),
                                                   transforms.CenterCrop(224),
                                                   *norm])}
target_transforms = {train_key: transforms.Compose([LabelSmoothing(cfg.out_features, alpha=0.1),
                                                    ])}
datasets_dict = {k: datasets.ImageFolder(root=os.path.join(DATASET_ROOT, k),
                                         transform=image_transforms[k] if k in image_transforms else None,
                                         target_transform=target_transforms[k] if k in target_transforms else None)
                 for k in keys}

# overfit
if cfg.overfit:
    shuffle = False
    overfit_mode = OverfitModeDecorator(cfg.batch_size)
    for key in datasets_dict.keys():
        datasets_dict[key] = overfit_mode(datasets_dict[key])
else:
    shuffle = True

# Add Mixup
mixup_decorator = MixUpDecorator(cfg.out_features)
datasets_dict[train_key] = mixup_decorator(datasets_dict[train_key])

dataloaders_dict = {train_key: DataLoader(datasets_dict[train_key],
                                          batch_size=cfg.batch_size, shuffle=shuffle),
                    valid_key: DataLoader(datasets_dict[valid_key],
                                          batch_size=cfg.batch_size, shuffle=shuffle)}

# zero gamma in batch norm
zero_gamma_resnet(model)

# weight decay: apply it only to the parameter subset chosen by the splitter.
if cfg.weight_decay is not None:
    wd_params, no_wd_params = split_params4weight_decay(model)
    params = [dict(params=wd_params, weight_decay=cfg.weight_decay),
              dict(params=no_wd_params)]
else:
    params = model.parameters()

optimizer = optim.SGD(params, lr=cfg.lr, momentum=cfg.momentum)
criterion = nn.CrossEntropyLoss()
writer = SummaryWriter(log_dir=cfg.LOG_PATH)
metrics = [BalancedAccuracy(model_cfg.out_features),
           ]
metrics_dict = {train_key: metrics, valid_key: metrics}

# Linear warmup for the first `end_warmup` epochs, cosine decay afterwards.
epochs = 20
end_warmup = 4
scheduler = [LinearLR(optimizer, start_factor=0.1, total_iters=end_warmup),
             CosineAnnealingLR(optimizer, epochs - end_warmup)]

class_names = datasets_dict[train_key].classes
epoch_manager = EpochManager(dataloaders_dict=dataloaders_dict, class_names=class_names,
                             model=model, optimizer=optimizer, criterion=criterion, cfg=cfg,
                             scheduler=scheduler, writer=writer, metrics=metrics_dict)

save_each = 5
for epoch in range(epochs):
    # Switch from warmup to the cosine schedule at the boundary epoch.
    if epoch == end_warmup:
        epoch_manager.switch_scheduler()
    epoch_manager.train(train_key, epoch)
    if epoch % save_each == 0 and epoch != 0:
        epoch_manager.save_model(epoch)
    # Log the current learning rate of every parameter group.
    for i, param_group in enumerate(epoch_manager.optimizer.param_groups):
        epoch_manager.writer.add_scalar(f'scheduler lr/param_group{i}',
                                        param_group['lr'], epoch)
    epoch_manager.validation(valid_key, epoch)
| Jlevan25/resnet | executors/experiment_2.py | experiment_2.py | py | 5,190 | python | en | code | 0 | github-code | 13 |
35128492329 | # github python link - https://github.com/mission-peace/interview/blob/master/python/geometry/skylinedrawing.py
# tushar roy video - https://www.youtube.com/watch?v=GSBLe8cKu0s&t=867s&ab_channel=TusharRoy-CodingMadeSimple
# build list of object to store points in sorted order according to (point, height, is_start)
# sort based on 3 edge cases: 1. both start at one point, keep higher height before smaller
# by negating both (h2 before h1) 2. both end at same point, h1 before h2 3. one start, one
# end at same point, (start before end - make start negative)
# sort the list
# create queue and result, initialize queue with key - height 0, value - 1
# go through the list to add values to the result
# add values to the queue from list
# if start object, save height as key and increase count by 1.
# if end object, decrease count by 1 for that height
# add to result whenever max curr height changes.
# use prev and curr variables to keep track of highest key in queue
# time: O(N*H) worst case - `max(queue.keys())` scans every active height on
# each event (a heap / ordered multiset would give the intended O(NlogN))
# space: O(N) for the event list, the height counts and the result
class BuildingPoint(object):
    """One building edge (start or end) with the ordering needed by the
    skyline sweep: sort by x; at equal x, starts come before ends, taller
    starts first and shorter ends first (start heights compare negated).
    """

    def __init__(self, point, height, is_start):
        self.point = point        # x coordinate of the edge
        self.height = height      # building height
        self.is_start = is_start  # True for the left (start) edge

    def __lt__(self, other):
        """Order edges for the sweep line (see class docstring)."""
        if self.point != other.point:
            return self.point < other.point
        # Same x: negating start heights makes all three tie-break rules
        # fall out of a single numeric comparison.
        mine = -self.height if self.is_start else self.height
        theirs = -other.height if other.is_start else other.height
        return mine < theirs
class Solution:
    def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:
        """Sweep-line skyline: sort all building edges, maintain a multiset
        of active heights (dict height -> count) and emit a key point each
        time the running maximum height changes.
        """
        building_points = []
        for building in buildings:
            # building = [left, right, height] -> one start and one end edge.
            building_points.append(BuildingPoint(building[0], building[2], True))
            building_points.append(BuildingPoint(building[1], building[2], False))
        building_points.sort()

        # Height 0 is seeded with count 1 so max() is defined even when no
        # building is active.
        queue, result = {}, []
        queue[0] = 1
        prev_max_height = 0
        for building_point in building_points:
            if building_point.is_start:
                # Start edge: one more active building at this height.
                if building_point.height in queue:
                    queue[building_point.height] += 1
                else:
                    queue[building_point.height] = 1
            else:
                # End edge: retire one active building at this height.
                if queue[building_point.height] == 1:
                    del queue[building_point.height]
                else:
                    queue[building_point.height] -= 1
            # max over dict keys is O(k) per event (see header comment).
            curr_max_height = max(queue.keys())
            if curr_max_height != prev_max_height:
                result.append([building_point.point, curr_max_height])
                prev_max_height = curr_max_height
        return result
if __name__ == '__main__':
    buildings = [[1, 3, 4], [3, 4, 4], [2, 6, 2], [8, 11, 4], [7, 9, 3], [10, 11, 2]]
    # Bug fix: `get_skyline` was never defined (NameError at runtime);
    # call the Solution method defined above instead.
    print(Solution().getSkyline(buildings))
26617611902 | import argparse
if __name__ == '__main__':
    # argument parsing to grab the input file
    parser = argparse.ArgumentParser(description="Process a list of binary numbers for diagnostic report")
    required = parser.add_argument_group("required arguments")
    required.add_argument("-i", "--input_file", help="path to the input file", required=True)
    args = parser.parse_args()
    if args.input_file is None:
        print("No input file passed in")
        exit(1)
    try:
        # Fixes: renamed `input` (it shadowed the builtin), wrapped in `with`
        # so the handle is closed (the original leaked it), and the bare
        # `except:` narrowed to OSError.
        with open(args.input_file, "r") as infile:
            lines = infile.readlines()
    except OSError:
        print("Input file path '%s' is invalid" % args.input_file)
        exit(1)

    # go through the diagnostic report
    byte_counts = None  # count of 1-bits per bit position, LSB first
    majority_threshold = len(lines) / 2
    for line in lines:
        binary_string = str(line).strip()
        if byte_counts is None:
            byte_counts = [0] * len(binary_string)
        for i in range(len(binary_string)):
            if binary_string[i] == '1':
                # Reverse the index so byte_counts[0] is the least
                # significant bit.
                byte_counts[len(byte_counts) - 1 - i] += 1

    # construct gamma (majority bits set) and epsilon (minority bits set)
    gamma = 0
    epsilon = 0
    for i in range(len(byte_counts)):
        if byte_counts[i] > majority_threshold:
            gamma += 2 ** i
        else:
            # A tie would make gamma/epsilon ill-defined; AoC inputs have none.
            assert(byte_counts[i] != majority_threshold)
            epsilon += 2 ** i

    print("Gamma:", gamma)
    print("Epsilon:", epsilon)
    print("Answer:", gamma * epsilon)
    exit(0)
32873672373 | # Parts of code inspired from SuperPoint[https://github.com/rpautrat/SuperPoint]
import cv2
import numpy as np
import torch
from numpy.random import uniform
from scipy.stats import truncnorm
from superpoint.data.data_utils.config_update import dict_update
from superpoint.data.data_utils.kp_utils import filter_points, compute_keypoint_map, warp_points
import kornia.geometry.transform as tf
import kornia
import matplotlib.pyplot as plt
class Homographic_aug():
    """SuperPoint-style homographic augmentation.

    Samples a random homography (perspective / scale / translation /
    rotation perturbations of a centred patch), warps an image and its
    keypoints with it, and builds an eroded validity mask for the warp.
    """

    def __init__(self, config, device="cpu") -> None:
        # Annotation fixed: __init__ returns None (was `-> dict`).
        self.config = config["params"]  # kwargs forwarded to sample_homography
        self.erosion = config["valid_border_margin"]  # mask erosion radius
        self.device = device

    def sample_homography(self, shape, translation=True, rotation=True, scaling=True, perspective=True, scaling_amplitude=0.1,
                          n_scales=5, n_angles=25, perspective_amplitude_x=0.1,perspective_amplitude_y=0.1,
                          patch_ratio=0.5,max_angle=1.57,allow_artifacts=False,translation_overflow=0.):
        """Sample a random homography as a (1, 3, 3) float32 tensor.

        Perturbs the corners of a centred unit-square patch (each transform
        optional), rescales the corners to pixel coordinates, and returns
        the INVERSE of the resulting perspective transform, ready for
        warp_perspective.
        """
        std_trunc = 2

        # Corners of the input patch
        margin = (1 - patch_ratio) / 2
        pts1 = margin + np.array([[0, 0],
                                  [0, patch_ratio],
                                  [patch_ratio, patch_ratio],
                                  [patch_ratio, 0]])
        pts2 = pts1.copy()

        # Random perspective and affine perturbations
        if perspective:
            if not allow_artifacts:
                # Clamp so the perturbed patch stays inside the unit square.
                perspective_amplitude_x = min(perspective_amplitude_x, margin)
                perspective_amplitude_y = min(perspective_amplitude_y, margin)
            else:
                perspective_amplitude_x = perspective_amplitude_x
                perspective_amplitude_y = perspective_amplitude_y
            perspective_displacement = truncnorm(-std_trunc, std_trunc, loc=0., scale=perspective_amplitude_y/2).rvs(1)
            h_displacement_left = truncnorm(-std_trunc, std_trunc, loc=0., scale=perspective_amplitude_x/2).rvs(1)
            h_displacement_right = truncnorm(-std_trunc, std_trunc, loc=0., scale=perspective_amplitude_x/2).rvs(1)
            pts2 += np.array([[h_displacement_left, perspective_displacement],
                              [h_displacement_left, -perspective_displacement],
                              [h_displacement_right, perspective_displacement],
                              [h_displacement_right, -perspective_displacement]]).squeeze()

        # Random scaling
        # sample several scales, check collision with borders, randomly pick a valid one
        if scaling:
            scales = truncnorm(-std_trunc, std_trunc, loc=1, scale=scaling_amplitude/2).rvs(n_scales)
            scales = np.concatenate((np.array([1]), scales), axis=0)
            center = np.mean(pts2, axis=0, keepdims=True)
            scaled = (pts2 - center)[np.newaxis, :, :] * scales[:, np.newaxis, np.newaxis] + center
            if allow_artifacts:
                valid = np.arange(1,n_scales+1) # all scales are valid except scale=1
            else:
                valid = (scaled >= 0.) * (scaled <= 1.)
                valid = valid.prod(axis=1).prod(axis=1)
                valid = np.where(valid)[0]
            idx = valid[np.random.randint(valid.shape[0], size=1)].squeeze().astype(int)
            pts2 = scaled[idx,:,:]

        # Random translation
        if translation:
            t_min, t_max = np.min(pts2, axis=0), np.min(1 - pts2, axis=0)
            if allow_artifacts:
                t_min += translation_overflow
                t_max += translation_overflow
            pts2 += np.array([uniform(-t_min[0], t_max[0],1), uniform(-t_min[1], t_max[1], 1)]).T

        # Random rotation
        # sample several rotations, check collision with borders, randomly pick a valid one
        if rotation:
            angles = np.linspace(-max_angle, max_angle, num=n_angles)
            angles = np.concatenate((np.array([0.]),angles), axis=0)  # in case no rotation is valid
            center = np.mean(pts2, axis=0, keepdims=True)
            rot_mat = np.reshape(np.stack([np.cos(angles), -np.sin(angles), np.sin(angles),
                                           np.cos(angles)], axis=1), [-1, 2, 2])
            rotated = np.matmul( (pts2 - center)[np.newaxis,:,:], rot_mat) + center
            if allow_artifacts:
                valid = np.arange(1,n_angles+1)  # all angles valid except the prepended identity
            else:
                valid = (rotated >= 0.) * (rotated <= 1.)
                valid = valid.prod(axis=1).prod(axis=1)
                valid = np.where(valid)[0]
            idx = valid[np.random.randint(valid.shape[0], size=1)].squeeze().astype(int)
            pts2 = rotated[idx,:,:]

        # Rescale to actual size
        shape = np.array(shape[::-1])  # different convention [y, x]
        pts1 *= shape[np.newaxis,:]
        pts2 *= shape[np.newaxis,:]

        homography = cv2.getPerspectiveTransform(np.float32(pts1), np.float32(pts2))
        homography = torch.as_tensor(homography,dtype=torch.float32,device=self.device).unsqueeze(dim=0)
        # Invert so the transform maps warped coordinates back to the
        # original image.
        homography = torch.inverse(homography)
        return homography

    def compute_valid_mask(self, shape, homography, erosion=2):
        """Warp an all-ones image by `homography` and erode the border,
        yielding an int32 mask of pixels with valid (in-bounds) content."""
        if len(homography.shape)==2:
            homography = homography.unsqueeze(0)
        batch_size = homography.shape[0]
        image = torch.ones(tuple([batch_size,1,*shape]),device=self.device, dtype=torch.float32)
        mask = tf.warp_perspective(image, homography, (shape), mode="nearest", align_corners=True)
        if erosion>0:
            # Shrink the mask so border interpolation artefacts are excluded.
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erosion*2,)*2)
            kernel = torch.as_tensor(kernel,device=self.device, dtype=torch.float32)
            mask = kornia.morphology.erosion(mask,kernel)
        return mask.to(torch.int32)

    def __call__(self, image, points):
        """Apply one random homography to (image, keypoints).

        Returns a dict holding the warped image, the surviving warped
        keypoints, their heatmap, the validity mask and the homography.
        """
        image_shape = image.shape[2:]
        homography = self.sample_homography(shape=image_shape,**self.config) # size= (1,3,3)
        warped_image = tf.warp_perspective(image, homography, (image_shape), mode="bilinear", align_corners=True) # size = (H,W)
        warped_valid_mask = self.compute_valid_mask(image_shape, homography, erosion=self.erosion) #size = (1,1,H,W)
        warped_points = warp_points(points, homography, device=self.device) # size = (N,2)
        warped_points = filter_points(warped_points, image_shape, device=self.device) # size = (N,2)
        warped_points_heatmap = compute_keypoint_map(warped_points, image_shape, device=self.device) # size = (H,W)

        data = {'warp':{'image': warped_image.squeeze(), #(H,W)
                        'kpts': warped_points, #(N,2)
                        'kpts_heatmap': warped_points_heatmap, #(H,W)
                        'valid_mask':warped_valid_mask.squeeze()}, #(H,W)
                'homography':homography.squeeze(), #(3,3)
                }

        return data
398709141 | from django.urls import path
from familia import views
# URL routes for the familia app; each path maps to a view in familia.views.
urlpatterns = [
    path('', views.inicio2, name='inicio2'),
    path('verfamilia/', views.verfamilia, name='verfamilia'),
    path('vermascota/', views.vermascota, name='vermascota'),
    path('cargarpersona/', views.cargar_persona, name='cargar_persona'),
    path('cargarmascota/', views.cargar_mascota, name='cargar_mascota'),
    path('buscarpersona/', views.buscar_persona, name='buscar_persona'),
    path('buscarmascota/', views.buscar_mascota, name='buscar_mascota'),
]
3647406109 | from twython import TwythonStreamer
from twython import TwythonError
from twython import TwythonRateLimitError
from twython import TwythonAuthError
from time import sleep
from .config import db
from credentials import app_key
from credentials import app_secret
from credentials import auth_token
from credentials import auth_secret
class Stream(TwythonStreamer):
    """Twitter streaming client that persists every received tweet."""

    def on_success(self, tweet):
        """Store tweet when received."""
        # NOTE(review): if `db` is a pymongo database, `insert` is the
        # legacy API (insert_one is its successor) — confirm the driver.
        db.tweets.insert(tweet)

    def on_error(self, status_code, data):
        """Handle streaming error."""
        print(status_code)
        # Returning True keeps the stream connected after an error.
        return True

    def on_timeout(self):
        """Handle request timeout."""
        print("Timeout...")
        return True
class MonitorTweets(object):
    """Owns a Stream instance and keeps it running until explicitly stopped."""

    def __init__(self):
        # Credential order matches TwythonStreamer's positional signature.
        cr = [app_key, app_secret, auth_token, auth_secret]
        self.stream = Stream(*cr)

    def start(self):
        """Start the twitter stream."""
        # comma separated user ids of every user stored in the db
        users = db.users.find()
        users = [user["id_str"] for user in users]
        users = ",".join(users)
        print("Starting stream...")
        args = {"follow": users, "language": "en"}
        while True:
            try:
                # start stream (blocks until disconnect or exception)
                self.stream.statuses.filter(**args)
            except TwythonRateLimitError as e:
                print("[Twython Exception] Rate Limit")
                # Respect Twitter's requested back-off before reconnecting.
                sleep(e.retry_after)
                continue
            except Exception as e:
                # Top-level boundary: log, back off a minute and restart.
                self.stop()
                print("[Exception] \n%s" % e)
                print("Restarting stream...")
                sleep(60)  # wait
                continue
            else:
                # The filter call returned normally: end was requested.
                print("Breaking...")
                break

    def stop(self):
        """Stop the twitter stream."""
        # end stream
        self.stream.disconnect()
        print("Ending stream...")

    def restart(self):
        """Restart the twitter stream."""
        self.stop()
        self.start()
| taylorrees/penemue | penemue/utils/monitor_tweets.py | monitor_tweets.py | py | 2,157 | python | en | code | 2 | github-code | 13 |
25112971269 | import calendar as pycal
from datetime import datetime
from dateutil.relativedelta import relativedelta
from flaskr.python_helpers import cal_helpers as chs
# Module-level mutable state shared by the week-navigation functions below.
cal = pycal.Calendar(6)  # firstweekday=6: weeks start on Sunday
current_date = datetime.today()
day = current_date.day
month = current_date.month
year = current_date.year
week, index = chs.get_week()  # current week's cells and its index in the month
forward = False  # True when the last navigation step moved forward
def set_current_date() -> "tuple[int, int, int]":
    """Returns current day, month, year shown on the selected week of the calendar"""
    # NOTE(review): despite the name this is a getter — it returns the
    # module-level day/month/year without modifying anything.
    return day, month, year
def get_formatted_week() -> list:
    """Returns the current week of the month as a list of tuples with the format (day_of_month, weekday)"""
    # this function does not format the weeks in the calendar corretly at the moment. There is a bug when moving
    # between months and/or years.
    # counts the number of zeros in the week
    # (calendar.monthdayscalendar pads out-of-month cells with 0)
    zeros = 0
    for day, weekday in week:
        if day == 0:
            zeros += 1
    # Replaces zeros with previous or upcoming days
    # if zeros exist get the correct days for replacement
    if zeros != 0:
        # if week is the last week in the month
        if forward == False and index == 0:
            # Moving backwards: take the trailing days of the previous month.
            ending_date = current_date - relativedelta(weeks=1)
            ending_month = ending_date.month
            ending_year = ending_date.year
            prev_month = cal.monthdayscalendar(year, ending_month)
            prev_week = prev_month[len(prev_month) - 1]
            other_days = prev_week[0:zeros]
        else:
            # Moving forwards: take the leading days of the next month.
            upcoming_month = (current_date + relativedelta(weeks=1)).month
            next_month = cal.monthdayscalendar(year, upcoming_month)
            next_week = next_month[0]
            start_index = len(next_week) - zeros
            other_days = next_week[start_index:len(next_week)]
    # replace 0 with correct day
    i = 0
    weekdays = []
    for day, weekday in week:
        if day == 0:
            weekdays.append(tuple([other_days[i], weekday]))
            i += 1
        else:
            weekdays.append(tuple([day, weekday]))
    return weekdays
def week_move(direction):
    """Move the visible week one step forward ("next") or back ("prev"),
    updating the module-level index/week/day/month/year/forward state."""
    global index, week, month, year, day, current_date, forward
    if direction == "next":
        index += 1
        month_length = len(chs.get_month(year, month))
        current_date = current_date + relativedelta(weeks=1)
        day = current_date.day
        # if next week is the beginning of the next month
        if index >= month_length:
            index = 0
            forward = True
            month = current_date.month
            year = current_date.year
            next_cal_month = chs.get_month(year, month)
            week = next_cal_month[index]
        else:
            # if we are still in the same month but moving to the next month and weeks overlap
            if index == month_length - 1 and current_date.month != month:
                week = chs.get_month(year, month)[index]
                month = current_date.month
                year = current_date.year
            else:
                year = current_date.year
                month = current_date.month
                week = chs.get_month(year, month)[index]
    elif direction == "prev":
        forward = False
        index -= 1
        current_date = current_date - relativedelta(weeks=1)
        day = current_date.day
        year = current_date.year
        # if previous week is the end of the previous month
        if index < 0:
            month = current_date.month
            prev_cal_month = chs.get_month(year, month)
            index = len(prev_cal_month) - 1
            week = prev_cal_month[index]
        else:
            # Overlapping week at a backwards month boundary.
            if (index == 0 and current_date.month != month):
                month = current_date.month
                index = len(chs.get_month(year, month)) - 1
                week = chs.get_month(year, month)[index]
def reset_date():
    """Reset the module-level date state back to today's date and week.

    (The previous docstring was copy-pasted from set_current_date; this
    function returns nothing — it only mutates state, e.g. on logout.)
    """
    # reset date if user logs out
    global current_date, day, month, year, week, index
    current_date, day, month, year = chs.get_todays_date()
    week, index = chs.get_week()
72704226897 | # -*- coding: utf8 -*-
import os
from time import *
import subprocess
import re
import json
from collections import Counter
import mysql.connector
BITCOIND_PATH = '/home/abrochec/blockchain/bitcoin-0.16.1'
# NOTE(review): database credentials are hard-coded in source — move them to
# configuration / environment variables before sharing this file.
cnx =mysql.connector.connect(user='root',password='Alexis2018!',host='localhost',database='miners') #10
cursor = cnx.cursor()
#part of the code that take last block timestamp
def get_lastblocktimestamp():
    """Return the unix timestamp of the current chain tip via bitcoin-cli."""
    listoftime=[]
    lenght = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblockcount'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    blockhash = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblockhash', lenght], stdout=subprocess.PIPE).stdout.decode('utf-8')
    block = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblock', blockhash], stdout=subprocess.PIPE).stdout.decode('utf-8')
    listoftime = [] #30
    jsonblock = json.loads(block)
    listoftime=jsonblock["time"]  # a single int, despite the list-sounding name
    return listoftime
def get_blocktimestamp(lenght):
    """Return the unix timestamp of the block at height `lenght` (a string)."""
    blockhash = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblockhash', lenght], stdout=subprocess.PIPE).stdout.decode('utf-8')
    block = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblock', blockhash], stdout=subprocess.PIPE).stdout.decode('utf-8')
    listoftime = [] #30
    jsonblock = json.loads(block)
    listoftime=jsonblock["time"]  # a single int, despite the list-sounding name
    return listoftime
def get_listoftransactionidfromblock(block):
    """Return the non-coinbase transaction ids from a `getblock` JSON string.

    The coinbase transaction (index 0) is skipped. Blocks containing only
    the coinbase yield the sentinel list [0] — callers test
    `result[0] != 0` to detect it.
    """
    jsonblock = json.loads(block)
    txs = jsonblock["tx"]
    if len(txs) > 1:
        # Idiomatic slice instead of the original index-by-index loop.
        listoftransactionid = list(txs[1:])
    else:
        listoftransactionid = [0]
    return listoftransactionid
def get_blockidfromblocktimestamp(time):
    """Return the id of the earliest block in minersinfo with date >= `time`
    (implemented as date > time - 1)."""
    yearprior=int(time)-1
    str_yearprior=str(yearprior)
    # The value is interpolated into the SQL text; safe only because it is
    # int()-coerced first.
    query=("SELECT blockid FROM minersinfo WHERE date>"+str_yearprior+" ORDER BY blockid ASC LIMIT 1")
    cursor.execute(query)
    yearpriorblockid=cursor.fetchall()
    # fetchall() returns rows; [0] is the first (and only) row tuple.
    return yearpriorblockid[0]
def get_transaction(height):
    """Return the non-coinbase txids of the block at `height` (or [0])."""
    transactionids=[]
    blockhash = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblockhash', str(height)], stdout=subprocess.PIPE).stdout.decode('utf-8')
    block = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblock', blockhash], stdout=subprocess.PIPE).stdout.decode('utf-8')
    transactionids = get_listoftransactionidfromblock(block)
    return transactionids
def get_decodedtransaction(transactionid):
    """Fetch a raw transaction and return bitcoin-cli's decoded JSON string."""
    rawtransaction= subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getrawtransaction', transactionid], stdout=subprocess.PIPE).stdout.decode('utf-8')
    decodedtransaction= subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'decoderawtransaction', rawtransaction.strip()], stdout=subprocess.PIPE).stdout.decode('utf-8')
    return decodedtransaction
# Parameterized INSERT statements: the main (txid, timestamp) row and the
# per-input / per-output link rows used by get_Vinaddresses below.
query=("INSERT INTO txid VALUES (%s ,%s)")
queryVin=("INSERT INTO input VALUES (%s ,%s)")
queryVout=("INSERT INTO output VALUES (%s ,%s)")
def get_Vinaddresses(transactionid, timestamp):
    """Store one transaction and its input/output links in MySQL.

    Fetches the verbose raw transaction via bitcoin-cli, then inserts a
    (txid, timestamp) row, one (txid, input-txid) row per input and one
    (txid, address) row per address of the FIRST output. Transactions whose
    first output has no address list only get the (txid, timestamp) row.
    """
    decodedtransaction=[]
    rawtransaction=[]
    # ' 1' asks bitcoin-cli for the verbose (already decoded JSON) form.
    rawtransaction= subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getrawtransaction',transactionid , ' 1'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    # print(rawtransaction)
    jsonblock = json.loads(rawtransaction)
    # del(jsonblock["vout"])
    del(jsonblock["hex"])
    #print(jsonblock)
    # decodedtransaction= subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'decoderawtransaction', rawtransaction.strip()], stdout=subprocess.PIPE).stdout.decode('utf-8')
    # print(decodedtransaction)
    listofvinaddresses = [] #list of addresses of the input of the current transaction
    listofvinposition = [] #list of the position of the vin in the previous transaction
    listofvintransaction =[] #list of the previous transaction
    listofvouttransaction = []
    #Converting the decoded transaction to json
    # jsontransaction = json.loads(decodedtransaction)
    # call getVin
    if 'addresses' in jsonblock['vout'][0]['scriptPubKey']:
        for j in range(0, len(jsonblock["vout"])):
            listofvouttransaction.append(jsonblock["vout"][j]["scriptPubKey"]["addresses"])
        if 'txid' in jsonblock['vin'][0]:
            for j in range(0, len(jsonblock["vin"])):
                listofvintransaction.append(jsonblock["vin"][j]["txid"])
        if 'vout' in jsonblock['vin'][0]:
            for j in range(0, len(jsonblock["vin"])):
                listofvinposition.append(jsonblock["vin"][j]["vout"])
        listofinputtransaction=','.join(listofvintransaction)
        # Guard against rows larger than the DB column.
        if len(listofinputtransaction)<50000:
            data_transaction=(transactionid, timestamp)
            cursor.execute(query, data_transaction) # insert of the main transaction row
            cnx.commit()
            for i in listofvintransaction:
                data_input=(transactionid, i)
                cursor.execute(queryVin, data_input)
            for i in listofvouttransaction[0]:
                data_output=(transactionid, i)
                cursor.execute(queryVout, data_output)
            cnx.commit()
        else :
            print("list of input transaction too big")
    else:
        data_transaction=(transactionid,timestamp)
        cursor.execute(query, data_transaction)
# main part:
# Resume scanning from the last timestamp already stored in the DB (or from
# block 70000 on a fresh DB) and walk forward block by block.
lenght = subprocess.run([BITCOIND_PATH + '/bin/bitcoin-cli', 'getblockcount'], stdout=subprocess.PIPE).stdout.decode('utf-8')
lastblocktimestamp=get_lastblocktimestamp()
print(lastblocktimestamp)
print(lenght)
DatabaseHeight=("SELECT timestamp FROM txid ORDER BY timestamp DESC LIMIT 1" )
cursor.execute(DatabaseHeight)
blocktimestamp=cursor.fetchall()
if not blocktimestamp:
    blockheight=70000
else:
    height=get_blockidfromblocktimestamp(blocktimestamp[0][0])
    calcul=int(height[0])+1
    blockheight=calcul
# NOTE(review): `lenght` is a decoded string (with trailing newline) while
# `blockheight` is an int, so `blockheight != lenght` is always True — the
# loop never terminates via this condition; confirm intended bound handling.
while blockheight!=lenght:
    listoftransaction=[]
    listoftransaction=get_transaction(str(blockheight))
    listoftransactions=listoftransaction[0]
    if listoftransaction[0]!=0:
        blocktimestamp=get_blocktimestamp(str(blockheight))
        for i in listoftransaction:
            indice=str(i)
            get_Vinaddresses(i, blocktimestamp)
    blockheight+=1
cursor.close()
cnx.close()
| alexisbrochec/blockchain | newgraph.py | newgraph.py | py | 6,954 | python | en | code | 0 | github-code | 13 |
35827838529 | #!/usr/bin/env python3
def string_to_list(line: str) -> list:
    """Convert a string of digit characters into a list of ints.

    Newline characters anywhere in the string are skipped; every other
    character must be a digit (int() raises ValueError otherwise).
    """
    # Comprehension replaces the original append loop.
    return [int(letter) for letter in line if letter != '\n']
def sum_matching_digits(array: list) -> int:
    """AoC 2017 day 1 part 1: sum every digit equal to the NEXT digit in the
    circular list; the total is printed and returned.

    Requires a non-empty list (array[0] is read unconditionally).
    """
    # Renamed from `sum`, which shadowed the builtin.
    total = 0
    previous = array[0]
    for idx in range(1, len(array)):
        if array[idx] == previous:
            total += array[idx]
        previous = array[idx]
    # The list is circular: the digit after the last is the first.
    if array[0] == array[-1]:
        total += array[0]
    print(total)
    return total
def halfway_calculation(line: list) -> int:
    """AoC 2017 day 1 part 2: sum every digit that equals the digit halfway
    around the circular list (the puzzle guarantees an even length).

    Commented-out debug prints from the original were removed.
    """
    steps_ahead = len(line) // 2
    return sum(
        line[idx]
        for idx in range(len(line))
        if line[idx] == line[(idx + steps_ahead) % len(line)]
    )
def main():
    """Read input.txt, echo its contents, then print the part-2 answer per line."""
    with open("input.txt") as file:
        lines = file.readlines()
    for line in lines:
        print(line, end='')
    print("\n")
    # sum of all digits that match the next digit in the list
    # The list is circular, so the digit after the last digit is the first digit in the list.
    # PART 1:
    # for line in lines:
    #     line = string_to_list(line) # converts string to list of integers
    #     result = sum_matching_digits(line)
    #     print(f"part 1: {result}")
    # PART 2:
    for line in lines:
        line = string_to_list(line)  # digits as a list of ints
        result = halfway_calculation(line)
        print(result)
if __name__=="__main__":
main() | KyleSpicer/advent_of_code | 2017/day1/day1.py | day1.py | py | 1,570 | python | en | code | 0 | github-code | 13 |
# Live object-detection demo: reads frames from a V4L2 camera, runs
# SSD-Mobilenet-v2 on each frame, and renders detections in an OpenGL window.
import jetson.inference # NVIDIA inference library (detectNet object detection)
import jetson.utils # NVIDIA utilities (camera capture, OpenGL display)
import sys # NOTE(review): sys appears unused in this script -- confirm before removing
net= jetson.inference.detectNet("SSD-Mobilenet-v2", threshold=0.5) # load detector; detections scored below 0.5 are dropped
camera = jetson.utils.gstCamera(1280, 720, "/dev/video0") # 1280x720 capture from the first V4L2 device
display = jetson.utils.glDisplay() # OpenGL render window
count=0 # NOTE(review): incremented once per frame *with* detections, not per object -- the final label says "objects"; confirm intent
while display.IsOpen():
    img, width, height = camera.CaptureRGBA() # grab one RGBA frame plus its dimensions
    detections = net.Detect(img, width, height) # run inference on the captured frame
    if detections:
        print("detected {:d} objects :".format(len(detections))) # per-frame detection count
        count= count+1
        for detection in detections:
            print(detection) # per-detection info (class id, confidence, bounding box)
    display.RenderOnce(img, width, height) # draw the frame with overlaid detections
    display.SetTitle("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS())) # window title shows network FPS
print("Total number of objects detected:"),
print(count)
| priyankasaini24/Object_detection_on_road_with_driver_monitoring | objects_detection.py | objects_detection.py | py | 1,349 | python | en | code | 0 | github-code | 13 |
10269887416 | import tensorflow as tf
def repeat_end(val, n, k):
    """Return a list containing `val` repeated `n` times with `k` appended."""
    out = [val] * n
    out.append(k)
    return out
def reduce_with(vec, sizes, fn, final_shape):
    """Split `vec` into consecutive groups of the given `sizes`, apply `fn` to
    each group, stack the results, and reshape them to `final_shape`.

    Implemented with TensorArray + tf.while_loop so it works when the number
    of groups is only known at graph-execution time.
    """
    n_groups = tf.shape(sizes)[0]
    # split the flat vector into one TensorArray entry per group
    start_array = tf.TensorArray(dtype=tf.float32, size=n_groups, infer_shape=False).split(value=vec, lengths=sizes)
    end_array = tf.TensorArray(dtype=tf.float32, size=n_groups, infer_shape=True)
    # loop state: (group index, input array, output array); each iteration
    # applies fn to group i and writes the reduced value
    result = tf.while_loop((lambda i, sa, ea: i < n_groups),
                           (lambda i, sa, ea: (i+1, sa, ea.write(i, fn(sa.read(i))))),
                           [0, start_array, end_array])[2].stack()
    return tf.reshape(result, final_shape)
def decode_final_reducer(reducer):
    """Map a reducer name to a function reducing a tensor over axes [1, 2].

    Args:
        reducer: one of "min", "mean", "sum", "max".

    Returns:
        A callable tensor -> tensor performing the named reduction.

    Raises:
        Exception: if `reducer` is not a supported name. (The original error
        message omitted "sum" even though it is supported.)
    """
    if reducer == "min":
        return (lambda x: tf.reduce_min(x, axis=[1, 2]))
    elif reducer == "mean":
        return (lambda x: tf.reduce_mean(x, axis=[1, 2]))
    elif reducer == "sum":
        return (lambda x: tf.reduce_sum(x, axis=[1, 2]))
    elif reducer == "max":
        return (lambda x: tf.reduce_max(x, axis=[1, 2]))
    else:
        raise Exception("Expecting min, mean, sum, or max")
def decode_msg_reducer(reducer):
    """Map a reducer name to a message-aggregation function over axis 0.

    For "min"/"mean"/"max" a zero row is concatenated first so the reduction
    is well-defined (and bounded by zero) even when there are no messages.

    Args:
        reducer: one of "min", "mean", "sum", "max".

    Raises:
        Exception: if `reducer` is not a supported name. (The original error
        message omitted "sum" even though it is supported.)
    """
    if reducer == "min":
        return (lambda x: tf.reduce_min(tf.concat([x, tf.zeros([1, tf.shape(x)[1]])], axis=0), axis=0))
    elif reducer == "mean":
        return (lambda x: tf.reduce_mean(tf.concat([x, tf.zeros([1, tf.shape(x)[1]])], axis=0), axis=0))
    elif reducer == "sum":
        return (lambda x: tf.reduce_sum(x, axis=0))
    elif reducer == "max":
        return (lambda x: tf.reduce_max(tf.concat([x, tf.zeros([1, tf.shape(x)[1]])], axis=0), axis=0))
    else:
        raise Exception("Expecting min, mean, sum, or max")
def decode_transfer_fn(transfer_fn):
    """Return the tf activation function named by `transfer_fn`.

    Supported names: "relu", "tanh", "sig" (sigmoid).
    """
    if transfer_fn == "relu":
        return tf.nn.relu
    if transfer_fn == "tanh":
        return tf.nn.tanh
    if transfer_fn == "sig":
        return tf.nn.sigmoid
    raise Exception("Unsupported transfer function %s" % transfer_fn)
| dselsam/neurosat | python/util.py | util.py | py | 1,911 | python | en | code | 253 | github-code | 13 |
71314195857 | import numpy as np
from collections import Counter, defaultdict
from minimize import minimize
import scipy as sp
import copy
import hashlib
class memoize(object):
    """Cache a function's results keyed by its arguments.

    Unhashable arguments (assumed to be numpy arrays) are replaced in the
    cache key by the SHA-1 digest of their raw bytes.

    Fixes the original, which on a cache miss with *hashable* args did
    `return self.lu[args]` without checking membership and so raised
    KeyError on the very first call.
    """

    def __init__(self, func):
        self.func = func
        self.lu = {}  # cache: key -> previously computed result

    def _key(self, args):
        """Build a hashable cache key, digesting any unhashable (array) args."""
        try:
            hash(args)
            return args
        except TypeError:
            key_parts = []
            for a in args:
                try:
                    hash(a)
                    key_parts.append(a)
                except TypeError:
                    # unhashable: key on the SHA-1 of the raw array bytes
                    b = a.view(np.uint8)
                    key_parts.append(hashlib.sha1(b).hexdigest())
            return tuple(key_parts)

    def __call__(self, *args):
        key = self._key(args)
        if key not in self.lu:
            self.lu[key] = self.func(*args)
        return self.lu[key]
def log_sum_exp(x, axis=-1):
    """Compute log(sum(exp(x))) in a numerically stable way.

    Use the second argument to specify along which dimension the logsumexp
    shall be computed. If -1 (also the default), it is computed along the
    last dimension. From R. Memisevic.

    Fixed for Python 3: the original concatenated two range() objects with
    `+`, which raises TypeError whenever axis != -1 on a >=2-D input.
    """
    if len(x.shape) < 2: #only one possible dimension to sum over?
        x_max = x.max()
        return x_max + np.log(np.sum(np.exp(x - x_max)))
    else:
        if axis != -1:
            # move the reduction axis to the end (range objects must be
            # converted to lists before concatenation in Python 3)
            x = x.transpose(list(range(axis)) + list(range(axis + 1, len(x.shape))) + [axis])
        last = len(x.shape) - 1
        x_max = x.max(last)
        return x_max + np.log(np.sum(np.exp(x - x_max[..., None]), last))
def softmax(x):
    """Row-wise softmax; a 1-D input is treated as a single row (returns 2-D)."""
    if x.ndim == 1:
        x = x.reshape((1, -1))
    # subtract each row's max before exponentiating for numerical stability
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_shifted = np.exp(shifted)
    return exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True)
class SparseMaxEnt(object):
    """Maximum-entropy classifier (log-linear model / logistic regression).

    Implementation specialised for sparse indicator features: the supplied
    feature_function maps datapoints to lists of active feature indices, and
    only those rows of the weight matrix enter each score and gradient.
    """
    def __init__(self, feature_function, n_features, n_classes,
                 random_state=None, shuffle=True, optimizer="lbfgs",
                 verbose=True):
        # feature function returns list of indices
        # features are only indicator
        # assume sparse setup
        self.n_features = n_features
        self.n_classes = n_classes
        self.random_state = random_state
        self.shuffle = shuffle
        self.optimizer = optimizer
        if random_state == None:
            raise ValueError("Random state must not be None!")
        # flat parameter vector: (n_features * n_classes) weights then n_classes biases
        self.params = 0.02 * random_state.randn(self.n_classes * self.n_features + self.n_classes)
        #self.params = np.zeros((self.n_classes * self.n_features + self.n_classes,))
        # weights/biases are *views* into self.params, so updating params in
        # place (see update_params) keeps them in sync
        self.weights = self.params[:self.n_classes * self.n_features].reshape(self.n_features, self.n_classes)
        self.biases = self.params[-self.n_classes:]
        # memoize it
        self.feature_function = feature_function
        self.mem_feature_function = memoize(feature_function)
        self.verbose = verbose
    def fit(self, data, labels, l1_weight_cost=0., l2_weight_cost=0.):
        """Fit by minimising the regularised NLL with the configured optimizer."""
        if self.optimizer == "lbfgs":
            # local import shadows the module-level `minimize` on purpose
            from scipy.optimize import minimize
            res = minimize(self.f_and_g, self.params.copy(),
                           (data, labels, l1_weight_cost, l2_weight_cost), method="L-BFGS-B", jac=True,
                           options={"ftol": 1E-4})
            p = res.x
        elif self.optimizer == "minimize_cg":
            # uses the module-level `minimize` (conjugate-gradient style)
            max_n_line_search = np.inf
            p, g, n_line_searches = minimize(self.params.copy(),
                                             (data, labels, l1_weight_cost, l2_weight_cost),
                                             self.f_and_g,
                                             True,
                                             maxnumlinesearch=max_n_line_search,
                                             verbose=self.verbose)
        else:
            raise ValueError("Unknown optimizer setting {}".format(self.optimizer))
        if self.verbose:
            print("Training complete!")
        self.update_params(p)
    def _oh(self, x, max_classes=None):
        """One-hot / multi-hot encode labels.

        Accepts either a list of label lists (ragged) or a list of scalar
        labels; returns an int array of shape (len(x), n_classes).
        """
        if max_classes == None:
            n_classes = self.n_classes
        else:
            n_classes = max_classes
        #list of list == lol
        # pad ragged label lists with -1 so they form a rectangular array;
        # -1 never matches a class index below
        try:
            max_len = max([len(xi) for xi in x])
            empty = np.zeros((len(x), max_len)) - 1
            for n, xi in enumerate(x):
                empty[n, :len(xi)] = xi
        except TypeError:
            # scalar labels (no len()): a single column suffices
            max_len = 1
            empty = np.zeros((len(x), max_len)) - 1
            for n, xi in enumerate(x):
                empty[n] = xi
        result = np.zeros([len(x)] + [n_classes], dtype="int")
        z = np.zeros(len(x)).astype("int64")
        for c in range(n_classes):
            z *= 0
            z[np.where(empty == c)[0]] = 1
            result[..., c] += z
        return result
    def _uh(self, oh_x):
        """Invert _oh: return the argmax class index along the last axis."""
        return oh_x.argmax(len(oh_x.shape)-1)
    def loglikelihoods(self, data, pseudolabels):
        """Per-datapoint log P(pseudolabel | datapoint).

        Datapoints for which feature_function returned None get -1E8.
        """
        # trim means return regardless of matching original data length
        active_idxs = self.feature_function(data)
        # hasattr(..., "flatten") treats numpy arrays as valid entries;
        # plain `!= None` on an array would broadcast
        inds = [n for n in range(len(active_idxs)) if hasattr(active_idxs[n], "flatten") or active_idxs[n] != None]
        not_inds = [n for n in range(len(active_idxs)) if not hasattr(active_idxs[n], "flatten") and active_idxs[n] == None]
        active_idxs = [active_idxs[ii] for ii in inds]
        label_scores = np.zeros((len(active_idxs), self.n_classes))
        for n in range(len(active_idxs)):
            active_idx = active_idxs[n]
            active_weights = self.weights[active_idx, :]
            active_biases = self.biases
            # per-class score = sum of active feature weights plus bias
            sscores = active_weights.sum(axis=0) + active_biases
            label_scores[n] = sscores
        sprobs = softmax(label_scores)
        final_probs = []
        si = 0
        # re-expand to the original data length; skipped rows get all -1
        for ii in range(len(data)):
            if ii in inds:
                new = sprobs[si]
                final_probs.append(new)
                si += 1
            elif ii in not_inds:
                new = 0. * sprobs[0] - 1.
                final_probs.append(new)
            else:
                raise ValueError("This shouldnt happen")
        sprobs = np.array(final_probs)
        sub_idx = [l for l in list(range(len(data))) if l not in not_inds]
        lls = np.zeros_like(sprobs[:, 0]) - 1E8
        lls[sub_idx] = np.log(sprobs[list(range(len(data))), pseudolabels][sub_idx])
        return lls
    def predict_proba(self, data):
        """Class probabilities per datapoint, shape (len(data), n_classes).

        Rows for which feature_function returned None are filled with -1.
        """
        # trim means return regardless of matching original data length
        active_idxs = self.feature_function(data)
        inds = [n for n in range(len(active_idxs)) if hasattr(active_idxs[n], "flatten") or active_idxs[n] != None]
        not_inds = [n for n in range(len(active_idxs)) if not hasattr(active_idxs[n], "flatten") and active_idxs[n] == None]
        active_idxs = [active_idxs[ii] for ii in inds]
        label_scores = np.zeros((len(active_idxs), self.n_classes))
        for n in range(len(active_idxs)):
            active_idx = active_idxs[n]
            active_weights = self.weights[active_idx, :]
            active_biases = self.biases
            sscores = active_weights.sum(axis=0) + active_biases
            label_scores[n] = sscores
        sprobs = softmax(label_scores)
        final_probs = []
        si = 0
        # re-expand to the original data length; skipped rows get all -1
        for ii in range(len(data)):
            if ii in inds:
                new = sprobs[si]
                final_probs.append(new)
                si += 1
            elif ii in not_inds:
                new = 0. * sprobs[0] - 1.
                final_probs.append(new)
            else:
                raise ValueError("This shouldnt happen")
        return np.array(final_probs)
    def _cost_and_grads(self, data, labels, l1_weight_cost, l2_weight_cost):
        """Regularised mean NLL and its flat gradient over (weights, biases)."""
        assert len(data) == len(labels)
        # switch to block transform...
        # preparation for block transform (uses the memoized feature function)
        active_idxs = self.mem_feature_function(data)
        if len(active_idxs) != len(labels):
            raise ValueError("feature_function should return same number of datapoints! Return None for entries to ignore in training")
        # short circuit OR to avoid issues with array compare
        inds = [n for n in range(len(active_idxs)) if hasattr(active_idxs[n], "flatten") or active_idxs[n] != None]
        if self.shuffle:
            self.random_state.shuffle(inds)
        active_idxs = [active_idxs[ii] for ii in inds]
        labels = [labels[ii] for ii in inds]
        label_scores = np.zeros((len(labels), self.n_classes))
        for n in range(len(active_idxs)):
            active_idx = active_idxs[n]
            active_weights = self.weights[active_idx, :]
            active_biases = self.biases
            sscores = active_weights.sum(axis=0) + active_biases
            label_scores[n] = sscores
        sprobs = softmax(label_scores)
        # https://stats.stackexchange.com/questions/45643/why-l1-norm-for-sparse-models
        nll = -np.sum(np.log(sprobs)[list(range(len(labels))), labels])
        nll = nll / float(len(labels)) + l1_weight_cost * np.sum(np.abs(self.weights)).sum() + l2_weight_cost * np.sum(self.weights ** 2).sum()
        if self.verbose:
            print("nll {}".format(nll))
        # see non-sparse derivation http://cs231n.github.io/neural-networks-case-study/#loss
        # NOTE: dsprobs aliases sprobs; the softmax output is modified in place
        dsprobs = sprobs
        dsprobs[list(range(len(labels))), labels] -= 1
        dsprobs /= float(len(labels))
        sgrad_w = np.zeros((self.n_features, self.n_classes))
        sgrad_b = np.zeros((self.n_classes,))
        # use cached active_idxs
        #for n, (x, y) in enumerate(zip(data, labels)):
        #    active_idx = sorted(list(set(self.feature_function(x))))
        #    if len(active_idx) == 0:
        #        continue
        for n in range(len(active_idxs)):
            active_idx = active_idxs[n]
            sgrad_w[active_idx] += dsprobs[n]
            sgrad_b += dsprobs[n]
        # regularisation gradients: L1 subgradient (sign) and L2 term
        sgrad_w += l1_weight_cost * np.sign(self.weights)
        sgrad_w += l2_weight_cost * self.weights
        grads = np.hstack((sgrad_w.flatten(), sgrad_b))
        if self.verbose:
            print("grads_norm {}".format(np.sqrt((grads ** 2).sum())))
        return nll, grads
    def f_and_g(self, x, features, labels, l1_weight_cost, l2_weight_cost):
        """Evaluate (cost, grad) at parameter vector x, then restore the old params."""
        xold = self.params.copy()
        self.update_params(x.copy())
        result = self._cost_and_grads(features, labels, l1_weight_cost, l2_weight_cost)
        self.update_params(xold.copy())
        return result
    def update_params(self, new_params):
        """ Update model parameters in place so the weight/bias views stay valid."""
        self.params[:] = new_params.copy()
| kastnerkyle/pachet_experiments | maxent.py | maxent.py | py | 10,813 | python | en | code | 17 | github-code | 13 |
# Party guest tracker: loops until the host answers "пора спать" ("time for
# bed"), handling guests arriving ("пришел") or leaving ("ушел").
guests = ['Петя', 'Ваня', 'Саша', 'Лиза', 'Катя']
all_guests = 5  # current head-count; kept in sync with guests manually
answer = ""
while answer != "пора спать":
    print(f"Сейчас на вечеринке {all_guests} человек. {guests}")
    answer = input("Гость пришел или ушел: ")
    if answer == "пришел":
        name = input("Имя гостя: ")
        if all_guests >= 6:
            # capacity reached: the party holds at most 6 people
            print(f"Прости, {name}, но мест больше нет")
        else:
            guests.append(name)
            print(f"Привет, {name}")
            all_guests += 1
    elif answer == "ушел":
        # NOTE(review): the count is decremented before the name is read, and
        # list.remove() raises ValueError for an unknown name, leaving the
        # count wrong -- confirm whether input is assumed valid
        all_guests -= 1
        name = input("Имя гостя: ")
        guests.remove(name)
        print(f"Пока, {name}")
if answer == "пора спать":
    print("Вечеринка закончилась, все легли спать")
# reviewed (exercise accepted)
| ilnrzakirov/Python_basic | Module16/04_party/main.py | main.py | py | 896 | python | ru | code | 0 | github-code | 13 |
13539090247 | from odoo import models, fields
class StudentRecord(models.Model):
    """Odoo model holding a student's basic personal details."""
    _name = "student.student"
    name = fields.Char(string='Name', required=True)
    middle_name = fields.Char(string='Middle Name', required=True)
    last_name = fields.Char(string='Last Name', required=True)
    photo = fields.Binary(string='Photo')
    student_age = fields.Integer(string='Age')
    student_dob = fields.Date(string="Date of Birth")
    student_gender = fields.Selection([('m', 'Male'), ('f', 'Female'), ('o', 'Other')], string='Gender')
class MetrcPackage(models.Model):
    """Odoo model mirroring a Metrc (cannabis track-and-trace) package record."""
    _name = "metrc.packages"
    label = fields.Char(string='Label')
    name = fields.Char(string='Name')
    metrc_id = fields.Integer(string='Metrc id', required=True)
    metrc_package_type = fields.Char(string='Metrc package type')
    # label fixed: this is the Metrc room id, not an age (copy-paste error)
    metrc_room_id = fields.Integer(string='Metrc room id')
    metrc_packaged_date = fields.Date(string='Metrc packaged date')
    # labels fixed: state and state-date were both labelled as dates
    metrc_lab_testing_state = fields.Char(string='Metrc lab testing state')
    metrc_lab_testing_state_date = fields.Date(string='Metrc lab testing state date')
    metrc_is_sample = fields.Integer(string='Metrc is sample')
    # label typo fixed: "recieved" -> "received"
    metrc_received_facility_license = fields.Char(string='Metrc received facility license')
    metrc_on_hold = fields.Integer(string='On hold')
    # NOTE: the field name keeps the historical "archieve" spelling; renaming
    # it would change the database column, so only the docs note it
    metrc_archieve_date = fields.Date(string='Metrc archived date')
    metrc_finished_date = fields.Date(string='Metrc finished date')
    # NOTE(review): One2many normally names an inverse Many2one field on the
    # comodel; 'id' looks suspicious -- confirm against metrc.items
    metrc_item_id = fields.One2many('metrc.items', 'id')
    metrc_product_type_id = fields.Integer(string='Metrc product type id')
    metrc_unit_of_measure_name = fields.Char(string='Metrc unit of measure')
    user_id = fields.Integer(string='User id')
class MetrcRoom(models.Model):
    """Odoo model mirroring a Metrc room (physical location)."""
    _name = "metrc.rooms"
    name = fields.Char(string='Metrc room name')
    metrc_id = fields.Integer(string='Metrc id')
    user_id = fields.Integer(string='User id')
class MetrcCategories(models.Model):
    """Odoo model mirroring a Metrc product category and its requirement flags.

    The requires_* flags are stored as 0/1 integers (matching the upstream
    API payload) rather than Odoo Boolean fields.
    """
    _name = "metrc.categories"
    name = fields.Char(string='Metrc category name')
    product_category_type = fields.Char(string='Category type')
    quantity_type = fields.Char(string='Quantity type')
    requires_strain = fields.Integer(string='Requires Strain', default = 0)
    requires_item_brand = fields.Integer(string='Requires Item Brand', default = 0)
    requires_administration_method = fields.Integer(string='Requires Administration Method', default = 0)
    requires_cbd_percent = fields.Integer(string='Requires CBD percent', default = 0)
    requires_cbd_content = fields.Integer(string='Requires CBD Content', default = 0)
    requires_thc_percent = fields.Integer(string='Requires THC Percent', default = 0)
    requires_thc_content = fields.Integer(string='Requires THC Content', default = 0)
    requires_unit_volume = fields.Integer(string='Requires unit volumne', default = 0)
    requires_unit_weight = fields.Integer(string='Requires unit weight', default = 0)
    requires_serving_size = fields.Integer(string='Requires serving size', default = 0)
    require_supply_duration_dates = fields.Integer(string='Requires supply duration dates', default = 0)
    requires_ingredients = fields.Integer(string='Requires ingredients', default = 0)
    requires_product_photo = fields.Integer(string='Requires product photo', default = 0)
    can_contain_seeds = fields.Integer(string='Can contain seeds', default = 0)
    can_be_remediated = fields.Integer(string='Can be remediated', default = 0)
    user_id = fields.Integer(string='User id', default = 0)
    # label fixed: this is the display name, not a quantity type (copy-paste error)
    display_name = fields.Char(string='Display name')
class MetrcStrains(models.Model):
    """Odoo model mirroring a Metrc cannabis strain."""
    _name = "metrc.strains"
    name = fields.Char(string='Name')
    # labels fixed: these five fields were all mislabelled
    # 'Metrc lab testing date' (copy-paste error)
    thc_level = fields.Char(string='THC level')
    cbd_level = fields.Char(string='CBD level')
    indica = fields.Char(string='Indica')
    sativa = fields.Char(string='Sativa')
    genetics = fields.Char(string='Genetics')
    metrc_id = fields.Integer(string='Metrc id')
    test_status = fields.Char(string='Test Status')
class MetrcItems(models.Model):
    """Odoo model mirroring a Metrc item (sellable product definition)."""
    _name = "metrc.items"
    name = fields.Char(string='Metrc item name')
    product_category_name = fields.Char(string='Metrc product category')
    product_category_type = fields.Char(string='Metrc product category type')
    quantity_type = fields.Char(string='Quantity type')
    default_lab_testing_state = fields.Char(string='Default lab testing state')
    unit_of_measure_name = fields.Char(string='Unit measure name')
    approval_state = fields.Char(string='Approval state')
    strain_id = fields.Integer(string='Strain id')
    strain_name = fields.Char(string='Strain name')
    # label typo fixed: "Adminitration" -> "Administration"
    administration_method = fields.Char(string='Administration method')
    cbd_percent = fields.Char(string='CBD percent')
    cbd_content = fields.Char(string='CBD content')
    cbd_unit_measure = fields.Char(string='CBD unit measure')
    thc_percent = fields.Char(string='THC percent')
    thc_content = fields.Char(string='THC content')
    thc_unit_measure = fields.Char(string='THC unit measure')
    # label fixed: this is the plain unit volume, not a THC figure
    unit_volume = fields.Char(string='Unit volume')
    volume_unit_measure = fields.Char(string='Volume unit measure')
    unit_weight = fields.Char(string='Unit weight')
    weight_unit_measure = fields.Char(string='Weight unit measure')
    serving_size = fields.Char(string='Serving size')
    supply_duration_days = fields.Char(string='Supply duration days')
    unit_quantity = fields.Char(string='Unit quantity')
    quantity_unit_measure = fields.Char(string='Quantity unit measure')
    ingredients = fields.Char(string='Ingredients')
    user_id = fields.Integer(string='User id')
    metrc_id = fields.Integer(string='Metrc id')
    metrc_category_id = fields.Integer(string='Metrc category id')
class MetrcUnits(models.Model):
    """Odoo model mirroring a Metrc unit of measure."""
    _name = "metrc.units"
    name = fields.Char(string='Metrc units name')
    abbreviation = fields.Char(string='Metrc abbreviation')
    quantity_type = fields.Char(string='Metrc quantity type')
class MetrcTransfers(models.Model):
    """Odoo model mirroring a Metrc transfer (shipment manifest) record."""
    _name = "metrc.transfers"
    metrc_id = fields.Integer(string='Metrc id')
    manifest_number = fields.Char(string='Manifest number')
    shipment_license_type = fields.Char(string='Shipment license type')
    shipper_facility_license_number = fields.Char(string='Facility license number')
    shipper_facility_name = fields.Char(string='Shipper facility name')
    name = fields.Char(string='Name')
    # transporter / driver / vehicle details from the manifest
    transporter_facility_license_number = fields.Char(string='Transporter facility license number')
    transporter_facility_name = fields.Char(string='Transporter facility name')
    driver_name = fields.Char(string='Driver name')
    driver_occupational_license_number = fields.Char(string='Driver occupational license number')
    driver_vehicle_license_number = fields.Char(string='Driver vehicle license number')
    vehicle_make = fields.Char(string='Vehicle make')
    vehicle_model = fields.Char(string='Vehicle model')
    vehicle_license_plate_number = fields.Char(string='Vehicle license plate number')
    # delivery / package counts
    delivery_count = fields.Integer(string='Delivery count')
    received_delivery_count = fields.Integer(string='Received delivery count')
    package_count = fields.Integer(string='Package count')
    received_package_count = fields.Integer(string='Received package count')
    # contents flags (0/1 integers, matching the upstream API payload)
    contains_plant_package = fields.Integer(string='Contains plant package')
    contains_product_package = fields.Integer(string='Contains product package')
    contains_testing_sample = fields.Integer(string='Contains testing sample')
    contains_product_requires_remediation = fields.Integer(string='Contains product requires remediation')
    contains_remediated_product_package = fields.Integer(string='Contains remediated product package')
    created_date_time = fields.Date(string='Created date time')
    created_by_user_name = fields.Char(string='Created by user name')
    last_modified = fields.Date(string='Last modified ')
    delivery_id = fields.Integer(string='Delivery id')
    recipient_facility_license_number = fields.Char(string='Recipient facility license number')
    recipient_facility_name = fields.Char(string='Recipient facility name')
    shipment_type_name = fields.Char(string='Shipment type name')
    shipment_transaction_type = fields.Char(string='Shipment transaction type')
    estimated_departure_date_time = fields.Date(string='Estimated departure date time')
    actual_departure_date_time = fields.Date(string='Actual departure date time')
    # label fixed: stray " = " removed from the original label text
    estimated_arrival_date_time = fields.Date(string='Estimated arrival date time')
    actual_arrival_date_time = fields.Date(string='Actual arrival date time')
    delivery_package_count = fields.Integer(string='Delivery package count')
    delivery_received_package_count = fields.Integer(string='Delivery received package count')
    received_date_time = fields.Date(string='Received date time')
user_id = fields.Integer(string='User id') | Raju-dev/smart-hemp | raju/models.py | models.py | py | 8,998 | python | en | code | 0 | github-code | 13 |
38024554208 | from AthenaCommon import CfgMgr
from AthenaCommon.Constants import * # FATAL,ERROR etc.
from AthenaCommon.SystemOfUnits import *
def getParticleBrokerSvcNoOrdering(name="ISF_ParticleBrokerSvcNoOrdering", **kwargs):
    """Configure the ISF particle broker service without a particle ordering tool."""
    kwargs.setdefault('EntryLayerTool', 'ISF_EntryLayerTool')
    kwargs.setdefault('GeoIDSvc', 'ISF_GeoIDSvc')
    kwargs.setdefault('AlwaysUseGeoIDSvc', False)
    from ISF_Config.ISF_jobProperties import ISF_Flags
    # extra geo-ID checks and validation output only in validation mode
    kwargs.setdefault('ValidateGeoIDs', ISF_Flags.ValidationMode())
    kwargs.setdefault('ValidationOutput', ISF_Flags.ValidationMode())
    kwargs.setdefault('ValidationStreamName', "ParticleBroker")
    from G4AtlasApps.SimFlags import simFlags
    kwargs.setdefault('BarcodeService', simFlags.TruthStrategy.BarcodeServiceName())
    return CfgMgr.ISF__ParticleBrokerDynamicOnReadIn(name, **kwargs)
def getParticleBrokerSvc(name="ISF_ParticleBrokerSvc", **kwargs):
    """Particle broker with the default particle ordering tool."""
    #kwargs.setdefault('ParticleOrderingTool', 'ISF_InToOutSubDetOrderingTool')
    kwargs.setdefault('ParticleOrderingTool', 'ISF_ParticleOrderingTool')
    return getParticleBrokerSvcNoOrdering(name, **kwargs)
def getAFIIParticleBrokerSvc(name="ISF_AFIIParticleBrokerSvc", **kwargs):
    """Particle broker variant using the ATLFAST-II (AFII) entry layer tool."""
    kwargs.setdefault('EntryLayerTool', 'ISF_AFIIEntryLayerTool')
    return getParticleBrokerSvc(name, **kwargs)
def getSimHitService(name="ISF_SimHitService", **kwargs):
    """Simulation hit service; writes validation output in validation mode."""
    from ISF_Config.ISF_jobProperties import ISF_Flags
    kwargs.setdefault('ValidationOutput', ISF_Flags.ValidationMode())
    return CfgMgr.ISF__SimHitSvc(name, **kwargs)
def getNoG4SimHitService(name="ISF_NoG4SimHitService", **kwargs):
    """SimHit service with empty Geant4 SD/fast-sim master tools (no G4 step)."""
    kwargs.setdefault("SensitiveDetectorMasterTool", "EmptySensitiveDetectorMasterTool")
    kwargs.setdefault("FastSimulationMasterTool", "EmptyFastSimulationMasterTool")
    return getSimHitService(name, **kwargs)
def getPileupSimHitService(name="ISF_PileupSimHitService", **kwargs):
    """No-G4 SimHit service that keeps inner-detector pileup hits separate."""
    kwargs.setdefault('SeparateInDetPileupHits', True)
    return getNoG4SimHitService(name, **kwargs)
def getISFEnvelopeDefSvc(name="ISF_ISFEnvelopeDefSvc", **kwargs):
    """ISF envelope definition service based on the common ATLAS envelopes."""
    # ATLAS common envelope definitions
    kwargs.setdefault("ATLASEnvelopeDefSvc", "AtlasGeometry_EnvelopeDefSvc")
    return CfgMgr.ISF__ISFEnvelopeDefSvc(name, **kwargs)
def getAFIIEnvelopeDefSvc(name="ISF_AFIIEnvelopeDefSvc", **kwargs):
    """AFII envelope definitions: shortened inner-detector maximum z extent."""
    from AthenaCommon.SystemOfUnits import mm
    # ATLAS common envelope definitions
    kwargs.setdefault("ISFEnvelopeDefSvc", "ISF_ISFEnvelopeDefSvc")
    kwargs.setdefault("InDetMaxExtentZ", 3549.5*mm)
    return CfgMgr.ISF__AFIIEnvelopeDefSvc(name, **kwargs)
def getGeoIDSvc(name="ISF_GeoIDSvc", **kwargs):
    """Geometry-ID service using the ISF volume definitions."""
    # with ISF volume definitions
    kwargs.setdefault("EnvelopeDefSvc", "ISF_ISFEnvelopeDefSvc")
    return CfgMgr.ISF__GeoIDSvc(name, **kwargs)
def getAFIIGeoIDSvc(name="ISF_AFIIGeoIDSvc", **kwargs):
    """Geometry-ID service using the AFII envelope definitions."""
    kwargs.setdefault("EnvelopeDefSvc", "ISF_AFIIEnvelopeDefSvc")
    return getGeoIDSvc(name, **kwargs)
def getParticleKillerSvc(name="ISF_ParticleKillerSvc", **kwargs):
    """Simulator service that simply discards all particles routed to it."""
    kwargs.setdefault('Identifier', "ParticleKiller")
    return CfgMgr.ISF__ParticleKillerSimSvc(name, **kwargs)
def getGenParticleFilters():
    """Assemble the ordered list of GenParticle filter tool names for input conversion.

    Filters depend on the simulated layout, beam type and cavern-background
    setting. (The redundant initial `= []` assignment, immediately
    overwritten, has been removed.)
    """
    genParticleFilterList = ['ISF_ParticleFinalStateFilter'] # not used for Quasi-stable particle simulation
    from G4AtlasApps.SimFlags import simFlags
    if "ATLAS" in simFlags.SimLayout():
        from AthenaCommon.BeamFlags import jobproperties
        if jobproperties.Beam.beamType() != "cosmics":
            genParticleFilterList += ['ISF_ParticlePositionFilterDynamic']
            # eta-phi filtering only when no cavern background is simulated
            if (not simFlags.CavernBG.statusOn) or simFlags.CavernBG.get_Value() == 'Signal':
                genParticleFilterList += ['ISF_EtaPhiFilter']
    genParticleFilterList += ['ISF_GenParticleInteractingFilter']
    return genParticleFilterList
def getInputConverter(name="ISF_InputConverter", **kwargs):
    """Convert generator (HepMC) events into ISF particles, applying the GenParticle filters."""
    from G4AtlasApps.SimFlags import simFlags
    kwargs.setdefault('BarcodeSvc', simFlags.TruthStrategy.BarcodeServiceName())
    kwargs.setdefault("UseGeneratedParticleMass", False)
    kwargs.setdefault("GenParticleFilters", getGenParticleFilters())
    return CfgMgr.ISF__InputConverter(name, **kwargs)
def getLongLivedInputConverter(name="ISF_LongLivedInputConverter", **kwargs):
    """Input converter variant for quasi-stable (long-lived) particle simulation.

    Uses the simulation white-list filter instead of the final-state filter.
    """
    kwargs.setdefault("GenParticleFilters" , [ 'ISF_ParticleSimWhiteList',
                                               'ISF_ParticlePositionFilterDynamic',
                                               'ISF_EtaPhiFilter',
                                               'ISF_GenParticleInteractingFilter', ] )
    kwargs.setdefault('QuasiStableParticlesIncluded', True)
    return getInputConverter(name, **kwargs)
#
# Generic Truth Service Configurations
#
def getGenericTruthService(name="ISF_TruthService", **kwargs):
    """Base truth service configuration shared by all MC-campaign variants."""
    from G4AtlasApps.SimFlags import simFlags
    kwargs.setdefault('BarcodeSvc', simFlags.TruthStrategy.BarcodeServiceName())
    kwargs.setdefault('SkipIfNoChildren', True)
    kwargs.setdefault('SkipIfNoParentBarcode', True)
    kwargs.setdefault('ForceEndVtxInRegions', [])
    long_lived_simulators = ['LongLived', 'longLived']
    from ISF_Config.ISF_jobProperties import ISF_Flags
    # quasi-stable particle handling is enabled when the simulator name hints
    # at a long-lived configuration
    is_long_lived_simulation = any(x in ISF_Flags.Simulator() for x in long_lived_simulators) #FIXME this should be set in a nicer way.
    if is_long_lived_simulation:
        kwargs.setdefault('QuasiStableParticlesIncluded', True)
    return CfgMgr.ISF__TruthSvc(name, **kwargs)
def getValidationTruthService(name="ISF_ValidationTruthService", **kwargs):
    """Truth service used in validation mode: validation strategies in ID and Calo only."""
    kwargs.setdefault('BeamPipeTruthStrategies', [])
    kwargs.setdefault('IDTruthStrategies', ['ISF_ValidationTruthStrategy'] )
    kwargs.setdefault('CaloTruthStrategies', ['ISF_ValidationTruthStrategy'] )
    kwargs.setdefault('MSTruthStrategies', [])
    kwargs.setdefault('IgnoreUndefinedBarcodes', True)
    kwargs.setdefault('PassWholeVertices', True)
    return getGenericTruthService(name, **kwargs)
#
# MC12 Truth Service Configurations
#
def getMC12BeamPipeTruthStrategies():
    """Truth strategies applied in the beam-pipe region for MC12."""
    return ['ISF_MCTruthStrategyGroupID']
def getMC12IDTruthStrategies():
    """Truth strategies applied in the Inner Detector region for MC12."""
    strategies = ['ISF_MCTruthStrategyGroupID']
    strategies.append('ISF_MCTruthStrategyGroupIDHadInt')
    return strategies
def getMC12CaloTruthStrategies():
    """Truth strategies applied in the calorimeter region for MC12."""
    return ['ISF_MCTruthStrategyGroupCaloMuBrem']
def getMC12MSTruthStrategies():
    """Truth strategies applied in the Muon Spectrometer region for MC12: none."""
    return list()
def getMC12TruthService(name="ISF_MC12TruthService", **kwargs):
    """Truth service configured with the MC12 per-region strategy lists."""
    beam_pipe_strategies = getMC12BeamPipeTruthStrategies()
    id_strategies = getMC12IDTruthStrategies()
    calo_strategies = getMC12CaloTruthStrategies()
    ms_strategies = getMC12MSTruthStrategies()
    kwargs.setdefault('BeamPipeTruthStrategies', beam_pipe_strategies) # this is used for beam pipe but not BeamPipeCentral which uses same as ID
    kwargs.setdefault('IDTruthStrategies', id_strategies)
    kwargs.setdefault('CaloTruthStrategies', calo_strategies)
    kwargs.setdefault('MSTruthStrategies', ms_strategies)
    kwargs.setdefault('IgnoreUndefinedBarcodes', False)
    kwargs.setdefault('PassWholeVertices', True)
    return getGenericTruthService(name, **kwargs)
def getTruthService(name="ISF_TruthService", **kwargs):
    """Return the validation truth service in validation mode, else the MC12 one."""
    from ISF_Config.ISF_jobProperties import ISF_Flags
    builder = getValidationTruthService if ISF_Flags.ValidationMode() else getMC12TruthService
    return builder(name, **kwargs)
def getMC12LLPTruthService(name="ISF_MC12TruthLLPService", **kwargs):
    """MC12 truth service with the long-lived-particle strategy added in every region."""
    llp_strategies = ['ISF_LLPTruthStrategy']
    beam_pipe_strategies = getMC12BeamPipeTruthStrategies() + llp_strategies
    id_strategies = getMC12IDTruthStrategies() + llp_strategies
    calo_strategies = getMC12CaloTruthStrategies() + llp_strategies
    ms_strategies = getMC12MSTruthStrategies() + llp_strategies
    kwargs.setdefault('BeamPipeTruthStrategies', beam_pipe_strategies)
    kwargs.setdefault('IDTruthStrategies', id_strategies)
    kwargs.setdefault('CaloTruthStrategies', calo_strategies)
    kwargs.setdefault('MSTruthStrategies', ms_strategies)
    return getMC12TruthService(name, **kwargs)
def getMC12PlusTruthService(name="ISF_MC12PlusTruthService", **kwargs):
    """MC12 truth service that forces truth end-vertices inside the ID region."""
    # importing Reflex dictionary to access AtlasDetDescr::AtlasRegion enum
    import ROOT, cppyy
    cppyy.loadDictionary('AtlasDetDescrDict')
    AtlasRegion = ROOT.AtlasDetDescr
    kwargs.setdefault('ForceEndVtxInRegions', [AtlasRegion.fAtlasID] )
    return getMC12TruthService(name, **kwargs)
#
# MC15 Truth Service Configurations
#
def getMC15BeamPipeTruthStrategies():
    """Truth strategies applied in the beam-pipe region for MC15."""
    return ['ISF_MCTruthStrategyGroupID_MC15']
def getMC15IDTruthStrategies():
    """Truth strategies applied in the Inner Detector region for MC15."""
    strategies = ['ISF_MCTruthStrategyGroupID_MC15']
    strategies.append('ISF_MCTruthStrategyGroupIDHadInt_MC15')
    return strategies
def getMC15CaloTruthStrategies():
    """Truth strategies applied in the calorimeter region for MC15."""
    strategies = ['ISF_MCTruthStrategyGroupCaloMuBrem']
    strategies.append('ISF_MCTruthStrategyGroupCaloDecay_MC15')
    return strategies
def getMC15MSTruthStrategies():
    """Truth strategies applied in the Muon Spectrometer region for MC15: none."""
    return list()
def getMC15TruthService(name="ISF_MC15TruthService", **kwargs):
    """Truth service configured with the MC15 per-region strategy lists."""
    # importing Reflex dictionary to access AtlasDetDescr::AtlasRegion enum
    import ROOT, cppyy
    cppyy.loadDictionary('AtlasDetDescrDict')
    AtlasRegion = ROOT.AtlasDetDescr
    beam_pipe_strategies = getMC15BeamPipeTruthStrategies()
    id_strategies = getMC15IDTruthStrategies()
    calo_strategies = getMC15CaloTruthStrategies()
    ms_strategies = getMC15MSTruthStrategies()
    kwargs.setdefault('BeamPipeTruthStrategies', beam_pipe_strategies) # this is used for beam pipe but not BeamPipeCentral which uses same as ID
    kwargs.setdefault('IDTruthStrategies', id_strategies)
    kwargs.setdefault('CaloTruthStrategies', calo_strategies)
    kwargs.setdefault('MSTruthStrategies', ms_strategies)
    kwargs.setdefault('IgnoreUndefinedBarcodes', False)
    kwargs.setdefault('PassWholeVertices', False) # new for MC15 - can write out partial vertices.
    kwargs.setdefault('ForceEndVtxInRegions', [AtlasRegion.fAtlasID])
    return getGenericTruthService(name, **kwargs)
def getMC15aTruthService(name="ISF_MC15aTruthService", **kwargs):
    """MC15a: like MC15 but without forcing end-vertices into any region."""
    kwargs.setdefault('ForceEndVtxInRegions', [])
    return getMC15TruthService(name, **kwargs)
def getMC15aPlusTruthService(name="ISF_MC15aPlusTruthService", **kwargs):
    """MC15a+: forces truth end-vertices inside the inner-detector region."""
    # importing Reflex dictionary to access AtlasDetDescr::AtlasRegion enum
    import ROOT, cppyy
    cppyy.loadDictionary('AtlasDetDescrDict')
    AtlasRegion = ROOT.AtlasDetDescr
    kwargs.setdefault('ForceEndVtxInRegions', [AtlasRegion.fAtlasID])
    return getMC15TruthService(name, **kwargs)
def getMC15aPlusLLPTruthService(name="ISF_MC15aPlusLLPTruthService", **kwargs):
    """MC15a+ truth service with the long-lived-particle strategy added in every region."""
    llp_strategies = ['ISF_LLPTruthStrategy']
    beam_pipe_strategies = getMC15BeamPipeTruthStrategies() + llp_strategies
    id_strategies = getMC15IDTruthStrategies() + llp_strategies
    calo_strategies = getMC15CaloTruthStrategies() + llp_strategies
    ms_strategies = getMC15MSTruthStrategies() + llp_strategies
    kwargs.setdefault('BeamPipeTruthStrategies', beam_pipe_strategies)
    kwargs.setdefault('IDTruthStrategies', id_strategies)
    kwargs.setdefault('CaloTruthStrategies', calo_strategies)
    kwargs.setdefault('MSTruthStrategies', ms_strategies)
    return getMC15aPlusTruthService(name, **kwargs)
#
# MC16 Truth Service Configurations
#
def getMC16TruthService(name="ISF_MC16TruthService", **kwargs):
    """MC16 truth service: identical configuration to MC15a-plus."""
    return getMC15aPlusTruthService(name, **kwargs)
def getMC16LLPTruthService(name="ISF_MC16LLPTruthService", **kwargs):
    """MC16 truth service with long-lived-particle (LLP) strategies."""
    return getMC15aPlusLLPTruthService(name, **kwargs)
#
# MC18 Truth Service Configurations
#
def getMC18TruthService(name="ISF_MC18TruthService", **kwargs):
    """MC18 truth service: identical configuration to MC15a-plus."""
    return getMC15aPlusTruthService(name, **kwargs)
def getMC18LLPTruthService(name="ISF_MC18LLPTruthService", **kwargs):
    """MC18 truth service with long-lived-particle (LLP) strategies."""
    return getMC15aPlusLLPTruthService(name, **kwargs)
| rushioda/PIXELVALID_athena | athena/Simulation/ISF/ISF_Core/ISF_Services/python/ISF_ServicesConfig.py | ISF_ServicesConfig.py | py | 11,722 | python | en | code | 1 | github-code | 13 |
70292793618 | from abc import abstractmethod
class PrintEdition:
    """A generic print edition: a title, a page format and a page count."""

    def __init__(self, title='', format='', pages=0):
        self.__title = title
        self.__format = format
        self.__pages = pages

    def print(self):
        """Write a human-readable description of the edition to stdout."""
        print(
            f'Название: {self.__title}\n'
            f'Формат: {self.__format}\n'
            f'Количество страниц: {self.__pages}'
        )

    def scan(self):
        """Fill the fields interactively from the keyboard."""
        self.__title = input("Введите название: ")
        self.__format = input("Введите формат: ")
        self.__pages = int(input("Введите количество страниц: "))

    def format_bool(self, name):
        """True if this edition has exactly the given page format."""
        return name == self.__format

    def pages_interval(self, start, end):
        """True if the page count lies in the inclusive range [start, end]."""
        return start <= self.__pages <= end

    def print_file(self, file):
        """Append the edition's description to an already-open text file."""
        file.write(
            f'Название: {self.__title}\n'
            f'Формат: {self.__format}\n'
            f'Количество страниц: {self.__pages}\n'
        )

    def scan_file(self, path):
        """Read one comma-separated record (title,format,pages) from an open file."""
        fields = path.readline().strip().split(",")
        self.__title = fields[0]
        self.__format = fields[1]
        self.__pages = int(fields[2])
class Book(PrintEdition):
    """A print edition that additionally carries an author name."""

    def __init__(self, title='', format='', pages=0, author=''):
        super().__init__(title, format, pages)
        self.__author = author

    def print(self):
        """Print the base edition fields followed by the author."""
        super().print()
        print(f'Автор: {self.__author}\n')

    def scan(self):
        """Fill all fields interactively from the keyboard."""
        print('Заполните данные о книге: ')
        super().scan()
        self.__author = input("Введите имя автора: ")

    def print_file(self, file):
        """Append a 'Книга' record (header, base fields, author) to an open file."""
        file.write('\nКнига\n')
        super().print_file(file)
        file.write(f'Автор: {self.__author}\n')

    def scan_file(self, path):
        """Read one record (CSV line for the base fields, then an author line)."""
        super().scan_file(path)
        self.__author = path.readline().strip()

    def get_author(self):
        """Return the author name."""
        return self.__author

    def GetCopy(self):
        """Return a shallow copy of this book.

        Bug fix: the original body was ``return self()``, which *calls* the
        instance and always raises TypeError (Book objects are not callable).
        ``copy.copy`` produces the intended duplicate, including the
        name-mangled attributes of the base class.
        """
        import copy
        return copy.copy(self)
class Magazine(PrintEdition):
    """A print edition that additionally carries an editor name."""

    def __init__(self, title='', format='', pages=0, editor=''):
        super().__init__(title, format, pages)
        self.__editor = editor

    def get_editor(self):
        """Return the editor name."""
        return self.__editor

    def print(self):
        """Print the base edition fields followed by the editor."""
        super().print()
        print(f'Редактор: {self.__editor}\n')

    def scan(self):
        """Fill all fields interactively from the keyboard."""
        print('Заполните данные о журнале: ')
        super().scan()
        self.__editor = input("Введите имя редактора: ")

    def print_file(self, file):
        """Append a 'Журнал' record (header, base fields, editor) to an open file."""
        file.write('\nЖурнал\n')
        super().print_file(file)
        file.write(f'Редактор: {self.__editor}\n')

    def scan_file(self, path):
        """Read one record (CSV line for the base fields, then an editor line)."""
        super().scan_file(path)
        self.__editor = path.readline().strip()

    def GetCopy(self):
        """Return a shallow copy of this magazine.

        Bug fix: the original body was ``return self()``, which *calls* the
        instance and always raises TypeError (Magazine objects are not
        callable). ``copy.copy`` produces the intended duplicate.
        """
        import copy
        return copy.copy(self)
class Print_Collection(PrintEdition):
    """A container of PrintEdition objects (books and magazines).

    NOTE(review): inherits PrintEdition but never calls super().__init__,
    so the inherited per-edition fields are unused — composition would be
    a better fit; left as-is to preserve behavior.
    """
    def __init__(self, printlist = None):
        # Accept an existing list or start empty (avoids a mutable default).
        if isinstance(printlist,list):
            self.__printlist = printlist
        else:
            self.__printlist = []
    def scan_from_keyboard(self):
        """Interactively read N editions, asking Книга/Журнал for each."""
        num = int(input("Введите количество печатных изданий: "))
        ps = []
        for _ in range(num):
            p = ''
            ans = input('Выберите тип Книга(К) / Журнал(Ж): ').strip()
            if ans in ('К', 'Книга'):
                p = Book()
            elif ans in ('Ж', 'Журнал'):
                p = Magazine()
            # NOTE(review): if the answer matches neither option, p is still
            # the placeholder string and p.scan() raises AttributeError.
            p.scan()
            ps.append(p)
        self.__printlist = ps
    def print_to_display(self):
        """Print every edition, prefixed by its kind."""
        for elem in self.__printlist:
            if(isinstance(elem,Book)):
                print("Книга")
                elem.print()
            elif(isinstance(elem,Magazine)):
                print("Журнал")
                elem.print()
    def scan_from_file(self, path):
        """Load editions from a file: a count line, then per edition a
        type marker line ('B' or 'M') followed by that type's record."""
        ps2 = []
        with open(path) as file:
            num = int(file.readline())
            for _ in range(num):
                p = ''
                ans = file.readline().strip()
                if ans == 'B':
                    p = Book()
                elif ans == 'M':
                    p = Magazine()
                # NOTE(review): an unknown marker leaves p as a string and
                # p.scan_file() raises AttributeError.
                p.scan_file(file)
                ps2.append(p)
        self.__printlist = ps2
    def pages_intervals(self,start,end):
        """Return the editions whose page count is within [start, end]."""
        result = []
        for p in self.__printlist:
            if(p.pages_interval(start,end)):
                result.append(p)
        return result
    def special_format(self, format):
        """Return the editions with exactly the given page format."""
        result = []
        for p in self.__printlist:
            if(p.format_bool(format)):
                result.append(p)
        return result
    def output_to_file(self,path):
        """Write every edition to *path*.

        NOTE(review): despite the name, *path* is an already-open file
        object (see the commented-out open() below and the callers).
        """
        # with open(path,'w',encoding="UTF-8") as file:
        for p in self.__printlist:
            p.print_file(path)
def main():
    """Demo driver: build collections from keyboard and file, filter them
    by page interval and by format, and dump everything to output.txt."""
    path = "input.txt"
    # Collection 1: entered interactively.
    ps = Print_Collection()
    ps.scan_from_keyboard()
    print("\nСписок добавленный с клавиатуры: ")
    ps.print_to_display()
    # Collection 2: loaded from input.txt.
    ps2 = Print_Collection()
    print("\nСписок, добавленный из файла: ")
    ps2.scan_from_file(path)
    ps2.print_to_display()
    # Filter the file collection by an inclusive page-count interval.
    start = int(input("Введите начало интервала: "))
    end = int(input("Введите конец интервала: "))
    diapazone = Print_Collection(ps2.pages_intervals(start,end))
    print(f'\nСписок с интервалом от {start} до {end} страниц: \n')
    diapazone.print_to_display()
    # Filter the file collection by an exact page format.
    format = input("Введите формат: ")
    format_list = Print_Collection(ps2.special_format(format))
    print(f'\nСписок с указанным форматом "{format}" \n')
    format_list.print_to_display()
    # Persist all four views into one report file.
    out_path = "output.txt"
    with open(out_path, 'w', encoding="UTF-8") as file:
        file.write('\nСписок элементов с клавиатуры: \n')
        ps.output_to_file(file)
        file.write('\nСписок элементов из файла: \n')
        ps2.output_to_file(file)
        file.write(f'\nСписок с интервалом от {start} до {end} страниц: \n')
        diapazone.output_to_file(file)
        file.write(f'\nСписок с указанным форматом "{format}" \n')
        format_list.output_to_file(file)
if __name__ == '__main__':
main()
| speedevil123/python_labs | labs_python/lab_13/13lab.py | 13lab.py | py | 6,682 | python | en | code | 0 | github-code | 13 |
15726100490 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#请将组员名单详列如下,并将范例说明用的“王大锤”及其学号取代为你的组员。若你的组员超过三人,请自行加上。
# Crew roster. The dict keys "姓名" (name) and "学号" (student id) are data,
# not comments, and must stay as-is.
crewDICT = {1: {"姓名": "林容与",
                "学号": "B05505006"},
            2: {"姓名":"冯美玲",
                "学号":"B05505041"},
            3: {"姓名":"陈怡瑄",
                "学号":"B05505046"},
            }
# Exercise 1: read 44100.wav (sample rate 44100 Hz) with the wave and struct
# modules, then resample it to 11025 Hz and save it as a new file.
import wave
import struct
sound = wave.open("./44100.wav", 'rb')
nchannels, sampwidth, framerate, nframes, comptype, compname = sound.getparams()
showAll = True # Show all data in raw string at once.
if showAll == True:
    tapeAll = sound.readframes(nframes)
else:
    # Dump each frame as a signed 16-bit little-endian sample.
    for i in range(0, nframes):
        waveData = sound.readframes(1)
        tapeClip = struct.unpack("<h", waveData)
        print(tapeClip)
sound.close()
# Resample by keeping one frame out of every four (naive 4:1 decimation).
source = wave.open("./44100.wav", 'rb')
result = wave.open("./11025.wav", 'wb')
result.setparams((nchannels, sampwidth, framerate//4, nframes//4, comptype, compname))
for i in range(0, nframes//4):
    waveData = source.readframes(1)
    result.writeframes(waveData)
    source.readframes(3)  # skip the next three frames
result.close()
source.close()
# Exercise 2: use Python 3's decode() to turn the byte strings below into
# Chinese text and print them.
b1 = b"\xa5x\xa4j\xa4u\xac\xec"
b2 = b"\xe5\x8f\xb0\xe5\xa4\xa7\xe5\xb7\xa5\xe7\xa7\x91"
b3 = b"\xff\xfe\xf0S'Y\xe5]\xd1y"
print(b1.decode('cp950'))
print(b2.decode('utf-8'))
print(b3.decode('utf-16'))
# Exercise 3: use Python 3's encode() to encode the string below as
# cp950, utf-8 and utf-16.
s0 = "计算机概论与程式设计"
s_cp950 = s0.encode('cp950')
s_utf8 = s0.encode('utf-8')
s_utf16 = s0.encode('utf-16')
# 第四题:请说明 Wifi 和 Bluetooth 之间...
# (a). 哪一种传输方式较为耗电?
#Wifi
# (b). 哪一种传输方式较快速?
#Wifi
# (c). 请实际测试:请查出你的手机型号采用的 Bluetooth 规格,再用你的手机拍摄一张照片,
# 并透过 Bluetooth 传送该照片到朋友的手机里。 考量到双方手机的蓝牙设备规格以及照
# 片的分辨率、档案大小,理论上应该耗时多少时间完成传送?而实际上又耗了多少时间进行
# 传送?
#档案大小:1.03MB、照片分辨率:72dpi 4032 x 3024
#手机iphone6s 蓝牙4.2 传输速度:24Mbps (maximum)
#理论耗时:0.433秒
#实际耗时:2.85秒
#最后并请列出所有可能影响传送时间的因素。
#1.干扰源(附近有使用2.4GHz波段的其他装置、)
#2.已连接的使用中蓝牙无线装置数量
| PeterWolf-tw/ESOE-CS101-2016 | homework04_group1.py | homework04_group1.py | py | 2,892 | python | zh | code | 15 | github-code | 13 |
39841420756 | import sys
from collections.abc import Mapping
from typing import Any, Optional
import pymaid
from . import __version__
from .constants import DATA_DIR
if sys.version_info >= (3, 11):
import tomllib
else:
import tomli as tomllib
def get_data_dir(dname=__version__):
    """Return the raw-output directory for the given version name
    (defaults to this package's version)."""
    raw_root = DATA_DIR / "output/raw"
    return raw_root / dname
def read_toml(fpath):
    """Parse the TOML file at *fpath* and return its contents as a dict."""
    with open(fpath, "rb") as toml_file:
        return tomllib.load(toml_file)
# Sentinel distinguishing "no default supplied" from an explicit default of None.
NO_DEFAULT = object()
class Config:
    """Read-only wrapper around a nested dict (typically parsed from TOML)."""
    def __init__(self, d: Optional[dict[str, Any]] = None) -> None:
        if d is None:
            d = dict()
        self._d = d
    def get(self, *keys, default=NO_DEFAULT, as_config=True) -> Any:
        """Default only applies to final key."""
        d = self._d
        for idx, k in enumerate(keys, 1):
            try:
                d = d[k]
            except KeyError as e:
                # Only the last key may fall back to the default; a missing
                # intermediate key always raises.
                if idx == len(keys) and default is not NO_DEFAULT:
                    return default
                raise e
        # Wrap nested mappings so lookups can be chained.
        if as_config and isinstance(d, Mapping):
            return type(self)(d)
        return d
    @classmethod
    def from_toml(cls, path):
        """Alternate constructor: load the wrapped dict from a TOML file."""
        with open(path, "rb") as f:
            d = tomllib.load(f)
        return cls(d)
    def __hash__(self):
        # Hash of a canonical (sorted, tuple-ized) form of the wrapped dict.
        # NOTE(review): __eq__ is not defined, so two configs that hash alike
        # still compare unequal by identity — confirm that is intended.
        hashable = hashable_toml_dict(self._d)
        return hash(hashable)
    def hex_digest(self):
        # NOTE(review): for a negative hash, splitting on "x" drops the sign
        # (hex(-5) == '-0x5'), so distinct hashes can share a digest.
        return hex(hash(self)).split("x")[1]
def _hashable_toml_value(v):
    """Map a single TOML value to a hashable equivalent (containers -> tuples)."""
    if isinstance(v, list):
        return hashable_toml_list(v)
    if isinstance(v, dict):
        return hashable_toml_dict(v)
    return v

def hashable_toml_dict(d: dict[str, Any]):
    """Recursively convert a TOML dict into a hashable tuple of
    key-sorted (key, value) pairs."""
    return tuple((k, _hashable_toml_value(v)) for k, v in sorted(d.items()))

def hashable_toml_list(lst: list):
    """Recursively convert a TOML list into a hashable tuple, sorting the
    elements so the result is order-independent."""
    return tuple(_hashable_toml_value(v) for v in sorted(lst))
def hash_toml(fpath) -> str:
    """Return a hex digest (no sign, no '0x') of the canonicalised
    contents of the TOML file at *fpath*."""
    parsed = read_toml(fpath)
    canonical = hashable_toml_dict(parsed)
    return hex(hash(canonical))[2:]
def get_catmaid_instance(*dicts) -> pymaid.CatmaidInstance:
    """Merge the given keyword dicts (later dicts win, falsy ones are
    skipped) and build a CatmaidInstance from the environment."""
    merged = {}
    for overrides in dicts:
        if overrides:
            merged.update(overrides)
    return pymaid.CatmaidInstance.from_environment(**merged)
| clbarnes/catmaid_publish | src/catmaid_publish/io_helpers.py | io_helpers.py | py | 2,265 | python | en | code | 0 | github-code | 13 |
70956257938 | from PyQt5.QtWidgets import QWidget, QVBoxLayout
from PyQt5.QtCore import Qt
from services.WindowService import WindowService
from widgets.Map.MapMode import MapMode
from widgets.Map.Createbox.TableList import TableList
class CreateboxWidget(QWidget):
    """Right-hand side panel, shown only while the map is in CREATE_ITEMS mode."""
    def __init__(self, parent=None):
        super(CreateboxWidget, self).__init__(parent)
        # NOTE(review): this attribute shadows QWidget.parent() — later code
        # relies on the attribute, so renaming would need a caller sweep.
        self.parent = parent
        self.setLayout(QVBoxLayout())
        self.layout().setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.layout().setSpacing(0)
        self.raise_()
        # Needed so stylesheet backgrounds are painted on a plain QWidget.
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.layout().addWidget(TableList(self))
        parent.modeObserver().subscribe(self.onModeChanges)
        WindowService.resizeSubject.subscribe(self.onWindowResize)
    def onModeChanges(self, mode):
        """Show the panel anchored 400px from the right edge in
        CREATE_ITEMS mode; hide it in every other mode."""
        if mode == MapMode.CREATE_ITEMS:
            self.show()
            window_geometry = self.parent.parent().frameGeometry()
            self.move(window_geometry.width() - 400, 0)
        else:
            self.hide()
    def onWindowResize(self, event):
        """Keep the panel glued to the right edge on window resize."""
        # NOTE(review): the x offset (width - 400) and the panel width (200)
        # disagree, leaving a 200px gap — confirm which value is intended.
        geometry = WindowService.instance.frameGeometry()
        self.setGeometry(geometry.width() - 400, 0, 200, geometry.height())
| GeorgeHulpoi/piu-restaurant-management | widgets/Map/Createbox/CreateboxWidget.py | CreateboxWidget.py | py | 1,256 | python | en | code | 0 | github-code | 13 |
70953487378 | import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import copy
# Load the CSV: column 0 is a location index, columns 1..26 are signal readings.
dataset = np.loadtxt("one.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,1:27]
Y = dataset[:,0]-624
Y = Y.astype(int)
s = X.shape
# NOTE(review): this loop is a no-op (it assigns X[i][j] to itself); it looks
# like a leftover clamp/filter for weak signals below -50 — confirm intent.
for i in range(s[0]):
    for j in range(s[1]):
        if (X[i][j]< -50):
            X[i][j]= X[i][j]
# Scatter sensor column i=21 onto a flat 644-cell grid; -100 marks "no reading".
X_ = np.zeros((1,644))
X_[0]=-100
i=21
X_[0,Y] = X[:,i]
S = X_.reshape(23,28)
# Keep a copy of the raw grid so the change can be measured afterwards.
old = copy.copy(S)
#print old
# matrix to track the original signal strenght -- shouldn't change
A = np.ones((23,28))
A[S==-100] = 0
# interpolate the S -- fill the empty grid
# Iterative relaxation: each cell without a real reading (A == 0) becomes the
# mean of its available 4-neighbours; edges and corners only average the
# neighbours that exist. 3000 sweeps are run in place.
for itr in range(3000):
    for i in range(23):
        for j in range(28):
            if (A[i][j]!=1):
                if(j==0):
                    if(i==0):
                        S[i,j] = ( S[i,j+1] + S[i+1,j] )/2
                    elif(i==22):
                        S[i,j] = (S[i-1,j] + S[i,j+1])/2
                    else:
                        S[i,j] = (S[i,j+1]+S[i-1,j]+S[i+1,j])/3
                elif (i==0):
                    if(j==0):
                        S[i,j] = ( S[i,j+1] + S[i+1,j] )/2
                    elif(j==27):
                        S[i,j] = ( S[i,j-1] + S[i+1,j] )/2
                    else:
                        S[i,j] = (S[i,j+1]+S[i,j-1]+S[i+1,j])/3
                elif(j==27):
                    if(i==0):
                        S[i,j] = ( S[i,j-1] + S[i+1,j] )/2
                    elif(i==22):
                        S[i,j] = (S[i,j-1] + S[i-1,j])/2
                    else:
                        S[i,j] = (S[i-1,j]+S[i+1,j]+S[i,j-1])/3
                elif(i==22):
                    if(j==0):
                        S[i,j] = (S[i-1,j] + S[i,j+1])/2
                    elif(j==27):
                        S[i,j] = (S[i,j-1] + S[i-1,j])/2
                    else:
                        S[i,j] = (S[i,j-1]+S[i,j+1]+S[i-1,j])/3
                else:
                    S[i,j]= (S[i,j-1] + S[i,j+1] + S[i-1,j] + S[i+1,j])/4
# Mean squared difference between the raw grid and the interpolated one.
diff = old-S
square = np.square(diff)
sum_mean = np.sum(square)/(23*28)
#plt.figure()
#plt.contour(S)
plt.contour(S)
plt.show()
| gauravshelangia/MTarget_Server | contour plots /interpolate.py | interpolate.py | py | 2,213 | python | en | code | 1 | github-code | 13 |
73708422736 | import os
import shutil
import traceback
from abc import abstractmethod
from typing import Dict, List, Optional, Tuple
from keras_preprocessing.image import save_img
from src.datasets.abstract_dataset import AbstractDataset
from src.datasets.dataset_path_creator import DatasetPathCreator
ImageLabel = Dict[str, str]
ProcessingPath = Tuple[str, str] # Represents the input path to an image and the output path after resizing
ProcessingPaths = List[ProcessingPath]
class ProcessingSettings:
    """Error-handling options for a processing run.

    :param show_errors: when True, errors are printed to the console
    :param throw_errors: when True, errors are not caught and abort the run
    """

    def __init__(self, show_errors: bool, throw_errors: bool):
        self.show_errors = show_errors
        self.throw_errors = throw_errors
class BaseProcessor:
    """Base class for dataset image processors: resolves input/output paths,
    resizes every source image, and tracks processed/failed counts.
    Subclasses implement create_output_paths() and may override
    pre_process()/post_process()."""
    # Divider printed between processing runs.
    BAR = "-" * 50
    def __init__(self, dataset_path_creator: DatasetPathCreator, source_image_dir: str):
        """
        Handles processing of the datasets
        :param dataset_path_creator: handles making the paths to the input data for a dataset
        :param source_image_dir: the directory containing the source (unprocessed) images
        """
        self.dataset_path_creator = dataset_path_creator
        self.input_image_dir = os.path.join(dataset_path_creator.source_dir, source_image_dir)
        self.n_processed = 0
        self.n_failed = 0
        os.makedirs(dataset_path_creator.image_dir, exist_ok=True)
    @abstractmethod
    def create_output_paths(self, path_to_input_images: str) -> ProcessingPaths:
        """
        Creates the output paths for each input image dir
        :param path_to_input_images: path to input images
        :return: a list of tuple containing the input and output image path pairs
        """
        pass
    def pre_process(self, settings: ProcessingSettings):
        """
        Can be overridden by child class to perform pre-processing
        :param settings: contains the appropriate settings for the processing run
        :return: None
        """
        pass
    def post_process(self, settings: ProcessingSettings):
        """
        Can be overridden by child class to perform steps after pre-processing is run
        :param settings: contains the appropriate settings for the processing run
        :return: None
        """
        pass
    def print_status(self, override: bool = True):
        """
        Prints the status of the pre-processing
        :param override: If True, uses \r as the newline character else \n is used
        :return: None
        """
        end = "\r" if override else "\n"
        print("Processed: ", self.n_processed, "Failed: ", self.n_failed, end=end)
    def print_bar(self):
        """
        Prints a divider bar
        :return: None
        """
        print(self.BAR)
    def print_exception(self, e: Exception):
        """
        Prints the exception to console
        :param e: the exception
        :return: None
        """
        traceback.print_exc()
        print("Processing %s has failed" % self.__class__.__name__)
        print(e)
    def process(self, settings: ProcessingSettings) -> Optional[Exception]:
        """
        Performs processing on the dataset
        :param settings: contains the appropriate settings for the processing run
        :return: the exception if one occurred
        """
        self.print_bar()
        print("Processing : " + self.__class__.__name__)
        try:
            self.pre_process(settings)
            self.resize_images(self._get_image_paths(), settings)
            self.post_process(settings)
            self.print_status(override=False)
        except Exception as e:
            self.print_exception(e)
            return e
    def _get_image_paths(self) -> List:
        """
        Gets all paths pointing to the images in a dataset
        :return: a list of paths
        """
        return AbstractDataset.get_image_paths(self.input_image_dir)
    def _copy_label_file(self):
        """
        Moves the label file from the source path to the final project directory.
        :return: None
        """
        src = os.path.join(self.dataset_path_creator.source_dir, self.dataset_path_creator.label_file_name)
        dst = self.dataset_path_creator.label_file
        shutil.copyfile(src, dst)
    def resize_images(self, image_paths: List, settings: ProcessingSettings):
        """
        Resizes all images in a dataset
        :param image_paths: list of image paths
        :param settings: contains the appropriate settings for the processing run
        :return: None
        """
        # NOTE: n_processed counts source entries while n_failed counts
        # individual outputs, so the two are not directly comparable when an
        # entry yields multiple output paths.
        for entry_name in image_paths:
            entry_outputs = self.create_output_paths(entry_name)
            for entry_output in entry_outputs:
                input_path, output_path = entry_output
                error = self.read_resize_save_image(input_path, output_path, settings)
                if error is not None:
                    self.n_failed += 1
            self.n_processed += 1
            if self.n_processed % 5000 == 0:
                self.print_status()
    @staticmethod
    def read_resize_save_image(input_image_path: str, output_image_path: str, settings: ProcessingSettings) -> Optional[
        Exception]:
        """
        Reads, resizes and saves image
        :param input_image_path: the path to the input image
        :param output_image_path: path of where to save the image
        :param settings: contains the appropriate settings for the processing run
        :return: the exception if one occurred
        """
        try:
            file_name = os.path.split(output_image_path)[1]
            # Skip work already done (output exists) and hidden/dot files.
            if os.path.isfile(output_image_path) or file_name[0] == ".":
                return
            if not os.path.isfile(input_image_path):
                raise Exception("No image found at path: ", input_image_path)
            image = AbstractDataset.decode_image_from_path(input_image_path)
            save_img(output_image_path, image)
        except Exception as e:
            if settings.show_errors:
                print("FAILED:", input_image_path)
                print(e)
            if settings.throw_errors:
                raise e
            return e
    @staticmethod
    def create_generic_single_output(entry_name: str, output_folder: str) -> ProcessingPaths:
        """
        Creates a tuple containing the input and output image path for a dataset with a single path per image
        :param entry_name: path to input images
        :param output_folder: the output dir name
        :return: a tuple containing the input and output image path inside a list (in case of multiple paths)
        """
        image_file_name = os.path.split(entry_name)[1]
        output_image_path = os.path.join(output_folder, image_file_name)
        return [(entry_name, output_image_path)]
| thearod5/calorie-predictor | src/preprocessing/base_processor.py | base_processor.py | py | 6,933 | python | en | code | 1 | github-code | 13 |
6948619834 | from typing import *
import math
class Solution:
    """LeetCode 1610: maximum number of points visible within a view angle."""

    def visiblePoints(self, points: List[List[int]], angle: int, location: List[int]) -> int:
        """Sliding window over the sorted polar angles of all points.

        Points coinciding with the observer are always visible and counted
        separately. The sorted angle list is duplicated with a +2*pi shift so
        a window may wrap around the -pi/+pi seam.
        """
        ox, oy = location
        same_spot = 0
        polar = []
        for px, py in points:
            if px == ox and py == oy:
                same_spot += 1
            else:
                polar.append(math.atan2(oy - py, ox - px))
        polar.sort()
        polar += [a + math.pi * 2 for a in polar]
        fov = angle / 180 * math.pi
        best = 0
        left = 0
        for right, a in enumerate(polar):
            # Shrink the window until it spans at most the field of view.
            while a - polar[left] > fov:
                left += 1
            best = max(best, right - left + 1)
        return best + same_spot
if __name__ == '__main__':
sol=Solution()
points = [[2, 1], [2, 2], [3, 3]]
angle = 90
location = [1, 1]
print(sol.visiblePoints(points,angle,location))
| Xiaoctw/LeetCode1_python | 数学/可见点的最大数目_1610.py | 可见点的最大数目_1610.py | py | 926 | python | en | code | 0 | github-code | 13 |
46397372994 | import base64
import hashlib
import os
import logging
import select
import socket
import struct
import sys
import threading
import time
from aiy.vision.streaming.presence import PresenceServer
import aiy.vision.streaming.proto.messages_pb2 as pb2
from http.server import BaseHTTPRequestHandler
from io import BytesIO
from itertools import cycle
from picamera import PiVideoFrameType
AVAHI_SERVICE = '_aiy_vision_video._tcp'
ENCODING_BIT_RATE = 1000000
TX_QUEUE_SIZE = 15
WS_PORT = 4664
TCP_PORT = 4665
ANNEXB_PORT = 4666
def _close_socket(sock):
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
class StreamingServer(object):
    """Streams H.264 video from a picamera to connected clients.

    Listens on three ports: length-prefixed protobuf over TCP, the same
    protobuf over WebSocket, and raw Annex-B. Recording starts when the
    first client enables streaming and stops when the last one disables it.
    """
    def __init__(self, camera):
        self._lock = threading.Lock()
        self._logger = logging.getLogger(__name__)
        self._camera = camera
        # Number of clients currently streaming; drives start/stop recording.
        self._stream_count = 0
        self._tcp_socket = None
        self._web_socket = None
        self._annexb_socket = None
        self._thread = None
        self._closed = False
        self._waiting_for_key = False
        self._start_time = time.monotonic()
        self._seq = 0
        self._clients = []
    def run(self):
        """Start the accept loop on a background thread (idempotent)."""
        with self._lock:
            if self._thread:
                self._logger.error('Server already running')
                return
            self._closed = False
            self._thread = threading.Thread(target=self._server_thread)
            self._thread.start()
    def close(self):
        """Shut down listeners, close all clients and join the accept thread."""
        to_join = None
        clients = None
        with self._lock:
            if self._closed:
                return
            self._closed = True
            clients = self._clients
            self._clients = []
            if self._tcp_socket:
                _close_socket(self._tcp_socket)
                self._tcp_socket = None
            if self._web_socket:
                _close_socket(self._web_socket)
                self._web_socket = None
            if self._annexb_socket:
                _close_socket(self._annexb_socket)
                self._annexb_socket = None
            if self._thread:
                to_join = self._thread
                self._thread = None
        # Close clients outside the lock: client.close() calls back into us.
        if clients:
            self._logger.info('Closing %d clients', len(clients))
            for client in clients:
                client.close()
        if to_join:
            to_join.join()
        self._logger.info('Server closed')
    def _server_thread(self):
        """Accept loop: binds the three listeners, advertises via mDNS and
        spawns one client object per accepted connection."""
        tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        tcp_socket.bind(('', TCP_PORT))
        tcp_socket.listen()
        tcp_port = tcp_socket.getsockname()[1]
        web_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        web_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        web_socket.bind(('', WS_PORT))
        web_socket.listen()
        web_port = web_socket.getsockname()[1]
        annexb_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        annexb_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        annexb_socket.bind(('', ANNEXB_PORT))
        annexb_socket.listen()
        annexb_port = annexb_socket.getsockname()[1]
        with self._lock:
            self._tcp_socket = tcp_socket
            self._web_socket = web_socket
            self._annexb_socket = annexb_socket
        self._logger.info('Listening on ports tcp: %d web: %d annexb: %d',
                          tcp_port, web_port, annexb_port)
        presence = PresenceServer(AVAHI_SERVICE, tcp_port)
        presence.run()
        while True:
            with self._lock:
                if self._closed:
                    break
            socks = [tcp_socket, web_socket, annexb_socket]
            try:
                rlist, _, _ = select.select(socks, socks, socks)
                for ready in rlist:
                    client_sock, client_addr = ready.accept()
                    if ready == tcp_socket:
                        kind = 'tcp'
                        client = _ProtoClient(self, client_sock, client_addr)
                    elif ready == web_socket:
                        kind = 'web'
                        client = _WsProtoClient(self, client_sock, client_addr)
                    elif ready == annexb_socket:
                        kind = 'annexb'
                        client = _AnnexbClient(self, client_sock, client_addr)
                    else:
                        # Shouldn't happen.
                        client_sock.close()
                        continue
                    self._logger.info('New %s connection from %s:%d', kind,
                                      client_addr[0], client_addr[1])
                    with self._lock:
                        self._clients.append(client)
                    client.start()
            # NOTE(review): deliberately broad — select()/accept() on sockets
            # closed by close() can raise OSError or ValueError; the loop then
            # re-checks _closed and exits. Do not narrow without testing shutdown.
            except:
                self._logger.info('Server sockets closed')
        self._logger.info('Server shutting down')
        presence.close()
        _close_socket(tcp_socket)
        _close_socket(web_socket)
        _close_socket(annexb_socket)
        with self._lock:
            self._tcp_socket = None
            self._web_socket = None
            self._annexb_socket = None
    def _stream_control(self, enable):
        """Reference-count streaming clients; start camera recording on the
        first enable and stop it on the last disable."""
        start_recording = False
        stop_recording = False
        with self._lock:
            if enable:
                self._stream_count += 1
                start_recording = self._stream_count == 1
            else:
                self._stream_count -= 1
                stop_recording = self._stream_count == 0
        if start_recording:
            self._logger.info('Start recording')
            self._camera.start_recording(
                _EncoderSink(self),
                format='h264',
                profile='baseline',
                inline_headers=True,
                bitrate=ENCODING_BIT_RATE,
                intra_period=0)
        if stop_recording:
            self._logger.info('Stop recording')
            self._camera.stop_recording()
    def _client_closed(self, client):
        # Callback from a client's close(); drop it from the active list.
        with self._lock:
            if client in self._clients:
                self._clients.remove(client)
    def _on_video_data(self, data):
        """Fan encoded buffers out to clients, classified by picamera frame
        type; requests a key frame if any client fell behind."""
        frame_type = self._camera.frame.frame_type
        is_key = frame_type == PiVideoFrameType.key_frame
        is_delta = frame_type == PiVideoFrameType.frame
        is_codec_data = frame_type == PiVideoFrameType.sps_header
        if is_key:
            self._waiting_for_key = False
        needs_key = False
        if is_codec_data:
            with self._lock:
                for client in self._clients:
                    needs_key |= client.send_codec_data(self._camera.resolution, data)
        elif is_key or is_delta:
            needs_key = False
            pts = int((time.monotonic() - self._start_time) * 1e6)
            with self._lock:
                for client in self._clients:
                    needs_key |= client.send_frame_data(is_key, self._seq, pts, data)
                self._seq += 1
        else:
            self._logger.info('Unknown frame %d bytes', len(data))
        if needs_key:
            self._request_key_frame()
    def send_inference_data(self, data):
        """Broadcast an inference overlay message to all streaming clients."""
        needs_key = False
        with self._lock:
            for client in self._clients:
                needs_key |= client.send_inference_data(data)
        if needs_key:
            self._request_key_frame()
    def _request_key_frame(self):
        # Ask the encoder for a key frame at most once per outstanding request.
        if not self._waiting_for_key:
            self._logger.info('Requesting key frame')
            self._camera.request_key_frame()
            self._waiting_for_key = True
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class _EncoderSink(object):
def __init__(self, server):
self._server = server
def write(self, data):
self._server._on_video_data(data)
def flush(self):
pass
class _Client(object):
    """Base class for one connected streaming client.

    Owns an rx thread (parses inbound messages) and a tx thread (drains a
    bounded send queue). Subclasses provide the wire format via
    _queue_*_locked, _send_message, _receive_message and _handle_message.
    """
    def __init__(self, server, socket, addr):
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)
        self._logger = logging.getLogger(__name__)
        self._streaming = False
        self._closed = False
        self._server = server
        self._socket = socket
        self._ip = addr[0]
        self._port = addr[1]
        self._tx_q = []
        # Until codec data and a key frame are (re)sent, delta frames are
        # useless to the decoder; these flags gate send_frame_data().
        self._needs_codec_data = True
        self._needs_key = True
        self._rx_thread = threading.Thread(target=self._rx_thread)
        self._tx_thread = threading.Thread(target=self._tx_thread)
    def start(self):
        """Start the rx and tx worker threads."""
        self._rx_thread.start()
        self._tx_thread.start()
    def __del__(self):
        self.close()
    def close(self):
        """Idempotently tear down the connection and notify the server."""
        with self._lock:
            if self._closed:
                return
            self._closed = True
            self._cond.notifyAll()
            streaming = self._streaming
            self._streaming = False
            _close_socket(self._socket)
        self._log_info('Connection closed')
        # Outside the lock: these call back into the server.
        if streaming:
            self._server._stream_control(False)
        self._server._client_closed(self)
    def send_codec_data(self, resolution, data):
        """Queue SPS/PPS codec data. Returns True if messages were dropped."""
        with self._lock:
            if not self._streaming:
                return False
            self._needs_codec_data = False
            return self._queue_codec_data_locked(resolution, data)
    def send_frame_data(self, is_key, seq, pts, data):
        """Queue one encoded frame. Returns True when the caller should
        request a key frame (client still needs codec data or a key frame,
        or the queue overflowed)."""
        with self._lock:
            if not self._streaming:
                return False
            if self._needs_codec_data:
                return True
            if self._needs_key and not is_key:
                return True
            self._needs_key = False
            return self._queue_frame_data_locked(is_key, seq, pts, data)
    def send_inference_data(self, data):
        """Queue an inference overlay message. Returns True on queue overflow."""
        with self._lock:
            if not self._streaming:
                return False
            return self._queue_inference_data_locked(data)
    def _log(self, func, fmt, *args):
        # Prefix every log line with the client's ip:port.
        args = (self._ip, self._port) + args
        func('%s:%d: ' + fmt, *args)
    def _log_info(self, fmt, *args):
        self._log(self._logger.info, fmt, *args)
    def _log_warning(self, fmt, *args):
        self._log(self._logger.warning, fmt, *args)
    def _log_error(self, fmt, *args):
        self._log(self._logger.error, fmt, *args)
    def _queue_message_locked(self, message):
        """Append to the tx queue, dropping oldest entries past TX_QUEUE_SIZE;
        caller must hold self._lock. Returns True if anything was dropped."""
        dropped = False
        self._tx_q.append(message)
        while len(self._tx_q) > TX_QUEUE_SIZE:
            dropped = True
            self._tx_q.pop(0)
            # Dropping invalidates the decoder state: resend codec data and
            # wait for the next key frame.
            self._needs_codec_data = True
            self._needs_key = True
            self._log_warning('running behind, dropping messages')
        self._cond.notifyAll()
        return dropped
    def _tx_thread(self):
        # Drain the queue; the condition variable sleeps when it is empty.
        while True:
            with self._lock:
                if self._closed:
                    break
                if self._tx_q:
                    message = self._tx_q.pop(0)
                else:
                    self._cond.wait()
                    continue
            try:
                self._send_message(message)
            except Exception as e:
                self._log_error('Failed to send data: %s', e)
                self.close()
    def _rx_thread(self):
        # Receive loop; a falsy message means EOF/error and closes the client.
        while True:
            with self._lock:
                if self._closed:
                    break
            message = self._receive_message()
            if message:
                self._handle_message(message)
            else:
                self.close()
class _ProtoClient(_Client):
    """Client speaking length-prefixed protobuf over a plain TCP socket:
    each message is a 4-byte big-endian length followed by the payload."""
    def __init__(self, server, socket, addr):
        _Client.__init__(self, server, socket, addr)
    def _queue_codec_data_locked(self, resolution, data):
        # Wrap SPS/PPS bytes plus the video resolution in a ClientBound message.
        message = pb2.ClientBound()
        message.stream_data.codec_data.width = resolution[0]
        message.stream_data.codec_data.height = resolution[1]
        message.stream_data.codec_data.data = data
        return self._queue_message_locked(message)
    def _queue_frame_data_locked(self, is_key, seq, pts, data):
        # Wrap one encoded frame with its type, sequence number and timestamp.
        message = pb2.ClientBound()
        if is_key:
            message.stream_data.frame_data.type = pb2.FrameData.KEY
        else:
            message.stream_data.frame_data.type = pb2.FrameData.DELTA
        message.stream_data.frame_data.seq = seq
        message.stream_data.frame_data.pts = pts
        message.stream_data.frame_data.data = data
        return self._queue_message_locked(message)
    def _queue_inference_data_locked(self, data):
        return self._queue_message_locked(data.GetMessage())
    def _handle_message(self, message):
        # Dispatch on the protobuf oneof; any handler error closes the client.
        which = message.WhichOneof('message')
        try:
            if which == 'stream_control':
                self._handle_stream_control(message.stream_control)
            else:
                self._log_warning('unhandled message %s', which)
        except Exception as e:
            self._log_error('Error handling message %s: %s', which, e)
            self.close()
    def _handle_stream_control(self, stream_control):
        """Toggle streaming for this client and update the server's
        stream reference count (outside the lock to avoid re-entry)."""
        self._log_info('stream_control %s', stream_control.enabled)
        enabled = stream_control.enabled
        with self._lock:
            if enabled == self._streaming:
                self._log_info('ignoring NOP stream_control')
                return
            else:
                self._streaming = enabled
        self._server._stream_control(enabled)
    def _send_message(self, message):
        # Wire format: 4-byte big-endian length prefix, then the serialized proto.
        buf = message.SerializeToString()
        self._socket.sendall(struct.pack('!I', len(buf)))
        self._socket.sendall(buf)
    def _receive_bytes(self, num_bytes):
        # Read exactly num_bytes unless the peer closes early (short result).
        received = bytearray(b'')
        while num_bytes > len(received):
            buf = self._socket.recv(num_bytes - len(received))
            if not buf:
                break
            received.extend(buf)
        return bytes(received)
    def _receive_message(self):
        # Returns the parsed AiyBound message, or None on EOF / parse error
        # (the rx loop treats None as "close this client").
        try:
            buf = self._receive_bytes(4)
            num_bytes = struct.unpack('!I', buf)[0]
            buf = self._receive_bytes(num_bytes)
            message = pb2.AiyBound()
            message.ParseFromString(buf)
            return message
        except:
            return None
class _WsProtoClient(_ProtoClient):
class WsPacket(object):
def __init__(self):
self.fin = True
self.opcode = 2
self.masked = False
self.mask = None
self.length = 0
self.payload = bytearray()
def append(self, data):
if self.masked:
data = bytes([c ^ k for c, k in zip(data, cycle(self.mask))])
self.payload.extend(data)
def serialize(self):
self.length = len(self.payload)
buf = bytearray()
b0 = 0
b1 = 0
if self.fin:
b0 |= 0x80
b0 |= self.opcode
buf.append(b0)
if self.length <= 125:
b1 |= self.length
buf.append(b1)
elif self.length >= 126 and self.length <= 65535:
b1 |= 126
buf.append(b1)
buf.extend(struct.pack('!H', self.length))
else:
b1 |= 127
buf.append(b1)
buf.extend(struct.pack('!Q', self.length))
if self.payload:
buf.extend(self.payload)
return bytes(buf)
def __init__(self, server, socket, addr):
self._handshaked = False
_ProtoClient.__init__(self, server, socket, addr)
def _receive_message(self):
try:
while True:
if self._handshaked:
break
self._process_web_request()
packets = []
while True:
packet = self._receive_packet()
if packet.opcode == 0:
# Continuation
if not packets:
self._log_error('Invalid continuation received')
return None
packets.append(packet)
elif packet.opcode == 1:
# Text, not supported.
self._log_error('Received text packet')
return None
elif packet.opcode == 2:
# Binary.
packets.append(packet)
if packet.fin:
joined = bytearray()
for p in packets:
joined.extend(p.payload)
message = pb2.AiyBound()
message.ParseFromString(joined)
return message
elif packet.opcode == 8:
# Close.
self._log_info('WebSocket close requested')
return None
elif packet.opcode == 9:
# Ping, send pong.
self._log_info('Received ping')
response = self.WsPacket()
response.opcode = 10
response.append(packet.payload)
with self._lock:
self._queue_message_locked(response)
elif packet.opcode == 10:
# Pong. Igore as we don't send pings.
self._log_info('Dropping pong')
else:
self._log_info('Dropping opcode %d', packet.opcode)
except:
return None
def _receive_packet(self):
packet = self.WsPacket()
buf = super()._receive_bytes(2)
packet.fin = buf[0] & 0x80 > 0
packet.opcode = buf[0] & 0x0F
packet.masked = buf[1] & 0x80 > 0
packet.length = buf[1] & 0x7F
if packet.length == 126:
packet.length = struct.unpack('!H', super()._receive_bytes(2))[0]
elif packet.length == 127:
packet.length = struct.unpack('!Q', super()._receive_bytes(8))[0]
if packet.masked:
packet.mask = super()._receive_bytes(4)
packet.append(super()._receive_bytes(packet.length))
return packet
def _send_message(self, message):
if isinstance(message, (bytes, bytearray)):
buf = message
else:
if isinstance(message, self.WsPacket):
packet = message
else:
packet = self.WsPacket()
packet.append(message.SerializeToString())
buf = packet.serialize()
self._socket.sendall(buf)
    class HTTPRequest(BaseHTTPRequestHandler):
        """Parse a raw HTTP request buffer without serving it.

        Reuses BaseHTTPRequestHandler's parser by feeding it a BytesIO.
        The base __init__ is deliberately not called, so no socket or
        response machinery exists — only .command/.path/.headers are set.
        """
        def __init__(self, request_buf):
            self.rfile = BytesIO(request_buf)
            self.raw_requestline = self.rfile.readline()
            self.parse_request()
def _process_web_request(self):
response_template = (
'HTTP/1.1 200 OK\r\n'
'Content-Length: %(content_length)s\r\n'
'Connection: Keep-Alive\r\n\r\n'
)
try:
header_buf = bytearray()
while b'\r\n\r\n' not in header_buf:
buf = self._socket.recv(2048)
if not buf:
raise Exception('Socket closed while receiving header')
header_buf.extend(buf)
if len(header_buf) >= 10 * 1024:
raise Exception('HTTP header too large')
request = self.HTTPRequest(header_buf)
connection = request.headers['Connection']
upgrade = request.headers['Upgrade']
if 'Upgrade' in connection and upgrade == 'websocket':
self._handshake(request)
elif request.command == 'GET':
content = self._get_asset(request.path)
response_hdr = response_template % {'content_length': len(content)}
response = bytearray(response_hdr.encode('ascii'))
response.extend(content)
with self._lock:
self._queue_message_locked(response)
else:
raise Exception('Unsupported request')
except Exception as e:
self.close()
raise e
    def _handshake(self, request):
        """Complete the RFC 6455 WebSocket opening handshake.

        Computes Sec-WebSocket-Accept as base64(SHA1(client key + magic
        GUID)) and queues the 101 Switching Protocols response. On failure
        the connection is closed instead of raising.
        """
        # Fixed GUID mandated by RFC 6455 for the accept-key computation.
        magic = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
        response_template = (
            'HTTP/1.1 101 Switching Protocols\r\n'
            'Upgrade: WebSocket\r\n'
            'Connection: Upgrade\r\n'
            'Sec-WebSocket-Accept: %(sec_key)s\r\n\r\n'
        )
        try:
            sec_key = request.headers['Sec-WebSocket-Key']
            sec_key = sec_key.encode('ascii') + magic.encode('ascii')
            sec_key = base64.b64encode(hashlib.sha1(sec_key).digest()).decode('ascii')
            response = response_template % {'sec_key': sec_key}
            with self._lock:
                self._queue_message_locked(response.encode('ascii'))
            self._handshaked = True
            self._log_info('Upgraded to WebSocket')
        except Exception as e:
            self._log_error('WebSocket handshake error: %s', e)
            self.close()
def _get_asset(self, path):
if not path or '..' in path:
return 'Nice try'.encode('ascii')
if path == '/':
path = 'index.html'
elif path[0] == '/':
path = path[1:]
path = os.path.join(os.path.dirname(__file__), 'assets', path)
try:
with open(path, 'rb') as asset:
return asset.read()
except:
return b''
class _AnnexbClient(_Client):
    """Client that receives the raw (presumably H.264 Annex-B) byte stream.

    Codec and frame payloads are forwarded verbatim; inference overlay data
    has no representation in a raw elementary stream and is dropped.
    """
    def __init__(self, server, socket, addr):
        _Client.__init__(self, server, socket, addr)
        with self._lock:
            self._streaming = True
            self._server._stream_control(True)
    def start(self):
        # NOTE(review): repeats the streaming setup already done in
        # __init__; looks redundant — confirm whether both are needed.
        super().start()
        with self._lock:
            self._streaming = True
            self._server._stream_control(True)
    def _queue_codec_data_locked(self, resolution, data):
        # Resolution metadata is ignored; raw bytes are queued as-is.
        return self._queue_message_locked(data)
    def _queue_frame_data_locked(self, is_key, seq, pts, data):
        # Keyframe/sequence/pts metadata is ignored; payload only.
        return self._queue_message_locked(data)
    def _queue_inference_data_locked(self, data):
        # Silently drop inference data.
        return False
    def _handle_message(self, message):
        # Incoming bytes from a raw-stream client carry no commands.
        pass
    def _send_message(self, message):
        self._socket.sendall(message)
    def _receive_message(self):
        try:
            buf = self._socket.recv(1024)
            if not buf:
                # Peer closed the connection.
                return None
            else:
                return buf
        except:
            # Treat any socket error as end-of-stream.
            return None
class InferenceData(object):
    """Builder for an overlay-annotation message (rectangles and labels).

    Wraps a pb2.ClientBound protobuf whose stream_data.inference_data holds
    the drawing elements; GetMessage() returns the assembled message.
    """
    def __init__(self):
        self._message = pb2.ClientBound()
        # Mark the (possibly empty) inference_data submessage as present.
        self._message.stream_data.inference_data.SetInParent()
    @staticmethod
    def _get_color(value):
        """Normalize a color to a packed 32-bit ARGB integer.

        Accepts an already-packed int, an (r, g, b) tuple (alpha forced to
        0xFF), or an (a, r, g, b) tuple. Anything else yields opaque white.
        """
        if isinstance(value, int):
            return value
        if isinstance(value, tuple):
            if len(value) == 3:
                color = 0xFF000000
                color |= (value[0] & 0xff) << 16
                color |= (value[1] & 0xff) << 8
                color |= (value[2] & 0xff) << 0
                return color
            if len(value) == 4:
                color = 0
                color |= (value[0] & 0xff) << 24
                color |= (value[1] & 0xff) << 16
                color |= (value[2] & 0xff) << 8
                color |= (value[3] & 0xff) << 0
                return color
        return 0xFFFFFFFF
    def add_rectangle(self, x, y, w, h, color, weight):
        """Append a rectangle element at (x, y) with size (w, h)."""
        element = self._message.stream_data.inference_data.elements.add()
        element.rectangle.x = x
        element.rectangle.y = y
        element.rectangle.w = w
        element.rectangle.h = h
        element.rectangle.color = InferenceData._get_color(color)
        element.rectangle.weight = weight
    def add_label(self, text, x, y, color, size):
        """Append a text label element at (x, y)."""
        element = self._message.stream_data.inference_data.elements.add()
        element.label.text = text
        element.label.x = x
        element.label.y = y
        element.label.color = InferenceData._get_color(color)
        element.label.size = size
    def GetMessage(self):
        """Return the underlying protobuf message."""
        return self._message
| abnerjacobsen/Smart_Office | WaitingRoomAPI/aiy/vision/streaming/server.py | server.py | py | 24,279 | python | en | code | 1 | github-code | 13 |
5719521172 | from __future__ import annotations
import pytest
from aiohttp import BasicAuth
from aioresponses import CallbackResult
from aioresponses import aioresponses
from tests import normalize_item
from vdirsyncer.exceptions import UserError
from vdirsyncer.storage.http import HttpStorage
from vdirsyncer.storage.http import prepare_auth
@pytest.mark.asyncio
async def test_list(aio_connector):
    """list()/get() parse events out of a remote icalendar collection.

    Serves the same two-event calendar for every request and checks that a
    second listing yields the same hrefs/etags as the first.
    """
    collection_url = "http://127.0.0.1/calendar/collection.ics"
    items = [
        (
            "BEGIN:VEVENT\n"
            "SUMMARY:Eine Kurzinfo\n"
            "DESCRIPTION:Beschreibung des Termines\n"
            "END:VEVENT"
        ),
        (
            "BEGIN:VEVENT\n"
            "SUMMARY:Eine zweite Küèrzinfo\n"
            "DESCRIPTION:Beschreibung des anderen Termines\n"
            "BEGIN:VALARM\n"
            "ACTION:AUDIO\n"
            "TRIGGER:19980403T120000\n"
            "ATTACH;FMTTYPE=audio/basic:http://host.com/pub/ssbanner.aud\n"
            "REPEAT:4\n"
            "DURATION:PT1H\n"
            "END:VALARM\n"
            "END:VEVENT"
        ),
    ]
    # Two identical calendar bodies: one per expected fetch of the collection.
    responses = ["\n".join(["BEGIN:VCALENDAR"] + items + ["END:VCALENDAR"])] * 2
    def callback(url, headers, **kwargs):
        assert headers["User-Agent"].startswith("vdirsyncer/")
        assert responses
        # NOTE(review): body is utf-8 while the header claims iso-8859-1 —
        # presumably deliberate, to exercise charset handling; confirm.
        return CallbackResult(
            status=200,
            body=responses.pop().encode("utf-8"),
            headers={"Content-Type": "text/calendar; charset=iso-8859-1"},
        )
    with aioresponses() as m:
        m.get(collection_url, callback=callback, repeat=True)
        s = HttpStorage(url=collection_url, connector=aio_connector)
        found_items = {}
        # First listing: every item must round-trip through get().
        async for href, etag in s.list():
            item, etag2 = await s.get(href)
            assert item.uid is not None
            assert etag2 == etag
            found_items[normalize_item(item)] = href
        expected = {
            normalize_item("BEGIN:VCALENDAR\n" + x + "\nEND:VCALENDAR") for x in items
        }
        assert set(found_items) == expected
        # Second listing: hrefs must be stable across fetches.
        async for href, etag in s.list():
            item, etag2 = await s.get(href)
            assert item.uid is not None
            assert etag2 == etag
            assert found_items[normalize_item(item)] == href
def test_readonly_param(aio_connector):
    """The ``readonly`` param cannot be ``False``."""
    url = "http://example.com/"
    with pytest.raises(ValueError):
        HttpStorage(url=url, read_only=False, connector=aio_connector)
    # Both True and None (the default) normalize to a read-only storage.
    for read_only in (True, None):
        storage = HttpStorage(url=url, read_only=read_only, connector=aio_connector)
        assert storage.read_only is True
def test_prepare_auth():
    """prepare_auth maps (method, user, pwd) to the right auth object."""
    # No credentials and no method: no auth at all.
    assert prepare_auth(None, "", "") is None
    # Credentials without a method default to basic auth.
    assert prepare_auth(None, "user", "pwd") == BasicAuth("user", "pwd")
    assert prepare_auth("basic", "user", "pwd") == BasicAuth("user", "pwd")
    with pytest.raises(ValueError) as excinfo:
        # NOTE(review): the `assert` inside pytest.raises is superfluous —
        # the call is expected to raise before returning.
        assert prepare_auth("basic", "", "pwd")
    assert "you need to specify username and password" in str(excinfo.value).lower()
    # Imported lazily; digest auth is backed by requests rather than aiohttp.
    from requests.auth import HTTPDigestAuth
    assert isinstance(prepare_auth("digest", "user", "pwd"), HTTPDigestAuth)
    with pytest.raises(ValueError) as excinfo:
        prepare_auth("ladida", "user", "pwd")
    assert "unknown authentication method" in str(excinfo.value).lower()
def test_prepare_auth_guess(monkeypatch):
    """'guess' auth requires requests_toolbelt's GuessAuth."""
    import requests_toolbelt.auth.guess
    assert isinstance(
        prepare_auth("guess", "user", "pwd"),
        requests_toolbelt.auth.guess.GuessAuth,
    )
    # Simulate an old requests_toolbelt without GuessAuth: a clear UserError
    # must be raised rather than an AttributeError.
    monkeypatch.delattr(requests_toolbelt.auth.guess, "GuessAuth")
    with pytest.raises(UserError) as excinfo:
        prepare_auth("guess", "user", "pwd")
    assert "requests_toolbelt is too old" in str(excinfo.value).lower()
def test_verify_false_disallowed(aio_connector):
    """Passing verify=False must be rejected with a helpful message."""
    with pytest.raises(ValueError) as excinfo:
        HttpStorage(url="http://example.com", verify=False, connector=aio_connector)
    message = str(excinfo.value).lower()
    assert "must be a path to a pem-file." in message
| pimutils/vdirsyncer | tests/storage/test_http.py | test_http.py | py | 4,094 | python | en | code | 1,382 | github-code | 13 |
36907268197 | import itertools
# Polygonal Numbers
def P(k, n):
    """Return the n-th k-gonal number.

    Uses the closed form P(k, n) = ((k - 2)*n**2 - (k - 4)*n) / 2, which
    reproduces the triangle (k=3) through octagonal (k=8) cases of the
    original if/elif chain and generalizes to any polygon with k >= 3
    sides. Returns 0 for k < 3, as before. The numerator is always even,
    so floor division is exact.
    """
    if k < 3:
        return 0
    return ((k - 2) * n * n - (k - 4) * n) // 2
def get_all_d_digit_polynomal_numbers(N, d):
    """Return N lists of d-digit polygonal numbers, for k = 3 .. N+2."""
    lower, upper = 10 ** (d - 1), 10 ** d
    buckets = []
    for k in range(3, N + 3):
        values = []
        n = 1
        while True:
            value = P(k, n)
            if value >= upper:
                break
            if value >= lower:
                values.append(value)
            n += 1
        buckets.append(values)
    return buckets
def get_first_two_digits(n):
    """Drop the last two decimal digits (for a 4-digit n, its first two)."""
    quotient, _ = divmod(n, 100)
    return quotient
def get_last_two_digits(n):
    """Return the value of n's last two decimal digits (n mod 100)."""
    _, remainder = divmod(n, 100)
    return remainder
# This function assumes that each digit in S is a 4-digit number
# What is confusing about this question is their definition of "cyclic"
# In this question, cyclic means that, for example, in a set of size 6 we have
# ABCD --> CDEF --> EFGH --> GHIJ --> IJKL --> KLAB
# i.e. we always loop in groups of 2 digits
#
# In testing this function out it's clear that it's eay too slow to use many times
def isCyclicSet(S):
    """Return True if the 4-digit numbers in S can be ordered into a cycle.

    S[0] is pinned as the start; every permutation of the rest is tried.
    Within a permutation the chain runs backwards through ``perm``
    (perm[i+1] feeds perm[i]): perm[i]'s first two digits must equal
    perm[i+1]'s last two. The for/else fires only when the inner loop
    never breaks; it then checks that S[0] closes the cycle at both ends.

    NOTE(review): factorial in len(S) — per the module comment above, far
    too slow to call repeatedly; DFS() exists for that reason.
    """
    for perm in itertools.permutations(S[1:]):
        for i in range(len(perm)-1):
            if get_first_two_digits(perm[i]) != get_last_two_digits(perm[i+1]):
                break
        else:
            # This only occurs if we don't break
            if get_first_two_digits(S[0]) == get_last_two_digits(perm[0]) \
            and get_first_two_digits(perm[-1]) == get_last_two_digits(S[0]):
                return True
    return False
def brute_force(N):
    """Exhaustively collect every cyclic combination, one number per polygon type."""
    pools = get_all_d_digit_polynomal_numbers(N, 4)
    return [combo for combo in itertools.product(*pools) if isCyclicSet(combo)]
def DFS(N, cycle=None, candidates=None):
    """Depth-first search for cyclic sets of 4-digit polygonal numbers.

    Top-level call (cycle is None): seeds the search with each number from
    the highest-polygon list and returns a list of the cyclic tuples found.
    Recursive calls return a (found, cycle) pair instead. The chain rule is
    that the next number's first two digits equal the current number's last
    two.
    """
    # This only runs once and sets up the depth-first search
    if cycle is None:
        # Candidates is a list of lists containing the 4-digit polygonal numbers.
        # They are kept separated because we may pick only one number from each list.
        candidates = get_all_d_digit_polynomal_numbers(N, 4)
        remaining = candidates[:-1]
        # output list
        cyclic_sets = []
        # The cycle has to start somewhere, so we will assume the first number comes from the last list
        # This is because the last list is the highest polygon list, which means it will be the shortest
        for first in candidates[-1]:
            cycle = [first]
            # we search for a cycle starting at ``first``
            found, cycle = DFS(N, cycle=cycle, candidates=remaining)
            # if one is found, then we stop and return the cycle
            if found:
                cyclic_sets.append(tuple(cycle))
            # otherwise, we continue to the next number in ``candidates[-1]``
        return cyclic_sets
    # base case: if we get a cycle of length N, then we are done
    # and we just need to check that the property holds for the first and last numbers in the cycle as well
    if len(cycle) == N:
        first, last = cycle[0], cycle[-1]
        if get_last_two_digits(last) == get_first_two_digits(first):
            return True, cycle
        else:
            return False, cycle
    # This is the DFS search
    curr = cycle[-1]
    for i in range(len(candidates)):
        remaining = candidates[:i] + candidates[i+1:]
        for nxt in candidates[i]:
            # if we find a number that could be the next on the cycle
            if get_last_two_digits(curr) == get_first_two_digits(nxt):
                # we recurse on that number
                found, new_cycle = DFS(N, cycle=cycle + [nxt], candidates=remaining)
                # if it results in a valid cycle
                if found:
                    # we return
                    return True, new_cycle
    # if we fail to find anything, return the original cycle
    return False, cycle
def main(N=6):
    """Project Euler 61: print and return the sum of the cyclic set.

    NOTE(review): assumes at least one cyclic set is found —
    ``cyclic_sets[0]`` raises IndexError otherwise.
    """
    #cyclic_sets = brute_force(N)
    cyclic_sets = DFS(N)
    print(cyclic_sets)
    total = sum(cyclic_sets[0])
    print(f"The sum of the only ordered set of {N} cyclic 4-digit numbers for which each polygonal type is represented by a different number in the set is:", total)
    return total
if __name__ == "__main__":
    # Run the solver when executed as a script.
    main()
40336550035 | import tensorflow as tf
import os
class Summarizer_eager:
    """TensorFlow-eager summary helper: one file writer each for train/test.

    Writers are created under ``<summary_dir>/train`` and
    ``<summary_dir>/test``.

    NOTE(review): ``summary_placeholders`` and ``summary_ops`` are created
    but never populated here — they look like graph-mode leftovers; confirm
    no caller relies on them before removing.
    """
    def __init__(self, config):
        # config.log.summary_dir: base directory for the event files.
        self.config = config
        self.summary_placeholders = {}
        self.summary_ops = {}
        self.train_summary_writer = tf.summary.create_file_writer(
            os.path.join(self.config.log.summary_dir, "train")
        )
        self.test_summary_writer = tf.summary.create_file_writer(
            os.path.join(self.config.log.summary_dir, "test")
        )
| yigitozgumus/Polimi_Thesis | utils/summarizer_eager.py | summarizer_eager.py | py | 468 | python | en | code | 5 | github-code | 13 |
41645536415 | #!/usr/bin/env python3
import sys
import string
def react_polymer(polymer):
    """Fully react a polymer: adjacent same-letter, opposite-case pairs vanish.

    Two units react when they are the same letter in different cases
    (ASCII distance 32, e.g. 'a'/'A'). Reduction is confluent, so a single
    stack pass reaches the same fixed point as the original repeated
    re-scan, in O(n) instead of O(n**2).
    """
    stack = []
    for unit in polymer:
        if stack and abs(ord(stack[-1]) - ord(unit)) == 32:
            # The new unit annihilates with the unit on top of the stack.
            stack.pop()
        else:
            stack.append(unit)
    return "".join(stack)
def main(input_file):
    """AoC 2018 day 5 part 2: find the unit type whose removal yields the
    shortest fully-reacted polymer, and print it with the resulting length."""
    polymer = None
    with open(input_file) as f:
        polymer = f.readline().strip()
    if not polymer:
        print("ERROR: Can't read input string")
        sys.exit(1)
    # Sentinel larger than any plausible polymer length for this input.
    lowest_value = 1000000
    lowest_char = None
    for char in string.ascii_uppercase:
        # Remove both cases of the candidate unit, then react what remains.
        new_polymer = polymer.replace(char, "")
        new_polymer = new_polymer.replace(char.lower(), "")
        reacted = react_polymer(new_polymer)
        if len(reacted) < lowest_value:
            lowest_char = "{0}/{1}".format(char, char.lower())
            lowest_value = len(reacted)
    print("Char: {0}".format(lowest_char))
    print("Length: {0}".format(lowest_value))
if __name__ == "__main__":
    # Usage: python puzzle2.py <input_file>
    if len(sys.argv) < 2:
        print("Missing input file argument")
    else:
        main(sys.argv[1])
| billyoverton/advent2018 | day5/puzzle2.py | puzzle2.py | py | 1,275 | python | en | code | 0 | github-code | 13 |
class Solution:
    def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
        """Return how many intervals remain after removing covered ones.

        Sorting by start ascending and, for equal starts, end descending
        guarantees a potential coverer is visited before anything it
        covers; an interval is covered iff its end does not extend past
        the furthest end seen so far.
        """
        intervals.sort(key=lambda x: (x[0], -x[1]))
        covered = 0
        # -inf (not 0) so intervals lying entirely at negative coordinates
        # are not miscounted as covered.
        max_right = float('-inf')
        for cur in intervals:
            if cur[1] <= max_right:
                covered += 1
            else:
                max_right = cur[1]
        return len(intervals) - covered
# Program: compute the products of paired elements of a list.
# A pair is the first and last element, the second and second-to-last, etc.
# Examples:
# - [2, 3, 4, 5, 6] => [12, 15, 16] (the middle element pairs with itself);
# - [2, 3, 5, 6] => [12, 15]
a = [2, 3, 4, 5, 6]

# Pair index i runs over the first half of the list, rounded up so that an
# odd-length list multiplies its middle element by itself. This also fixes
# the original's IndexError on even-length lists, where the two slice
# halves ended up with different lengths.
pair_products = [a[i] * a[-1 - i] for i in range((len(a) + 1) // 2)]
print(pair_products)
| Rrider11/python2 | task3.2.py | task3.2.py | py | 623 | python | ru | code | 0 | github-code | 13 |
38275082175 | from telegram_bot import *
import urllib.request
import urllib.parse
import ssl
import bs4
import datetime
def check_saleinfo_and_send_alarm(bot):
    """Scrape the Quasarzone sale-info board and push fresh posts via *bot*.

    Skips blind (moderated) posts, identified by their placeholder title
    BLIND_MESSAGE. Scanning stops at the first post older than
    CHECK_INTERVAL_MIN relative to the global ``executed_time`` —
    NOTE(review): this presumes the board lists newest posts first; confirm.
    """
    url = 'https://quasarzone.co.kr/bbs/qb_saleinfo'
    prefix = 'https://quasarzone.co.kr'
    with urllib.request.urlopen(url, context=CONTEXT) as response:
        html = response.read()
    soup = bs4.BeautifulSoup(html, 'html.parser')
    table = soup.find('div', class_='market-type-list market-info-type-list relative')
    posts = table.find_all('a', class_='subject-link')
    for p in posts:
        title = p.text.strip()
        href = p['href']
        url = prefix + href
        if title != BLIND_MESSAGE:
            date, details = get_post_content(url)
            posting_time = datetime.datetime.strptime(date, '%Y.%m.%d %H:%M')
            # Anything older than the polling window was handled last run.
            if (executed_time - posting_time) > datetime.timedelta(minutes=CHECK_INTERVAL_MIN):
                break
            html_str = generate_html(title, url, date, details)
            bot.send_html(html_str)
            print(html_str)
def get_post_content(url):
    """Fetch a post page; return (date_string, {field_label: field_value}).

    The post's spec table is parsed by zipping its <th> labels with the
    <td> values; the date string follows the '%Y.%m.%d %H:%M' format
    consumed by the caller.
    """
    with urllib.request.urlopen(url, context=CONTEXT) as response:
        html = response.read()
    soup = bs4.BeautifulSoup(html, 'html.parser')
    date = soup.find('span', class_='date').text.strip()
    post_table = soup.find('table')
    tags = list(map(lambda x: x.text.strip(), post_table.find_all("th")))
    values = list(map(lambda x: x.text.strip(), post_table.find_all('td')))
    details = dict(zip(tags, values))
    return date, details
def generate_html(title, url, date, details):
    """Render a post as the Telegram-HTML string sent to the chat."""
    parts = [f"<b>게시글\t</b>\t{title}\n", f"<b>게시일</b>\t{date}\n\n"]
    purchase_link = None
    price_missing = False
    for tag, value in details.items():
        if value == "":
            continue
        if value == NO_PRICE_STRING:
            # Remember that no price was given; its shipping row is dropped too.
            price_missing = True
            continue
        if tag == "배송비/직배" and price_missing:
            continue
        if tag == "링크":
            purchase_link = f"<a href =\"{value}\">구매 링크 바로가기</a>\n"
        else:
            parts.append(f"{tag} : {value}\n")
    parts.append("\n")
    if purchase_link is not None:
        parts.append(purchase_link)
    parts.append(f"<a href =\"{url}\">퀘이사존 링크 바로가기</a>\n")
    return "".join(parts)
# Module-level configuration and the polling entry loop.
CONTEXT = ssl._create_unverified_context()  # NOTE(review): TLS verification disabled — confirm intentional
BLIND_MESSAGE = '블라인드 처리된 글입니다.'  # placeholder title of moderated ("blinded") posts
NO_PRICE_STRING = '₩ 0 (KRW)'  # sentinel value the board shows when no price was entered
CHECK_INTERVAL_MIN = 10  # polling period, minutes
telegram_bot_id = ''
telegram_bot_token = ''
telegram_bot = TelegramBot(token=telegram_bot_token, id=telegram_bot_id)
# NOTE(review): `time` is presumably re-exported by telegram_bot's
# star-import — confirm, since it is not imported here directly.
while True:
    executed_time = datetime.datetime.now()
    check_saleinfo_and_send_alarm(telegram_bot)
    print(executed_time)
    time.sleep(CHECK_INTERVAL_MIN * 60)
| shinners1/quasarzone-saleinfo-bot | check_saleinfo_board.py | check_saleinfo_board.py | py | 2,919 | python | en | code | 0 | github-code | 13 |
# Experiment configuration constants.
MANUAL_SEED = 1773  # RNG seed for reproducibility
NUM_LOCAL_EPOCHS = 10
NUMBER_OF_ROUNDS = 10
BATCH_SIZE = 5
ALPHA_COEF = 0.01
DC_ROUND = 1  # NOTE(review): presumably the daisy-chaining interval in rounds — confirm
AGG_ROUND = 10  # NOTE(review): presumably the aggregation interval in rounds — confirm
NUMBER_FOLDS = 5
# One fold held out; the remaining folds become federated clients.
NUMBER_CLIENTS = NUMBER_FOLDS - 1
PERCENTAGE_PERTURBED_SAMPLES = 22
NUMBER_REPLICAS = 5
ALGORITHM = None
LR = 0.1  # learning rate
OPTIM = "sgd"  # optimizer name
GLOBAL_RUN_NAME = 'replicas_distribution'
RUN_NAME = 'test_plots'
# Add the dataset path and saving folder path
DATASET_FOLDER_PATH = ""
SAVING_FOLDER_PATH = ""
# Parameter names shared during plain aggregation (first conv layer only).
aggregating_layers = ["conv1.bias", "conv1.nn.0.weight", "conv1.nn.0.bias", "conv1.lin.weight", "conv1.root"]
# Parameter names shared when daisy-chaining: all three conv layers, not just
# conv1. (Line repaired: extraction residue had been fused onto the closing
# bracket, making the statement invalid Python.)
aggregating_layers_daisy_chain = ["conv1.bias", "conv1.nn.0.weight", "conv1.nn.0.bias", "conv1.lin.weight", "conv1.root",
                                  "conv2.bias", "conv2.nn.0.weight", "conv2.nn.0.bias", "conv2.lin.weight", "conv2.root",
                                  "conv3.bias", "conv3.nn.0.weight", "conv3.nn.0.bias", "conv3.lin.weight", "conv3.root"]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.