seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: HuHao <huhao1@cmcm.com>
Date: '2018/8/25'
Info:
"""
# Read process environment variables.
import os
# Application factory and database instance.
from app import create_app, db
# ORM model classes exposed to the shell context.
from app.models import User, Role, Post, Permission
# Flask-Script: richer CLI options and an interactive shell.
from flask_script import Manager, Shell
# Flask-Migrate: database schema migration support.
from flask_migrate import Migrate, MigrateCommand, upgrade
import click

# Must run after the `from .. import` lines but before the app is
# instantiated, otherwise coverage statistics are incomplete.
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')  # packages scanned for coverage
    COV.start()

app = create_app(os.getenv('FLASKY_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    # Objects pre-imported into the interactive shell session.
    return dict(app=app, db=db, User=User, Role=Role, Post=Post, Permission=Permission)


# `python manage.py shell` runs make_shell_context automatically and
# injects the returned dict into the shell environment.
manager.add_command('shell', Shell(make_context=make_shell_context))
# `python manage.py db` dispatches to the MigrateCommand class.
manager.add_command('db', MigrateCommand)
# -------- Unit tests --------
@manager.command  # registers the function name as a subcommand: python manage.py test_basic
def test_basic():
    """Discover and run the unit tests under the ./tests directory."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    # verbosity controls how detailed the per-case output is.
    unittest.TextTestRunner(verbosity=2).run(suite)
# -------- Unit-test coverage report (unit tests plus coverage statistics) --------
# python manage.py coverable              -> run tests without coverage
# python manage.py coverable --coverage   -> run tests with coverage
@manager.command  # register the function name as a subcommand
def coverable(coverage=False):
    """Run the unit tests, optionally collecting a coverage report.

    Note: the ``coverage`` parameter shadows the ``coverage`` module name;
    the module is only referenced here through the COV singleton, so the
    shadowing is harmless but the imports are hoisted to the top of the
    function to keep them out of conditional branches.
    """
    import sys
    import unittest
    # If --coverage was passed but FLASK_COVERAGE is not yet set, re-exec the
    # interpreter with the variable set so the collector at the top of this
    # module starts *before* the application code is imported (otherwise the
    # report would be incomplete).
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    # Run the unit tests.
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
    # If coverage collection is active, save and report the results.
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')  # HTML report output path
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()  # clear collected data
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler.

    Keeps the ``length`` slowest calls per request in the report; if
    ``profile_dir`` is set, raw profiling data is saved there as well.
    """
    print(length, profile_dir)
    # NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0 -- this
    # import only works with Werkzeug < 1.0; confirm the pinned dependency.
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length], profile_dir=profile_dir)
    app.run(debug=False)
@manager.command
def deploy():
    """Run deployment tasks."""
    # Migrate database to the latest revision.
    upgrade()
    # Create or update user roles.
    Role.insert_roles()
    # Ensure all users are following themselves.
    User.add_self_follows()


if __name__ == "__main__":
    manager.run()
| happy-place/flasky | manage.py | manage.py | py | 3,574 | python | zh | code | 0 | github-code | 36 |
72466882663 | """
WSGI config for mintemplate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
import site
prev_sys_path = list(sys.path)
root = os.path.normpath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(root)
site.addsitedir(os.path.join(root, ".env/lib/python%d.%d/site-packages" % sys.version_info[:2]))
site.addsitedir(os.path.join(root, ".env/lib64/python%d.%d/site-packages" % sys.version_info[:2]))
# addsitedir adds its directories at the end, but we want our local stuff
# to take precedence over system-installed packages.
# See http://code.google.com/p/modwsgi/issues/detail?id=112
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
os.environ.setdefault("DJANGO_SETTINGS_MODULE", os.path.basename(os.path.dirname(__file__)) + ".settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| kfarr2/Minimal-Template | mintemplate/wsgi.py | wsgi.py | py | 1,128 | python | en | code | 0 | github-code | 36 |
import sys
import io
import urllib.request

print('hi')
print('한글')
# Re-wrap stdout/stderr as UTF-8 so non-ASCII text prints correctly
# (typically needed on Windows consoles).
# NOTE(review): this runs *after* the two prints above -- presumably it was
# intended to come first; confirm.
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')

# Download a single image from the blog URL to a local file.
imgUrl = "http://blogfiles.naver.net/20130502_54/dbsgusrl77_136748336507323OOv_JPEG/%BF%B5%C8%AD_%C0%BA%B9%D0%C7%CF%B0%D4_%C0%A7%B4%EB%C7%CF%B0%D4_%B8%DE%C0%CE_%BF%B9%B0%ED%C6%ED_%B5%BF%BF%B5%BB%F3_%281%29.jpg"
savePath = "/Users/yuri/Documents/section2/test1.jpg"
urllib.request.urlretrieve(imgUrl,savePath)
print("다운로드 완료!")

# Earlier variant kept commented out in the original:
# import sys
# import io
# import urllib.request as dw
#
# sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
# sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
#
# imgUrl ="http://post.phinf.naver.net/20160621_169/1466482468068lmSHj_JPEG/If7GeIbOPZuYwI-GI3xU7ENRrlfI.jpg"
# htmlURL ="http://google.com"
#
# savePath1 ="/library/desktop pictures/qwe.jpg"
# savePath2 ="/library/desktop pictures/1.html"
#
# dw.urlretrieve(imgUrl, savePath1)
# dw.urlretrieve(htmlURL, savePath2)
#
# print("다운로드 완료!")
| leyuri/Crawling | Chapter2/download2-1.py | download2-1.py | py | 1,112 | python | en | code | 0 | github-code | 36 |
# method 1: build the full list of Fibonacci numbers up to the target index.
target = int(input())
Fibs = [1, 1]
i = 2
while i < target:
    Fibs.append(Fibs[i - 1] + Fibs[i - 2])
    i += 1
# Print the target-th Fibonacci number (1-indexed).
print(Fibs[target - 1])
# method 2: two rolling variables, O(1) memory.
target = int(input())
res = 0  # NOTE(review): unused -- kept as in the original
a, b = 1, 1
for i in range(target - 1):
    a, b = b, a + b
print(a)
# method 3: originally a naive recursion (exponential time, recursion-depth
# limited); rewritten iteratively with identical results for all inputs.
def Fib(n):
    """Return the n-th Fibonacci number, 1-indexed (Fib(1) == Fib(2) == 1).

    Any n <= 2 (including 0 and negatives) returns 1, matching the original
    recursive base case.
    """
    if n <= 2:
        return 1
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
# Read n from stdin and print the n-th Fibonacci number.
print(Fib(int(input())))
17894243960 | import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import ood_utils # local file import from baselines.cifar
import utils # local file import from baselines.cifar
from tensorboard.plugins.hparams import api as hp
# MIMO-specific hyperparameters.
flags.DEFINE_integer('ensemble_size', 3, 'Size of ensemble.')
flags.DEFINE_float('input_repetition_probability', 0.0,
                   'The probability that the inputs are identical for the'
                   'ensemble members.')
flags.DEFINE_integer('width_multiplier', 10, 'Integer to multiply the number of'
                     'typical filters by. "k" in ResNet-n-k.')
flags.DEFINE_integer('batch_repetitions', 4, 'Number of times an example is'
                     'repeated in a training batch. More repetitions lead to'
                     'lower variance gradients and increased training time.')
# OOD flags.
flags.DEFINE_bool('eval_on_ood', True,
                  'Whether to run OOD evaluation on specified OOD datasets.')
flags.DEFINE_list('ood_dataset', 'cifar100,svhn_cropped',
                  'list of OOD datasets to evaluate on.')
flags.DEFINE_string('saved_model_dir', None,
                    'Directory containing the saved model checkpoints.')
flags.DEFINE_bool('dempster_shafer_ood', False,
                  'Wheter to use DempsterShafer Uncertainty score.')
# Redefining default values
flags.FLAGS.set_default('corruptions_interval', 250)
flags.FLAGS.set_default('train_epochs', 250)
flags.FLAGS.set_default('l2', 3e-4)
flags.FLAGS.set_default('lr_decay_epochs', ['80', '160', '180'])
FLAGS = flags.FLAGS
def main(argv):
  """Train and evaluate a MIMO wide ResNet on CIFAR (part 1: data pipeline)."""
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  data_dir = FLAGS.data_dir
  # Pick the distribution strategy: mirrored GPUs or a TPU cluster.
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  ds_info = tfds.builder(FLAGS.dataset).info
  # Each example is repeated batch_repetitions times, so the effective
  # train batch shrinks by the same factor.
  train_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores // FLAGS.batch_repetitions
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  train_dataset_size = ds_info.splits['train'].num_examples
  steps_per_epoch = train_dataset_size // train_batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
  num_classes = ds_info.features['label'].num_classes

  train_builder = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TRAIN,
      validation_percent=1. - FLAGS.train_proportion)
  train_dataset = train_builder.load(batch_size=train_batch_size)
  validation_dataset = None
  steps_per_validation = 0
  # Carve a validation split out of the train set when requested.
  if FLAGS.train_proportion < 1.0:
    validation_builder = ub.datasets.get(
        FLAGS.dataset,
        data_dir=data_dir,
        download_data=FLAGS.download_data,
        split=tfds.Split.VALIDATION,
        validation_percent=1. - FLAGS.train_proportion)
    validation_dataset = validation_builder.load(batch_size=test_batch_size)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
    steps_per_validation = validation_builder.num_examples // test_batch_size
  clean_test_builder = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST)
  clean_test_dataset = clean_test_builder.load(batch_size=test_batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  # NOTE(review): these three values are recomputed here from the builders,
  # overwriting the tfds-derived values above -- presumably intentional to
  # account for the validation split; confirm.
  steps_per_epoch = train_builder.num_examples // train_batch_size
  steps_per_eval = clean_test_builder.num_examples // test_batch_size
  num_classes = 100 if FLAGS.dataset == 'cifar100' else 10

  if FLAGS.eval_on_ood:
    ood_dataset_names = FLAGS.ood_dataset
    ood_ds, steps_per_ood = ood_utils.load_ood_datasets(
        ood_dataset_names,
        clean_test_builder,
        1. - FLAGS.train_proportion,
        test_batch_size,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    ood_datasets = {
        name: strategy.experimental_distribute_dataset(ds)
        for name, ds in ood_ds.items()
    }

  # Build one distributed test dataset per (corruption type, severity) pair.
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar100':
      data_dir = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
      for severity in range(1, 6):
        dataset = ub.datasets.get(
            f'{FLAGS.dataset}_corrupted',
            corruption_type=corruption_type,
            data_dir=data_dir,
            severity=severity,
            split=tfds.Split.TEST).load(batch_size=test_batch_size)
        test_datasets[f'{corruption_type}_{severity}'] = (
            strategy.experimental_distribute_dataset(dataset))

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))
  # Model, optimizer, metrics and checkpoint are all created under the
  # strategy scope so their variables are correctly mirrored across replicas.
  with strategy.scope():
    logging.info('Building Keras model')
    model = ub.models.wide_resnet_mimo(
        input_shape=[FLAGS.ensemble_size] +
        list(ds_info.features['image'].shape),
        depth=28,
        width_multiplier=FLAGS.width_multiplier,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * train_batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch,
        base_lr,
        FLAGS.lr_decay_ratio,
        lr_decay_epochs,
        FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=1.0 - FLAGS.one_minus_momentum, nesterov=True)

    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
        'test/diversity': rm.metrics.AveragePairwiseDiversity(),
    }
    eval_dataset_splits = ['test']
    if validation_dataset:
      metrics.update({
          'validation/negative_log_likelihood': tf.keras.metrics.Mean(),
          'validation/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'validation/ece': rm.metrics.ExpectedCalibrationError(
              num_bins=FLAGS.num_bins),
      })
      eval_dataset_splits += ['validation']
    if FLAGS.eval_on_ood:
      ood_metrics = ood_utils.create_ood_metrics(ood_dataset_names)
      metrics.update(ood_metrics)
    # Per-ensemble-member metrics for each evaluation split.
    for i in range(FLAGS.ensemble_size):
      for dataset_split in eval_dataset_splits:
        metrics[f'{dataset_split}/nll_member_{i}'] = tf.keras.metrics.Mean()
        metrics[f'{dataset_split}/accuracy_member_{i}'] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, 6):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
      # NOTE(review): these per-member metrics duplicate the f-string keys
      # registered above (same 'test/...' names) -- looks like a leftover;
      # confirm before removing.
      for i in range(FLAGS.ensemble_size):
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    if FLAGS.saved_model_dir:
      logging.info('Saved model dir : %s', FLAGS.saved_model_dir)
      latest_checkpoint = tf.train.latest_checkpoint(FLAGS.saved_model_dir)
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      batch_size = tf.shape(images)[0]

      # MIMO input construction: each ensemble member receives an
      # independently shuffled copy of the (repeated) batch, except for a
      # fraction of indices kept identical across members (controlled by
      # input_repetition_probability).
      main_shuffle = tf.random.shuffle(tf.tile(
          tf.range(batch_size), [FLAGS.batch_repetitions]))
      to_shuffle = tf.cast(tf.cast(tf.shape(main_shuffle)[0], tf.float32)
                           * (1. - FLAGS.input_repetition_probability),
                           tf.int32)
      shuffle_indices = [
          tf.concat([tf.random.shuffle(main_shuffle[:to_shuffle]),
                     main_shuffle[to_shuffle:]], axis=0)
          for _ in range(FLAGS.ensemble_size)]
      images = tf.stack([tf.gather(images, indices, axis=0)
                         for indices in shuffle_indices], axis=1)
      labels = tf.stack([tf.gather(labels, indices, axis=0)
                         for indices in shuffle_indices], axis=1)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        # Sum the per-member cross-entropies, then average over the batch.
        negative_log_likelihood = tf.reduce_mean(tf.reduce_sum(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True), axis=1))

        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the BN parameters and bias terms.
          if ('kernel' in var.name or 'batch_norm' in var.name or
              'bias' in var.name):
            filtered_variables.append(tf.reshape(var, (-1,)))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      # Flatten (batch, ensemble) for the scalar training metrics.
      probs = tf.nn.softmax(tf.reshape(logits, [-1, num_classes]))
      flat_labels = tf.reshape(labels, [-1])
      metrics['train/ece'].add_batch(probs, label=flat_labels)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(flat_labels, probs)

    for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))
  @tf.function
  def test_step(iterator, dataset_split, dataset_name, num_steps):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images = inputs['features']
      labels = inputs['labels']
      # At eval time every ensemble member sees the same image.
      images = tf.tile(
          tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      probs = tf.nn.softmax(logits)

      if dataset_name == 'clean':
        per_probs = tf.transpose(probs, perm=[1, 0, 2])
        metrics['test/diversity'].add_batch(per_probs)
        # Per-member metrics (the validation split also passes
        # dataset_name == 'clean', keyed by dataset_split).
        for i in range(FLAGS.ensemble_size):
          member_probs = probs[:, i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics[f'{dataset_split}/nll_member_{i}'].update_state(member_loss)
          metrics[f'{dataset_split}/accuracy_member_{i}'].update_state(
              labels, member_probs)

      # Negative log marginal likelihood computed in a numerically-stable way.
      labels_tiled = tf.tile(
          tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_tiled, logits, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
          tf.math.log(float(FLAGS.ensemble_size)))
      probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

      if dataset_name == 'clean':
        metrics[f'{dataset_split}/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics[f'{dataset_split}/accuracy'].update_state(labels, probs)
        metrics[f'{dataset_split}/ece'].add_batch(probs, label=labels)
      elif dataset_name.startswith('ood/'):
        ood_labels = 1 - inputs['is_in_distribution']
        if FLAGS.dempster_shafer_ood:
          per_logits = tf.split(
              logits, num_or_size_splits=FLAGS.ensemble_size, axis=1)
          ood_scores = [
              ood_utils.DempsterShaferUncertainty(logit) for logit in per_logits
          ]
          ood_scores = tf.reduce_mean(ood_scores, axis=0)
        else:
          # Maximum-softmax-probability OOD score.
          ood_scores = 1 - tf.reduce_max(probs, axis=-1)
        # Edgecase for if dataset_name contains underscores
        for name, metric in metrics.items():
          if dataset_name in name:
            metric.update_state(ood_labels, ood_scores)
      else:
        # Corrupted test set: update the per-corruption metrics.
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
            probs, label=labels)

    for _ in tf.range(tf.cast(num_steps, tf.int32)):
      strategy.run(step_fn, args=(next(iterator),))
  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    train_step(train_iterator)

    # Progress/ETA logging.
    current_step = (epoch + 1) * steps_per_epoch
    max_steps = steps_per_epoch * (FLAGS.train_epochs)
    time_elapsed = time.time() - start_time
    steps_per_sec = float(current_step) / time_elapsed
    eta_seconds = (max_steps - current_step) / steps_per_sec
    message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
               'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                   current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                   steps_per_sec, eta_seconds / 60, time_elapsed / 60))
    logging.info(message)

    if validation_dataset:
      validation_iterator = iter(validation_dataset)
      test_step(
          validation_iterator, 'validation', 'clean', steps_per_validation)

    # Always evaluate on the clean test set; add the corrupted sets only at
    # the configured interval (they are expensive).
    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      logging.info('Starting to run eval at epoch: %s', epoch)
      test_start_time = time.time()
      test_step(test_iterator, 'test', dataset_name, steps_per_eval)
      ms_per_example = (time.time() - test_start_time) * 1e6 / test_batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    if FLAGS.eval_on_ood:
      for ood_dataset_name, ood_dataset in ood_datasets.items():
        ood_iterator = iter(ood_dataset)
        logging.info('Calculating OOD on dataset %s', ood_dataset_name)
        logging.info('Running OOD eval at epoch: %s', epoch)
        test_step(ood_iterator, 'test', ood_dataset_name,
                  steps_per_ood[ood_dataset_name])
        logging.info('Done with OOD eval on %s', ood_dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info(
          'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
          metrics['test/nll_member_{}'.format(i)].result(),
          metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Results from Robustness Metrics themselves return a dict, so flatten them.
    total_results = utils.flatten_dictionary(total_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
  # Record the hyperparameters for the TensorBoard HParams plugin.
  with summary_writer.as_default():
    hp.hparams({
        'base_learning_rate': FLAGS.base_learning_rate,
        'one_minus_momentum': FLAGS.one_minus_momentum,
        'l2': FLAGS.l2,
        'batch_repetitions': FLAGS.batch_repetitions,
    })


if __name__ == '__main__':
  app.run(main)
| google/uncertainty-baselines | baselines/cifar/mimo.py | mimo.py | py | 19,130 | python | en | code | 1,305 | github-code | 36 |
28875886668 | from __future__ import absolute_import, unicode_literals
from draftjs_exporter.dom import DOM
from draftjs_exporter.error import ExporterException
from draftjs_exporter.options import Options
class EntityException(ExporterException):
    """Raised when the entity stream or the entity map is inconsistent."""
    pass
class EntityState:
    """Tracks open entities while the exporter walks a draft.js block.

    Style nodes that fall inside an open entity are buffered on
    ``element_stack`` until the entity closes, at which point they are
    wrapped in the entity's decorated element.
    """

    def __init__(self, entity_decorators, entity_map):
        self.entity_decorators = entity_decorators
        self.entity_map = entity_map
        self.entity_stack = []
        self.completed_entity = None
        self.element_stack = []

    def apply(self, command):
        """Open or close an entity according to *command*."""
        if command.name == 'start_entity':
            self.entity_stack.append(command.data)
            return
        if command.name == 'stop_entity':
            expected = self.entity_stack[-1]
            # Entities must close in strict LIFO order.
            if command.data != expected:
                raise EntityException('Expected {0}, got {1}'.format(expected, command.data))
            self.completed_entity = self.entity_stack.pop()

    def has_no_entity(self):
        """Return True when no entity is currently open."""
        return len(self.entity_stack) == 0

    def get_entity_details(self, entity_key):
        """Look up *entity_key* in the entity map, raising if absent."""
        details = self.entity_map.get(str(entity_key))
        if details is None:
            raise EntityException('Entity "%s" does not exist in the entityMap' % entity_key)
        return details

    def render_entities(self, style_node):
        """Emit *style_node*, wrapping buffered nodes when an entity closed."""
        if self.completed_entity is not None:
            # An entity just closed: wrap everything buffered for it.
            details = self.get_entity_details(self.completed_entity)
            opts = Options.for_entity(self.entity_decorators, details['type'])
            props = details['data'].copy()
            props['entity'] = {
                'type': details['type'],
            }
            children = DOM.create_element()
            for child in self.element_stack:
                DOM.append_child(children, child)
            rendered = DOM.create_element(opts.element, props, children)
            self.completed_entity = None
            self.element_stack = []
            return rendered
        if self.has_no_entity():
            # No entity open: pass the node straight through.
            return style_node
        # Inside an open entity: buffer the node until the entity closes.
        self.element_stack.append(style_node)
        return None
| mohit-n-rajput/BT-Real-Estate | venv/lib/python3.6/site-packages/draftjs_exporter/entity_state.py | entity_state.py | py | 2,098 | python | en | code | 1 | github-code | 36 |
37416138856 | import argparse
import sys
import traceback
from logging import error, warning
from typing import Dict, List, Text
from urllib.parse import urlparse
import act.api
import requests
import urllib3
from act.api.libs import cli
import act
from act.workers.libs import worker
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
MAX_RECURSIVE = 10  # max number of redirects to attempt to follow (failsafe)


def check_redirect(
    url: Text,
    url_shorteners: List[Text],
    user_agent: Text,
    proxies: Dict[Text, Text],
    timeout: int = 30,
) -> Text:
    """Normalize *url* to an http:// form and, if its host is one of the known
    URL-shortener domains, fetch it without following redirects and return the
    first redirect target. Otherwise the original url is returned unchanged."""
    original = url
    parsed = urlparse(url)
    # Scheme-less input: prepend http:// so the hostname parses correctly.
    if parsed.scheme == "":
        url = "http://{}".format(url)
        parsed = urlparse(url)
    if parsed.hostname not in url_shorteners:
        return original
    response = requests.get(
        url,
        allow_redirects=False,
        timeout=timeout,
        headers={"User-agent": user_agent},
        proxies=proxies,
    )
    if response.is_redirect:
        # Response.next holds the prepared request for the redirect target.
        return str(response.next.url)  # type: ignore
    return original
def process(
    api: act.api.Act,
    shorteners: List[Text],
    user_agent: Text,
    proxies: Dict[Text, Text],
    output_format: Text = "json",
) -> None:
    """Read queries from stdin, resolve each one through passivedns printing
    generic_uploader data to stdout"""
    for query in sys.stdin:
        query = query.strip()
        if not query:
            continue
        # Follow the redirect chain one hop at a time, emitting a
        # redirectsTo fact per hop, until the URL stops changing or the
        # MAX_RECURSIVE failsafe trips.
        n = 0
        while True:
            redirect = check_redirect(query, shorteners, user_agent, proxies)
            if redirect == query or n > MAX_RECURSIVE:
                break
            n += 1
            try:
                act.api.helpers.handle_uri(api, query, output_format=output_format)
            except act.api.base.ValidationError as err:
                warning("Unable to add {0} [{1}]".format(query, err))
                break
            try:
                act.api.helpers.handle_uri(api, redirect, output_format=output_format)
            except act.api.base.ValidationError as err:
                warning("Unable to add {0} [{1}]".format(redirect, err))
                break
            act.api.helpers.handle_fact(
                api.fact("redirectsTo")
                .source("uri", query)
                .destination("uri", redirect),
                output_format=output_format,
            )
            # Continue from the redirect target.
            query = redirect
def parseargs() -> argparse.Namespace:
    """Build the worker argument parser and return the parsed arguments."""
    parser = worker.parseargs("URL unshortener worker")
    parser.add_argument(
        "--url-shorteners",
        dest="url_shorteners",
        default="adf.ly, bit.ly, bitly.com, cc.uz, evassmat.com, goo.gl, is.gd, lnkd.in, smarturl.it, www.t2m.io, tiny.cc, tinyurl.com, x.co",
        help="Comma separated list of shortener-domains",
    )
    parser.add_argument(
        "--user-agent",
        dest="user_agent",
        default="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
        help="User-agent to present to the redirect services",
    )
    return cli.handle_args(parser)
def main() -> None:
    """Main function"""
    # Look for default ini file in "/etc/actworkers.ini" and
    # ~/config/actworkers/actworkers.ini (or replace .config with
    # $XDG_CONFIG_DIR if set)
    args = parseargs()
    try:
        shorteners = [x.strip() for x in args.url_shorteners.split(",")]
    except AttributeError:
        # NOTE(review): assumes cli.fatal() exits the process; otherwise
        # `shorteners` would be unbound below -- confirm.
        cli.fatal("Empty list of shorteners?")
    actapi = worker.init_act(args)
    # NOTE(review): proxies may be None here while process() annotates it as
    # Dict[Text, Text]; requests accepts None, so behavior is fine -- the
    # annotation is just loose.
    proxies = (
        {"http": args.proxy_string, "https": args.proxy_string}
        if args.proxy_string
        else None
    )
    process(actapi, shorteners, args.user_agent, proxies, args.output_format)
def main_log_error() -> None:
    "Main function wrapper. Log all exceptions to error"
    try:
        main()
    except Exception:
        # Record the full traceback before propagating, so the failure is
        # visible in the logs even if the process terminates.
        error("Unhandled exception: {}".format(traceback.format_exc()))
        raise


if __name__ == "__main__":
    main_log_error()
| mnemonic-no/act-workers | act/workers/url_shorter_unpack.py | url_shorter_unpack.py | py | 4,131 | python | en | code | 6 | github-code | 36 |
import argparse

# --- Spectrogram geometry ---
rows = 128  ### sliced spectrogram height
cols = 1024  ### sliced spectrogram width
channels = 2
max_width = 10337  ### maximum raw spectrogram width
split_count = 10  ### number of slices per song

# --- Training hyper-parameters ---
epochs = 100
batch_size = 32

spectrogram_features = ['h', 'p']  ### Percussive & harmonic component spectrogram

# Genre labels; indices define the class ids used by the model.
CLASSES = ['Cai Luong', 'Cach Mang', 'Dan Ca - Que Huong', 'Dance', 'Khong Loi',
           'Thieu Nhi', 'Trinh', 'Tru Tinh', 'Rap Viet', 'Rock Viet']
num_classes = len(CLASSES)

# Command-line arguments. NOTE(review): parse_args() runs at import time, so
# importing this module consumes sys.argv -- confirm that is intended.
parser = argparse.ArgumentParser()
parser.add_argument("--train_csv", type=str, default="data/train_set.csv", help='path to train set csv')
parser.add_argument("--val_csv", type=str, default="data/val_set.csv", help='path to validation set csv')
parser.add_argument("--spectr_dir", type=str, default="data/spectr/train", help='path to train spectrogram images')
parser.add_argument("--model", type=str, default="resnet18",
                    choices=["resnet18", "resnet34", "CRNN", "simpleCNN"], help='model type')
# NOTE(review): the default checkpoint name contains a space before ".h5" --
# presumably a typo, but kept as-is since callers may rely on it.
parser.add_argument("--checkpoint", type=str, default='music_genre_cnnit .h5', help='path to checkpoint')
parser.add_argument('--evaluate', action='store_true', help='evaluate trained model with validation data')
parser.add_argument('--use_cache', action='store_true', help='use cached .npy data from disk')
parser.add_argument('--resume', action='store_true', help='resume training from latest checkpoint')
args = parser.parse_args()

model_name = args.checkpoint  ### Saved model name
20410429609 | import torch
from torch import Tensor, nn
from dataclasses import dataclass, field
from typing import List, Tuple
from prediction.model import PredictionModel, PredictionModelConfig
from prediction.types import Trajectories
from prediction.utils.transform import transform_using_actor_frame_gauss
from prediction.utils.reshape import flatten, unflatten_batch
class ProbabilisticMLP(nn.Module):
    """Encoder/decoder MLP pair for probabilistic trajectory prediction.

    The encoder maps a flattened 30-dim input (presumably T=10 history steps
    of (x, y, yaw) per actor -- confirm against the dataloader) to a 128-dim
    latent; the decoder maps that latent to 50 outputs (reshaped downstream
    to T x 5 Gaussian trajectory parameters).
    """

    def __init__(self, config: PredictionModelConfig) -> None:
        """Build the encoder/decoder stacks. ``config`` is currently unused."""
        super().__init__()
        # Encoder: flattened 30-dim input -> 128-dim latent.
        self._encoder = nn.Sequential(
            nn.Flatten(),
            nn.Linear(30, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
        )
        # Decoder: 128-dim latent -> 50 outputs.
        self._decoder = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 50),
            nn.ReLU(),
        )

    def forward(self, x: Tensor) -> Tensor:
        """Encode then decode ``x``.

        Added because nn.Module subclasses are expected to implement
        ``forward``; existing users of ``_encoder``/``_decoder`` are
        unaffected.
        """
        return self._decoder(self._encoder(x))
class ProbabilisticModel(PredictionModel):
    """Prediction model whose outputs are Gaussian trajectory parameters."""

    def __init__(self, config: PredictionModelConfig) -> None:
        super().__init__(config)
        # NOTE(review): ProbabilisticMLP is instantiated with the config
        # *class* (not an instance) -- it only works because the argument is
        # unused -- and each line builds a full MLP just to borrow one half
        # of it. Consider constructing a single instance; confirm intent.
        self._encoder = ProbabilisticMLP(PredictionModelConfig)._encoder
        self._decoder = ProbabilisticMLP(PredictionModelConfig)._decoder

    @staticmethod
    def _postprocess(
        out: Tensor, batch_ids: Tensor, original_x_pose: Tensor
    ) -> List[Tensor]:
        """Postprocess predictions
        1. Unflatten time and position dimensions
        2. Transform predictions back into SDV frame
        3. Unflatten batch and actor dimension
        Args:
            out (Tensor): predicted input trajectories [batch_size * N x T * 2]
            batch_ids (Tensor): id of each actor's batch in the flattened list [batch_size * N]
            original_x_pose (Tensor): original position and yaw of each actor at the latest timestep in SDV frame
                [batch_size * N, 3]
        Returns:
            List[Tensor]: List of length batch_size of output predicted trajectories in SDV frame [N x T x 2]
        """
        num_actors = len(batch_ids)
        # 5 Gaussian parameters per timestep (the original comment said 6;
        # the code reshapes to 5).
        out = out.reshape(num_actors, -1, 5)  # [batch_size * N x T x 5]
        # Transform from actor frame, to make the prediction problem easier
        transformed_out = transform_using_actor_frame_gauss(
            out, original_x_pose, translate_to=False
        )
        # Translate so that latest timestep for each actor is the origin
        out_batches = unflatten_batch(transformed_out, batch_ids)
        return out_batches
31309911488 | import numpy as np
from sigmoid import sigmoid
def predict(Theta1, Theta2, X):
    """Predict the label of each example in X with a trained 2-layer neural network.

    Args:
        Theta1: layer-1 weights, shape (hidden_units, n_features + 1).
        Theta2: layer-2 weights, shape (num_labels, hidden_units + 1).
        X: examples in rows, shape (m, n_features).

    Returns:
        Array of m predicted labels; labels are 1-based (1..num_labels).
    """
    m = X.shape[0]  # number of examples

    # Forward pass: prepend the bias column, apply the layer weights, squash.
    a1 = np.hstack((np.ones((m, 1)), X))
    a2 = sigmoid(np.dot(a1, np.transpose(Theta1)))
    a3 = sigmoid(np.dot(np.hstack((np.ones((m, 1)), a2)), np.transpose(Theta2)))

    # np.argmax is 0-based while the labels are 1-based, hence the +1.
    return np.argmax(a3, axis=1) + 1
# =========================================================================
| EliottSimon17/NeuralNetwork | src/predict.py | predict.py | py | 1,502 | python | en | code | 0 | github-code | 36 |
import pytest
from loguru import logger
from pytest_mock import MockerFixture
from fastapi_cloud_logging.fastapi_cloud_logging_handler import FastAPILoggingHandler
@pytest.fixture
def logging_handler(mocker: MockerFixture) -> FastAPILoggingHandler:
    """Handler wired to a mocked client and transport so no real Cloud Logging
    calls happen; structured=True so payloads are dicts (see assertions below)."""
    return FastAPILoggingHandler(
        mocker.Mock(), transport=mocker.Mock(), structured=True
    )
def test_with_logger_message(logging_handler: FastAPILoggingHandler):
    """A plain loguru message is forwarded as a structured payload with logger metadata."""
    # NOTE(review): the handler is added to the global loguru logger and never
    # removed, so handlers accumulate across tests — consider logger.remove().
    logger.add(logging_handler, format="{message}")
    logger.info("Hello")
    (_, message_payloads), args = logging_handler.transport.send.call_args
    assert args["labels"]["python_logger"] == "tests.test_loguru"
    assert args["source_location"] is not None
    assert message_payloads == {"message": "Hello"}
def divide(a, b):
    # Plain true division; deliberately lets ZeroDivisionError propagate so the
    # exception-logging test has something to catch.
    quotient = a / b
    return quotient
def test_with_logger_exception(logging_handler: FastAPILoggingHandler):
    """logger.exception() forwards the message plus a traceback payload."""
    logger.add(logging_handler, format="{message}")
    try:
        divide(5, 0)
    except ZeroDivisionError:
        logger.exception("An error has occurred")
    (record, message_payloads), args = logging_handler.transport.send.call_args
    # NOTE(review): record.exc_info appears to be consumed/cleared by the
    # handler rather than kept on the record — confirm against the handler.
    assert record.exc_info is None
    assert args["labels"]["python_logger"] == "tests.test_loguru"
    assert args["source_location"] is not None
    assert message_payloads["message"].startswith("An error has occurred\n")
    assert len(message_payloads["traceback"]) == 2
| quoth/fastapi-cloud-logging | tests/test_loguru.py | test_loguru.py | py | 1,376 | python | en | code | 5 | github-code | 36 |
# Subsequence of products less than K
def subseq_product(arr, k):
    """Count contiguous subarrays of `arr` whose product is strictly less than `k`.

    Sliding-window two-pointer scan: grow the window on the right, shrink it
    from the left while the running product is too large. O(n) time, O(1) space.

    Args:
        arr: sequence of positive integers.
        k: product threshold.

    Returns:
        Number of contiguous subarrays with product < k.
    """
    n = len(arr)
    product = 1
    start = end = 0
    result = 0
    while end < n:
        product = product * arr[end]
        while start < end and product >= k:
            # Integer division is exact here (we only divide out factors we
            # previously multiplied in) and, unlike the original float `/`,
            # cannot lose precision on large products.
            product //= arr[start]
            start = start + 1
        if product < k:
            # Every subarray ending at `end` and starting in [start, end]
            # qualifies: that's end - start + 1 of them.
            result = result + end - start + 1
        end = end + 1
    return result
if __name__ == '__main__':
    # Quick manual check: [1], [2], [3], [4], [1,2], [2,3], [1,2,3] -> 7
    # subarrays with product < 10.
    print(subseq_product([1,2,3,4], 10))
    #print(subseq_product([1,9,2,8,6,4,3], 100))
| indrajitrdas/Simple-Programs | ContiguousSubArrayProductLessThanK.py | ContiguousSubArrayProductLessThanK.py | py | 593 | python | en | code | 0 | github-code | 36 |
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader, sampler
from PIL import Image
import random
from scipy import signal
import wave
import struct
import random
class wave_spec(Dataset):
    """Dataset of wav files organised as root_dir/<class_name>/*.wav.

    Each item is the spectrogram of one recording rendered as a grayscale
    PIL image, plus its integer class label and the source path.
    """

    def __init__(self, root_dir, trans=None):
        self.root_dir = root_dir
        self.trans = trans  # optional image transform applied per sample

        # Every sub-directory of root_dir is one class; labels follow
        # listing order.
        classes = [fn for fn in os.listdir(self.root_dir)
                   if os.path.isdir(self.root_dir + '/' + fn)]
        class_index = {name: idx for idx, name in enumerate(classes)}

        # Collect (path, label) for every wav file of every class.
        # extend() instead of the original quadratic `list + list` rebuild.
        self.sounds = []
        for name in classes:
            class_dir = root_dir + '/' + name + '/'
            self.sounds.extend(
                (class_dir + fn, class_index[name])
                for fn in os.listdir(class_dir) if fn.endswith('.wav')
            )

    def __getitem__(self, idx):
        path, label = self.sounds[idx]
        wave_file = wave.open(path, 'rb')
        try:
            fs = wave_file.getframerate()
            N = wave_file.getnframes()
            stft_img = self.stft_img(wave_file, fs, N)
        finally:
            # Close the handle deterministically instead of leaking it
            # (the original never closed the wave file).
            wave_file.close()
        img = Image.fromarray(stft_img.astype('uint8'), 'L')
        if self.trans:
            img = self.trans(img)
        sample = {'image': img, 'label': label, 'path': path}
        return sample

    def __len__(self):
        return len(self.sounds)

    def stft_img(self, wave_file, fs, N):
        # Unpack N 16-bit PCM frames and return the one-sided spectrogram.
        wf = wave_file.readframes(N)
        wf = np.array(struct.unpack('h' * N, wf))
        _, _, Sxx = signal.spectrogram(wf, fs, return_onesided=True)
        return Sxx
| devansh20la/Speech_Recognition | data_loader_spec.py | data_loader_spec.py | py | 1,324 | python | en | code | 0 | github-code | 36 |
"""Database debug and diagnostics functions."""
import json
from typing import Any, Generator, Dict
from . import db
from . import user as usermod
from . import request as requestmod
def sprint_users(*criterions) -> Generator[str, None, None]:
    """Yield one json-formatted string per user matching the given criterions."""
    users = usermod.get_users(*criterions)
    if users is None:
        return
    yield from (sprintv(user) for user in users)
def print_users(*criterions) -> None:
    """Like sprint_users, but prints each user's json instead of yielding it."""
    for serialized in sprint_users(*criterions):
        print(serialized)
def sprintv(x: Any) -> str:
    """Return the attributes of 'x' rendered as pretty-printed json.

    NOTE(review): attributes whose *name contains* an underscore anywhere are
    skipped — not just ``_``-prefixed private ones. Confirm this is intended.
    """
    visible = {name: str(value)
               for name, value in vars(x).items()
               if "_" not in name}
    return json.dumps(visible, sort_keys=True, indent=4)
def printv(x: Any) -> None:
    """Like sprintv, but prints the json representation instead of returning it.

    (The previous docstring was a copy-paste of print_users' and described
    the wrong function.)
    """
    print(sprintv(x))
def sprint_requests(requests: Dict[int, db.RequestStatus]) -> str:
    """Return a pretty-printed json string describing 'requests', a mapping
    from request_id to its RequestStatus."""
    summary = {}
    for request_id, status in requests.items():
        summary[request_id] = {
            "id": request_id,
            "srcnetid": requestmod.get_srcnetid(request_id),
            "destnetid": requestmod.get_destnetid(request_id),
            "status": status.to_readable(),
        }
    return json.dumps(summary, sort_keys=True, indent=4)
| ProfessorLinstar/Gymbuddies | gymbuddies/database/debug.py | debug.py | py | 1,545 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 20:03:31 2020
@author: b
"""
import numpy as np
values = [1, 2.4, 234, 112, 345]
array = np.array(values)
A = np.arange(1, 10, 1).reshape(3,3)
b = np.ones((3,6))
# Indexing: single element at row 0, column 1
A[0, 1]
# Slicing
# A[start:stop:step, start:stop:step]
A[:, 0]
# Subsetting
B = A[0:2, 0:2]
C = np.zeros((4, 4))
C[1:3, 1:3] = 1
C
A = np.random.randint(0, 10, [5, 5])
A
# Boolean indexing
A[A < 5] = 10
A[(A<5) & (A>2)] = 12
# Masking
A[A>5]
# Building an array from a function
def f(i, j):
    """Element generator for np.fromfunction: value 10*i + j at index (i, j)."""
    return j + 10 * i
np.fromfunction(f,(4,5),dtype =int)
###############################################
# NUMPY STATISTICS AND MATHEMATICS
###############################################
# Methods belonging to the ndarray class
A = np.random.randint(0, 10, [10, 3])
np.random.rand(6) # random array of 6 numbers
A.sum(axis=0)
A.cumsum()
A.prod() # product of all coefficients
A.min(axis=0) # minimum along axis 0
A.argmin(axis=0) # position of the minimum
A.sort()
A.argsort() # returns the order in which elements must be taken to sort them
A.mean() # mean (the original comment wrongly said "variance")
A.std() # standard deviation
A.var() # variance
A
np.corrcoef(A) # correlation matrix
values, counts = np.unique(A, return_counts=True) # distinct values and their counts
counts.argsort()
values[counts.argsort()] # values ordered from least to most frequent
# Print values from least frequent to most frequent with their counts
for i, j in zip(values[counts.argsort()], counts[counts.argsort()]):
    print(f'valeur {i} apparait {j}')
# NaN corrections
# The following functions compute statistics even in the presence of NaN values
A = np.random.randn(5, 5)
A[2, 2] = np.nan
A[4, 1] = np.nan
np.nanmean(A)
np.nanstd(A)
np.isnan(A) # boolean mask marking NaN positions
np.isnan(A).sum()
np.isnan(A).sum()/A.size
A[np.isnan(A)]=0
###############################################
# NUMPY LINEAR ALGEBRA
###############################################
A = np.ones((2, 3))
B = np.ones((3, 2))
A.T # transpose
A.dot(B) # matrix product
# Inverting a square matrix with a non-zero determinant
A = np.random.randint(0, 10, [3, 3])
A
np.linalg.det(A)
np.linalg.inv(A)
np.linalg.pinv(A) # pseudo-inverse (works even when inv() does not, with caveats)
# Principal Component Analysis uses the eigenvalues
np.linalg.eig(A)
# Exercise: standardize a matrix A (zero mean, unit std per column)
np.random.seed(0)
A = np.random.randint(0, 100, [10, 5])
A
# My solution (column by column)
moy = A.mean(axis=0)
ecart = A.std(axis=0)
for col in range(5):
    A[:,col] = (A[:,col] - moy[col])/ecart[col]
# Guillaume's solution (vectorized via broadcasting)
D = (A - A.mean(axis=0)) / A.std(axis=0)
D.mean(axis=0)
D.std(axis=0)
###############################################
# NUMPY BROADCASTING
###############################################
# Broadcasting stretches an array's dimensions to match the other operand
# Pay close attention to the dimensions of your data sets!
np.random.seed(0)
A = np.random.randint(0, 10, [2, 3])
B = np.ones((2, 1))
A + 2
A + B # B's single column is stretched across A's columns
# Broadcasting rules:
# each dimension of A and B must be equal, or equal to 1
A = np.random.randint(0, 10, [4, 1])
B = np.ones((1, 3))
A + B # both size-1 dimensions are stretched
| b846/Data | 1b Numpy Indexing Slicing Masking.py | 1b Numpy Indexing Slicing Masking.py | py | 3,332 | python | fr | code | 0 | github-code | 36 |
# -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution:
    """Check whether a binary tree is a mirror of itself (symmetric).

    The original queue+stack implementation used -1 as a sentinel value
    (its own comment admits this) and therefore broke for trees containing
    the value -1; this recursive mirror comparison has no such restriction.
    """

    def isSymmetrical(self, pRoot):
        def _mirror(a, b):
            # Both subtrees exhausted together: symmetric so far.
            if a is None and b is None:
                return True
            # Shape mismatch or value mismatch.
            if a is None or b is None or a.val != b.val:
                return False
            # Outer pair and inner pair must both mirror each other.
            return _mirror(a.left, b.right) and _mirror(a.right, b.left)

        if pRoot is None:
            return True
        return _mirror(pRoot.left, pRoot.right)
# (Translated author's note on the original approach) Borrowed the idea of
# level-order traversal: a deque drives the scan while a stack keeps the
# values seen so far, popping whenever two values match, leaving only the
# root sentinel at the end.
16178101967 |
#Korean vowel combination
con_dict = [
['ㅏㅣ','ㅐ'], ['ㅑㅣ','ㅒ'], ['ㅓㅣ','ㅔ'],
['ㅕㅣ','ㅖ'], ['ㅗㅣ','ㅚ'], ['ㅗㅐ','ㅙ'],
['ㅜㅓ','ㅝ'], ['ㅜㅔ','ㅞ'], ['ㅡㅣ','ㅢ'],
['ㅣㅏ','ㅑ'], ['ㅣㅓ','ㅕ'], ['ㅣㅗ','ㅛ'],
['ㅣㅜ','ㅠ'], ['ㅡㅓ','ㅓ'], ['ㅗㅏ','ㅘ'],
]
#jongsung_list = [ 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
# lis_beta = ['EP+EF', 'VCP+EF', 'B+EF', 'B+EP+EF', 'B+VCP+EF', 'EF','EP']
# lis_beta = ['EP+EF', 'EF', 'B+EF', 'B+EP+EF']
# Handles sentence-final endings; if EC etc. become necessary later, add them
# to this part. Note these must appear at the end of a sentence.
# In particular, the list right below is for simple deletion: when a morpheme
# tag matches an entry in this list, it is removed.
lis_beta_ef = ['EP+EP+EF', 'EP+EF', 'EF', 'UNKNOWN']
lis_beta_ef_h = ['EF', 'UNKNOWN']
lis_tag_last = ['EF', 'UNKNOWN']
# List for lowering honorific nouns/verbs to their plain forms
lis_wk = [
['ㄱㅖ', 'ㅇㅣㅆㅇㅡ'], ['ㅈㅜㅁㅜ','ㅈㅏ'], ['ㅈㅏㅂㅅㅜ','ㅁㅓㄱㅇㅡ']
]
# Honorific sentence-final endings
lis_end = [
'ㅅㅡㅂㄴㅣㄷㅏ', 'ㅅㅡㅂㄴㅣㄲㅏ',
'ㅂㄴㅣㄷㅏ', 'ㄴㅣㄷㅏ', 'ㅂㅅㅣㄷㅏ', 'ㅅㅣㄷㅏ', 'ㄹㄹㅐㅇㅛ','ㄹㅐㅇㅛ',
'ㅇㅡㅅㅔㅇㅛ', 'ㅅㅔㅇㅛ', 'ㄷㅔㅇㅛ', 'ㅇㅔㅇㅛ', 'ㅇㅖㅇㅛ', 'ㄴㅏㅇㅛ', 'ㅇㅡㄹㄲㅏㅇㅛ', 'ㅇㅣㄹㄲㅏㅇㅛ', 'ㄹㄲㅏㅇㅛ', 'ㅇㅡㄴㄱㅏㅇㅛ', 'ㅇㅣㄴㄱㅏㅇㅛ','ㄱㅜㄴㅇㅛ','ㄴㄱㅏㅇㅛ',
'ㄱㅗㅇㅛ','ㅇㅛ',
'ㅈㅛ',
'ㅅㅣㅂㅅㅣㅇㅗ', 'ㅅㅣㅇㅗ', 'ㅇㅗ',
'ㅂㄴㅣㄲㅏ',
]
# Informal (banmal) sentence-final endings
lis_end_2low = [
'ㄷㅓㄹㅏ','ㄴㄷㅏ', 'ㅆㄷㅏ', 'ㄹㅗㄷㅏ', 'ㄷㅏ', 'ㄱㅔ', 'ㄴㅡㄴㄷㅏ',
'ㄹㅏ',
'ㅇㅑ',
'ㄴㅣㄲㅏ', 'ㄲㅏ', 'ㄹㄲㅏ', 'ㅈㅣ',
'ㄴㅣ',
'ㅇㅏ', 'ㅇㅓ',
'ㄷㅔ', 'ㄱㅏ','ㄹㅐ',
'ㅈㅏㄶㅇㅏ', 'ㄴㅔ','ㅇㅕ', 'ㄴㅏ','ㄱㅜㄴ','ㄱㅗ',
'ㅈㅣㅁㅏㄴ', 'ㅇㅡㄴㄷㅔ', 'ㅅㅓ', 'ㄷㅐ',
'ㄱㅓㄹ','ㄲㅔ', 'ㄴㅑ',
]
lis_ic = ['ㅇㅖ', 'ㄴㅔ', 'ㅇㅏㄴㅣㅇㅗ', 'ㅇㅏㄴㅣㅇㅛ']
P_LIST = ['.', '?', '!', '\'', '\"', 'ᆞ', 'ᆢ', 'ㆍ', '”', '’',')', '(', ',', '”']
SV_LIST = ['\'', '\"', ':', ';']
lis_plus = [
'EP', 'VCP',
]
############################## EF Dictionary ##############################
###########################################################################
############ Honorific -> informal speech ############
# The current implementation only cuts out EF (sentence-final ending) tags.
# Find the entry that maps to it in the original EF dictionary.
# Accessed as a Python dictionary.
# Sentence-final ending handling.
EF = {
###하십시오체###
#평서문
#'ㅂㄴㅣㄷㅏ': 'ㄷㅏ',
'ㅂㄴㅣㄷㅏ': 'special3',
'ㅅㅡㅂㄴㅣㄷㅏ':'special2',
'ㅇㅗㄹㅅㅣㄷㅏ':'ㄷㅏ', #**
'ㅂㅈㅣㅇㅛ':'지', #**
'ㅅㅣㅂㄴㅣㄷㅏ':'special1',
'ㅇㅡㅅㅣㅂㄴㅣㄷㅏ':'ㅇㅡㅅㅣㄴㄷㅏ',
'ㅇㅡㅅㅣㅂㄴㅣㄲㅏ':'ㅇㅡㅅㅣㄴㅣ',
#의문문
'ㅅㅡㅂㄴㅣㄲㅏ':'ㄴㅣ',
'ㅂㄴㅣㄲㅏ': 'ㄴㅣ',
'ㅅㅣㅂㄴㅣㄲㅏ':'special1', #EP+EF
#명령법
'ㅇㅡㅅㅔㅇㅛ': 'special0',
'ㅅㅔㅇㅛ':'special1',
'ㅅㅣㅇㅓㅇㅛ': 'special1',
'ㅅㅣㅂㅅㅣㅇㅗ':'ㅅㅣㅇㅗ',
#청유법
'ㅂㅅㅣㄷㅏ':'special4',
'ㅇㅡㅂㅅㅣㄷㅏ':'special4',
###하오체###
###해요체###
#평서문
'ㅇㅓㅇㅛ':'ㅇㅓ',
'ㅇㅏㅇㅛ':'ㅇㅏ',
'ㅈㅛ':'ㅈㅣ',
'ㅇㅔㅇㅛ':'ㅇㅑ',
'ㅇㅖㅇㅛ':'ㅇㅑ',
'ㅇㅛ':'special5',
'ㄷㅐㅇㅛ':'ㄷㅐ',
'ㄷㅔㅇㅛ':'ㄷㅔ',
'ㄴㅔㅇㅛ':'ㄴㅔ',
'ㄴㅡㄴㄷㅔㅇㅛ':'ㄴㅡㄴㄷㅔ',
'ㄱㅓㄷㅡㄴㅇㅛ':'ㄱㅓㄷㅡㄴ',
'ㄱㅜㄴㅇㅛ': 'ㄱㅜㄴㅏ',
'ㅇㅡㄴㄷㅔㅇㅛ':'ㅇㅡㄴㄷㅔ',
'ㅈㅏㄱㅜㅇㅛ':'ㅈㅏㄱㅜ',
'ㄴㅣㄲㅏㅇㅛ': 'ㄴㅣㄲㅏ',
'ㅈㅣㅇㅛ':'ㅈㅣ',
#의문문
'ㄴㅏㅇㅛ':'special6',
'ㄹㄲㅏㅇㅛ':'ㄹㄲㅏ',
'ㅇㅡㄹㄲㅏㅇㅛ':'ㅇㅡㄹㄲㅏ',
'ㄴㄱㅏㅇㅛ':'ㄴㄱㅏ',
'ㄹㄹㅐㅇㅛ':'ㄹㄹㅐ',
'ㄹㅐㅇㅛ':'ㄹㅐ',
'ㄱㅗㅇㅛ':'ㄱㅗ',
'ㅇㅡㄴㄱㅏㅇㅛ':'ㅇㅡㄴㄱㅏ',
'ㅇㅣㄴㄱㅏㅇㅛ':'ㅇㅣㄴㄱㅏ',
}
need_origin_EF = {
'ㅂㅅㅣㄷㅏ':'ㅈㅏ',
}
# 'ㄹ'-rule conjugation -> dictionary collecting, as far as possible, the verbs
# that undergo the ㄹ conjugation rule
EF_R_rule= {
'ㄱㅜ':'ㄹ',
'ㄴㅗ':'ㄹ',
'ㄴㅏ':'ㄹ',
'ㄷㅗ':'ㄹ',
'ㄷㅡ':'ㄹ',
'ㄷㅏ':'ㄹ',
'ㄷㅜ':'ㄹ',
'ㅂㅜ':'ㄹ',
'ㄲㅗ':'ㄹ',
'ㅁㅣ':'ㄹ',
'ㅁㅜ':'ㄹ',
#'ㅂㅗㅍㅜ':'ㄹ', #error predicate 수정
'ㅂㅜ':'ㄹ',
'ㅅㅡ':'ㄹ',
'ㄸㅓ':'ㄹ',
}
IC = {'ㅇㅖ':'ㅇㅡㅇ', 'ㄴㅔ':'ㅇㅡㅇ', 'ㅇㅏㄴㅣㅇㅗ':'ㅇㅏㄴㅣ', 'ㅇㅏㄴㅣㅇㅛ':'ㅇㅏㄴㅣ'}
formal_vv ={
'ㄱㅖ':'ㅇㅣㅆㅇㅡ',
'ㅈㅜㅁㅜ':'ㅈㅏ',
'ㅈㅏㅂㅅㅜ':'ㅁㅓㄱㅇㅡ'
}
# Python XML DOM Minidom
# xml.dom.minidom — Minimal DOM implementation.
# xml.dom.minidom is a minimal implementation of the Document Object Model interface, with an API similar to that in other languages.
# It is intended to be simpler than the full DOM and also significantly smaller.
# Users who are not already proficient with the DOM should consider using the xml.etree.ElementTree module for their XML processing instead.
#
#
# Warning:
# The xml.dom.minidom module is not secure against maliciously constructed data.
# If you need to parse untrusted or unauthenticated data see XML vulnerabilities.
#
#
# This example program is a fairly realistic example of a simple program.
# In this particular case, we do not take much advantage of the flexibility of the DOM.
#
import xml.dom.minidom
document = """\
<slideshow>
<title>Demo slideshow</title>
<slide><title>Slide title</title>
<point>This is a demo</point>
<point>Of a program for processing slides</point>
</slide>
<slide><title>Another demo slide</title>
<point>It is important</point>
<point>To have more than</point>
<point>one slide</point>
</slide>
</slideshow>
"""
dom = xml.dom.minidom.parseString(document)
def getText(nodelist):
    """Concatenate the character data of every text node in nodelist."""
    return ''.join(node.data
                   for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
def handleSlideshow(slideshow):
    # Emit a minimal HTML page: title, table of contents, then each slide.
    print("<html>")
    handleSlideshowTitle(slideshow.getElementsByTagName("title")[0])
    slides = slideshow.getElementsByTagName("slide")
    handleToc(slides)
    handleSlides(slides)
    print("</html>")
def handleSlides(slides):
    # Render every slide in document order.
    for slide in slides:
        handleSlide(slide)
def handleSlide(slide):
    # One slide = its (first) title plus its bullet points.
    handleSlideTitle(slide.getElementsByTagName("title")[0])
    handlePoints(slide.getElementsByTagName("point"))
def handleSlideshowTitle(title):
    # Document-level title element -> HTML <title>.
    print("<title>%s</title>" % getText(title.childNodes))
def handleSlideTitle(title):
    # Per-slide title element -> HTML <h2>.
    print("<h2>%s</h2>" % getText(title.childNodes))
def handlePoints(points):
    # Bullet points -> HTML unordered list.
    print("<ul>")
    for point in points:
        handlePoint(point)
    print("</ul>")
def handlePoint(point):
    # Single bullet point -> HTML list item.
    print("<li>%s</li>" % getText(point.childNodes))
def handleToc(slides):
    # Table of contents: one paragraph per slide title.
    for slide in slides:
        title = slide.getElementsByTagName("title")[0]
        print("<p>%s</p>" % getText(title.childNodes))
handleSlideshow(dom)
| VakinduPhilliam/Python_XML_Processing | Python_XML_DOM_Minidom_DOM_Flexibility_Example.py | Python_XML_DOM_Minidom_DOM_Flexibility_Example.py | py | 2,468 | python | en | code | 2 | github-code | 36 |
from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
import os
def read_file(name):
    """Return the full text content of the file at path `name`."""
    with open(name, "r") as handle:
        contents = handle.read()
    return contents
def read_eval_questions(filename='./mi_questions.txt'):
    """Load evaluation questions: one per non-empty line of `filename`."""
    raw = read_file(filename)
    return [line for line in raw.split('\n') if line]
def build_sentence_window_index(
    document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index"
):
    """Build (or reload from `save_dir`) a sentence-window vector index over
    `document`, storing a 3-sentence window around each node."""
    # Sentence-window parser: keep the surrounding window in node metadata.
    window_parser = SentenceWindowNodeParser.from_defaults(
        window_size=3,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    service_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=window_parser,
    )

    if os.path.exists(save_dir):
        # A persisted index already exists — reload it.
        sentence_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=service_context,
        )
    else:
        # First run: build the index and persist it for next time.
        sentence_index = VectorStoreIndex.from_documents(
            [document], service_context=service_context
        )
        sentence_index.storage_context.persist(persist_dir=save_dir)
    return sentence_index
def get_sentence_window_query_engine(
    sentence_index,
    similarity_top_k=6,
    rerank_top_n=2,
):
    """Build a query engine over `sentence_index` that swaps each hit's text
    for its surrounding window, then reranks candidates with a cross-encoder."""
    # Post-processing pipeline: window substitution first, reranking second.
    window_replacer = MetadataReplacementPostProcessor(target_metadata_key="window")
    reranker = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    return sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k,
        node_postprocessors=[window_replacer, reranker],
    )
| kilianovski/study | rag/utils.py | utils.py | py | 2,068 | python | en | code | 1 | github-code | 36 |
import numpy as np
import findiff
def curl_2d(x, y, u, v):
    """2-D curl (scalar vorticity) of the vector field (u, v): dv/dx - du/dy.

    Args:
        x: spacing/coordinate spec along array axis 1 (passed to FinDiff).
        y: spacing/coordinate spec along array axis 0 (passed to FinDiff).
        u: x-component of the field, sampled on the (axis0, axis1) grid.
        v: y-component of the field, same shape as u.

    Returns:
        Array of the same shape as u/v holding dv/dx - du/dy.
    """
    # 10th-order-accurate finite-difference operators: axis 0 paired with y,
    # axis 1 paired with x (matching the original derivative setup).
    d_dy = findiff.FinDiff(0, y, acc=10)
    d_dx = findiff.FinDiff(1, x, acc=10)
    dv_dx = d_dx(v)
    du_dy = d_dy(u)
    # Use a name distinct from the function to avoid shadowing `curl_2d`.
    vorticity = dv_dx - du_dy
    return vorticity
| HarleyHanes/aerofusion-HarleyFork | Python/aerofusion/numerics/curl_calc.py | curl_calc.py | py | 341 | python | en | code | 0 | github-code | 36 |
class Solution:
    def toGoatLatin(self, sentence: str) -> str:
        """Convert a sentence to Goat Latin.

        Rules for word i (1-based): vowel-initial words get "ma" appended;
        consonant-initial words move their first letter to the end before
        "ma"; every word then receives i trailing 'a' characters.
        (Removed a leftover debug print from the consonant branch.)
        """
        words = sentence.split()
        vowels = "aeiou"
        converted = []
        for i, word in enumerate(words):
            if word[0].lower() in vowels:
                word += "ma"
            else:
                # Move the leading consonant behind the rest of the word.
                word = word[1:] + word[0] + "ma"
            converted.append(word + "a" * (i + 1))
        return " ".join(converted)
| type0-1/LeetCode | goat-latin.py | goat-latin.py | py | 482 | python | en | code | 0 | github-code | 36 |
def createTreeItem(key, value):
    """Build a single BST node holding the given search key and value.

    :param key: The search key of the item.
    :param value: The value of the item.
    :return: The newly created BST node.
    """
    item = BST()
    item.key = key
    item.Root = value
    return item
class BST:
    def __init__(self): # Creates an empty tree.
        """
        Creates a tree object.
        Precondition: /
        Postcondition: tree object created
        """
        self.Root = None
        self.left = None
        self.right = None
        self.key = None
        self.parent = None
    def isEmpty(self): # Returns True when the tree is empty, False otherwise.
        if self.Root == None:
            return True
        return False
    def searchTreeInsert(self, TreeItem): # Inserts a tree item into the tree.
        """
        Inserts a node.
        :param TreeItem: the new tree item
        :return: True or False, depending on whether the key was already in the tree.
        Precondition: you cannot add a node whose key is already in the tree.
        Postcondition: node has been added.
        """
        if BST.isEmpty(self): # If the tree is empty, the new item becomes the root.
            self.Root = TreeItem.Root
            self.key = TreeItem.key
            return True
        if BST.searchTreeRetrieve(self, TreeItem.key)[1]: # If an item with the same search key already exists, the node cannot be added.
            return False
        if self.key > TreeItem.key: # New key is smaller: descend into the left child if present; otherwise the new item becomes the left child with this node as its parent.
            if self.left:
                return BST.searchTreeInsert(self.left, TreeItem)
            else:
                self.left = TreeItem
                self.left.parent = self
                return True
        else:
            if self.right: # New key is larger: descend into the right child if present; otherwise the new item becomes the right child with this node as its parent.
                return BST.searchTreeInsert(self.right, TreeItem)
            else:
                self.right = TreeItem
                self.right.parent = self
                return True
    def searchTreeRetrieve(self, key):
        """
        Looks up a key in the tree and returns its value.
        :param key: the search key of the item to find.
        :return: (value, True) when found, (None, False) otherwise.
        Precondition: no two items in the tree share a search key.
        Postcondition: the value for the requested key is returned.
        """
        if BST.isEmpty(self): # Empty tree: nothing to find.
            return None, False
        if self.key == key: # Current node holds the requested key: return its value.
            return self.Root, True
        if self.key > key: # Requested key is smaller: recurse into the left subtree if it exists.
            if self.left:
                return BST.searchTreeRetrieve(self.left, key)
            return None, False
        if self.key < key: # Requested key is larger: recurse into the right subtree if it exists.
            if self.right:
                return BST.searchTreeRetrieve(self.right, key)
            return None, False
    def inorderTraverse(self, Functie=None,Traversed=[]):
        """
        Traverses the tree in order and applies 'Functie' to each key.
        :param Functie: what to do with the in-order keys (print/...). When omitted,
                        the keys are collected and returned as a list.
        :param Traversed: only used by the collecting (return) variant.
        :return: depends entirely on Functie.
        Precondition: /
        Postcondition: Functie has been applied to the in-order keys.
        NOTE(review): 'Traversed=[]' is a mutable default argument shared across
        calls, and the recursive calls below do not pass it on — results
        accumulate between successive traversals. Confirm this is intended.
        """
        if (Functie!=None):
            if self.left != None:
                BST.inorderTraverse(self.left, Functie)
            Functie(self.key)
            if self.right != None:
                BST.inorderTraverse(self.right, Functie)
        else:
            if self.left != None:
                BST.inorderTraverse(self.left)
            Traversed.append(self.key)
            if self.right != None:
                BST.inorderTraverse(self.right)
            return Traversed
    def inorderSuccesor(self, Succ=True):
        # Finds the in-order successor of this node: step into the right
        # subtree once, then follow left children down to the smallest key.
        if Succ:
            return BST.inorderSuccesor(self.right, False) # First call: move into the right subtree.
        if self.left: # Keep following left children while they exist; otherwise this node is the successor.
            return BST.inorderSuccesor(self.left, False)
        return self
    def searchTreeDelete(self, item):
        """
        Deletes a node from the tree.
        :param item: the key of the node to remove
        :return: True/False (True when a node was removed)
        Precondition: if the root is the only node, it may not be removed.
        Postcondition: node has been removed.
        """
        if not self.isEmpty(): # If the tree is empty, simply return False; otherwise continue.
            if item == self.key: # Checks whether 'item' equals the current node's key.
                if self.left == None and self.right == None: # Leaf: detach it from its parent.
                    if self.parent.left == self:
                        self.parent.left = None
                    elif self.parent.right == self:
                        self.parent.right = None
                elif self.right == None: # Only a left child: splice it into the parent's slot.
                    if self == self.parent.left:
                        self.parent.left = self.left
                    else:
                        self.parent.right = self.left
                elif self.left == None: # Only a right child: splice it into the parent's slot.
                    if self.parent:
                        if self == self.parent.right:
                            self.parent.right = self.right
                        else:
                            self.parent.left = self.right
                    else:
                        # No parent (root): swap with the in-order successor and
                        # delete the key from the successor's (leaf) position.
                        Succ = self.inorderSuccesor()
                        self.Root = Succ.Root
                        self.key = Succ.key
                        Succ.key = item
                        return Succ.searchTreeDelete(item)
                else: # Two children: swap with the in-order successor, then delete the item once it sits in a leaf position.
                    Succ = self.inorderSuccesor()
                    self.Root = Succ.Root
                    self.key = Succ.key
                    Succ.key = item
                    return Succ.searchTreeDelete(item)
                return True
            if item < self.key: # Key does not match: recurse left or right depending on the comparison, when the child exists.
                if self.left:
                    return BST.searchTreeDelete(self.left, item)
                return False
            if item > self.key:
                if self.right:
                    return BST.searchTreeDelete(self.right, item)
                return False
        return False
    def save(self):
        """
        Serializes the current tree into a dict.
        :return: (dict)
        Precondition: /
        Postcondition: returns the dictionary built from the tree.
        NOTE(review): save() emits capitalized "Root"/"Children" keys while
        load() below reads lowercase "root"/"children" — round-tripping
        save() output through load() will fail; confirm the expected format.
        """
        Boompje = {"Root": None, "Children": []} # Start from an empty tree dict.
        if self.Root != None: # When this node holds a value, record its key as the dict root.
            Boompje["Root"] = self.key
        if self.left != None: # Left child present: if the right child is missing, pad its slot with None.
            if self.right == None:
                Boompje["Children"].append(self.left.save())
                Boompje["Children"].append(None)
            else:
                Boompje["Children"].append(self.left.save())
        if self.right != None: # Same as above with the roles reversed.
            if self.left == None:
                Boompje["Children"].append(None)
                Boompje["Children"].append(self.right.save())
            else:
                Boompje["Children"].append(self.right.save())
        if len(Boompje["Children"]) == 0: # No children: return the compact leaf form below.
            return {"Root": self.key}
        if self == None:
            return {}
        return Boompje
    def emptyTree(self): # Empties the tree.
        """
        Empties the tree, e.g. before loading a new one.
        :return: /
        Precondition: /
        Postcondition: the tree is empty.
        """
        self.left=None
        self.right=None
        self.Root=None
        self.key=None
    def load(self, dict, NotEmpty=True):
        """
        Empties the tree and loads a new one in its place.
        :param dict: the serialized tree to load.
        :param NotEmpty: whether the tree must be emptied first (only the
                         top-level call empties; recursive calls pass False).
        :return: /
        Precondition: /
        Postcondition: the tree is emptied and the new tree is loaded.
        """
        if NotEmpty: # Top-level call: clear out any existing contents first.
            BST.emptyTree(self)
        if len(dict) == 0 or dict=={}: # An empty dict yields an empty tree.
            return BST.emptyTree(self)
        self.Root = dict["root"] # Root value taken from the dict.
        self.key = self.Root # Key mirrors the root value.
        if "children" in dict: # Checks whether this node has children.
            if dict['children'][
                0]: # Left child present: create a BST with this node as parent and load it recursively.
                self.left = BST()
                self.left.parent = self
                self.left.load(dict['children'][0], False)
            if dict['children'][
                1]: # Right child present: create a BST with this node as parent and load it recursively.
                self.right = BST()
                self.right.parent = self
                self.right.load(dict['children'][1], False)
class Table:
    """Table ADT implemented as a thin facade over a binary search tree."""
    def __init__(self):
        # The underlying BST storing all (key, value) items.
        self.tree=BST()
    def tableIsEmpty(self):
        # True when the table holds no items.
        return self.tree.isEmpty()
    def tableInsert(self,key,Treeitem):
        # Wrap the value in a BST node and insert it under `key`.
        return self.tree.searchTreeInsert(createTreeItem(key,Treeitem))
    def tableRetrieve(self,key):
        # Returns (value, True) when found, (None, False) otherwise.
        return self.tree.searchTreeRetrieve(key)
    def traverseTable(self,Func=None):
        # In-order traversal; `Func` is applied to each key when given.
        return self.tree.inorderTraverse(Func)
    def load(self,NewTabel):
        # Replace the current contents with the serialized tree `NewTabel`.
        return self.tree.load(NewTabel)
    def save(self):
        # Serialize the current contents to a dict (see BST.save).
        return self.tree.save()
    def tableDelete(self,key):
        # Remove the item stored under `key`; returns True on success.
        return self.tree.searchTreeDelete(key)
| MenuaSoftware/CinepolisSystem | Project/Denis/BST.py | BST.py | py | 12,220 | python | nl | code | 0 | github-code | 36 |
#https://open.kattis.com/problems/detaileddifferences
#setting test case variable
testCase = input()
# One iteration per test case. The original reused `i` for both the outer
# and the inner loop; the inner loop shadowed the outer variable.
for _case in range(int(testCase)):
    # read and echo line 1
    line1 = input()
    print(line1)
    # read and echo line 2
    line2 = input()
    print(line2)
    # build the difference row: '.' where characters match, '*' where they
    # differ (both lines have the same length per problem statement)
    diff = []
    for pos in range(len(line1)):
        diff.append("." if line1[pos] == line2[pos] else "*")
    # entries are already strings, so join them directly
    print("".join(diff))
| teddcp2/Tensorflow-Deep-Learning-notes | python/detailed_differences_1.4.py | detailed_differences_1.4.py | py | 961 | python | en | code | 0 | github-code | 36 |
28692875111 | # 2023.09.19
# Introduction to Big Data
# userListHeader.py
# Implements the zip and enumerate functions
"""
myZip function
parameter: *args 가변인자, 매개변수의 수가 변할 수 있음
여러 데이터가 합쳐진 형태를 튜플로 리턴 // list가 아니어도 묶을 수 있다
이때 가장 짧은 길이의 데이터에 맞춤
"""
def myZip(*args):
    """Reimplementation of built-in zip(): combine the i-th elements of each
    input sequence into tuples, truncating to the shortest input.

    :param args: any number of indexable sequences (need not be lists).
    :return: list of tuples; empty list when no sequences are given.
    """
    if not args:
        # min() over an empty sequence raises ValueError, but zip() with no
        # arguments yields nothing — mirror that behavior here.
        return []
    min_length = min(len(arg) for arg in args)
    result = []
    for i in range(min_length):
        result.append(tuple(arg[i] for arg in args))
    return result
"""
myEnumerate function
parameter: <list> list_a
여러 데이터가 합쳐진 형태를 리스트로 리턴
"""
def myEnumerate(list_a):
    """Reimplementation of enumerate(): pair each element with its position.

    :param list_a: <list> the sequence to enumerate
    :return: list of [index, element] pairs
    """
    pairs = []
    position = 0
    for element in list_a:
        pairs.append([position, element])
        position += 1
    return pairs
| ffvv0123/2023-R2-Big-Data | List/List_02/userListHeader.py | userListHeader.py | py | 903 | python | ko | code | 0 | github-code | 36 |
class Solution:
    def taskSchedulerII(self, tasks: list[int], space: int) -> int:
        """Return the number of days needed to run `tasks` in order when
        repeats of the same task type must be at least `space` days apart."""
        last_done: dict[int, int] = {}
        current = 0
        for task in tasks:
            # Earliest day this task type may run again; unseen types may
            # run immediately.
            earliest = last_done.get(task, current - space - 1) + space + 1
            if earliest > current:
                current = earliest
            last_done[task] = current
            current += 1
        return current
| lancelote/leetcode | src/task_scheduler_ii.py | task_scheduler_ii.py | py | 379 | python | en | code | 3 | github-code | 36 |
'''
Given the heads of two singly linked lists headA and headB, return the node at
which the two lists intersect. If the two linked lists have no intersection at
all, return null.
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB, s=None):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Walk both lists in lockstep, recording visited nodes in a set; the
        first node seen twice is the intersection. Rewritten iteratively —
        the original recursed once per node pair and hit Python's recursion
        limit on long lists. `s` is kept for backward compatibility.
        """
        seen = set() if s is None else s
        while headA or headB:
            if headA:
                if headA in seen:
                    return headA
                seen.add(headA)
                headA = headA.next
            if headB:
                if headB in seen:
                    return headB
                seen.add(headB)
                headB = headB.next
        # Both lists exhausted without a shared node.
        return None
| ChrisStewart132/LeetCode | 160. Intersection of Two Linked Lists.py | 160. Intersection of Two Linked Lists.py | py | 975 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 15:04:22 2018
@author: Nagano Masatoshi
"""
import cv2
import os
#import ffmpeg as fp
def main():
    """Split ./movie/output.mp4 into one PNG per frame under image2/.

    Fixes over the original: checks the read() return flag (a container can
    report more frames than can actually be decoded, which previously passed
    None to imwrite), releases the capture, and labels the count correctly.
    """
    filename = './movie/output.mp4'
    video = cv2.VideoCapture(filename)
    path = 'movie2'
    if not os.path.exists(path):
        os.mkdir(path)
    savepath = 'image2'
    if not os.path.exists(savepath):
        os.mkdir(savepath)
    # Property id 7 is CAP_PROP_FRAME_COUNT: the total number of frames.
    frames = int(video.get(7))
    print('frame count:', frames)
    try:
        for i in range(frames):
            ret, frame = video.read()
            if not ret:
                # Decoder ran out of frames early; stop instead of crashing.
                break
            cv2.imwrite(savepath + "/%d.png" % i, frame)
    finally:
        video.release()


if __name__ == '__main__':
    main()
class Solution(object):
    def maxArea(self, height):
        """Container With Most Water: two-pointer scan from both ends.

        :type height: List[int]
        :rtype: int
        """
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            width = right - left
            # The shorter wall caps the water level; moving it inward is the
            # only move that can possibly increase the area.
            if height[left] < height[right]:
                best = max(best, width * height[left])
                left += 1
            elif height[right] < height[left]:
                best = max(best, width * height[right])
                right -= 1
            else:
                best = max(best, width * height[left])
                left += 1
                right -= 1
        return best
22635756480 | # from django.http import HttpResponse
from django.shortcuts import render
# from random import randint
# Create your views here.
from . models import Article # 같은 폴더 안의 models에서 Article을 사용할거야
def index(request):
    """Render index.html with every Article currently in the database."""
    context = {"article_list": Article.objects.all()}
    return render(request, "index.html", context)
| jungeunlee95/python-practice | Django/src/blog/views.py | views.py | py | 696 | python | en | code | 0 | github-code | 36 |
22777255698 | # -*- coding:utf-8 -*-
import os
from os import path
import random
def check_file(path='./', ext=''):
    """Return the names of entries in `path` whose extension equals `ext`.

    `ext` must include the leading dot (e.g. '.jpg'); with the default empty
    string only extension-less entries are returned. Order follows
    os.listdir(), as before.
    """
    return [name for name in os.listdir(path)
            if os.path.splitext(name)[1] == ext]
# Build absolute-path train/val split lists (80/20) over dataset/*.jpg.
rootpath = path.dirname(path.abspath(__file__)) + '/'
#imagepaths = os.listdir(rootpath+'dataset')
# NOTE(review): from here on, `path` shadows the `os.path` module imported
# above — works because the module is not used again, but fragile.
path = rootpath + 'dataset'
imagepaths = check_file(path=path, ext='.jpg')
print(imagepaths)
# Randomly pick 1/5 of the images for validation; the rest is training data.
val = random.sample(imagepaths,int(len(imagepaths)/5))
train = imagepaths
for item in val:
    train.remove(item)
# One absolute image path per line, darknet/YOLO list-file style.
with open('train.txt','w')as trainfile:
    for path in train:
        abspath = rootpath+'dataset/'+path+'\n'
        trainfile.write(abspath)
with open('val.txt','w')as valfile:
    for path in val:
        abspath = rootpath+'dataset/'+path+'\n'
        valfile.write(abspath)
| Swall0w/Yolo-Fomat | gen_label.py | gen_label.py | py | 915 | python | en | code | 0 | github-code | 36 |
def snail(arr):
    """Fill the N x N matrix `arr` (pre-filled with zeros) with the values
    1..N*N in a clockwise spiral starting at the top-left corner.

    Mutates `arr` in place; N is taken from len(arr), so the former global
    `N` is no longer needed (callers that set it still work unchanged).
    """
    n = len(arr)
    # Direction vectors in clockwise order: right, down, left, up.
    dx = [1, 0, -1, 0]
    dy = [0, 1, 0, -1]
    direction = 0
    x = y = 0
    for num in range(1, n * n + 1):
        arr[y][x] = num
        nx, ny = x + dx[direction], y + dy[direction]
        # Turn clockwise when the next cell is off the board or already filled.
        if not (0 <= nx < n and 0 <= ny < n) or arr[ny][nx] != 0:
            direction = (direction + 1) % 4
            nx, ny = x + dx[direction], y + dy[direction]
        x, y = nx, ny
import sys
# Redirect stdin so input() reads the bundled test-case file.
sys.stdin = open('dfs.txt')
T = int(input())
for tc in range(1, T+1):
    N = int(input())
    # Start from an all-zero N x N board and fill it as a spiral.
    arr = [[0 for _ in range(N)] for _ in range(N)]
    snail(arr)
    print('#{}'.format(tc))
    for i in range(N):
        for j in range(N):
            print(arr[i][j], end=" ")
        print()
71534550504 | import cv2
import os
import torch
import torch.nn as nn
import torchvision
import argparse
import numpy as np
import copy
from torch.autograd import Variable
pa = argparse.ArgumentParser()
pa.add_argument("--input_row", type=int, default=0)
pa.add_argument("--input_col", type=int, default=0)
pa.add_argument("--input_channel", type=int, default=0)
opt = pa.parse_args()
# NOTE(review): the parsed CLI values are immediately overwritten below, so
# the command-line flags currently have no effect — confirm that is intended.
opt.input_row = 24
opt.input_col = 32
opt.input_channel = 1
print(opt)
# Train on the first CUDA device when available, otherwise on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
print("++++++++++Your CUDA is available!++++++++++" if torch.cuda.is_available() else "----------No CUDA available!----------")
def readDataL(num):
    """Load every image of class `num` from the "L" folder into one tensor.

    Returns a float tensor of shape (n_images, opt.input_channel,
    opt.input_row, opt.input_col) holding the grayscale pixel values.
    Fix: the tensor shape now uses opt.input_* instead of duplicating the
    hard-coded 1/24/32, and paths are joined portably.
    """
    paths = os.path.join(os.getcwd(), "RobomasterNumData", str(num) + "L")
    filelist = os.listdir(paths)
    AllDataSub = torch.rand(len(filelist), opt.input_channel, opt.input_row, opt.input_col)
    for i in range(len(filelist)):
        I = cv2.imread(os.path.join(paths, filelist[i]))
        I = cv2.resize(I, (opt.input_col, opt.input_row))
        # The source images are 4-channel; collapse them to grayscale.
        I = cv2.cvtColor(I, cv2.COLOR_BGRA2GRAY)
        AllDataSub[i, 0, :, :] = torch.from_numpy(I)
    return AllDataSub
def readDataS(num):
    """Load every image of class `num` from the "S" folder into one tensor.

    Returns a float tensor of shape (n_images, opt.input_channel,
    opt.input_row, opt.input_col) holding the grayscale pixel values.
    Fix: the tensor shape now uses opt.input_* instead of duplicating the
    hard-coded 1/24/32, and paths are joined portably.
    """
    paths = os.path.join(os.getcwd(), "RobomasterNumData", str(num) + "S")
    filelist = os.listdir(paths)
    AllDataSub = torch.rand(len(filelist), opt.input_channel, opt.input_row, opt.input_col)
    for i in range(len(filelist)):
        I = cv2.imread(os.path.join(paths, filelist[i]))
        I = cv2.resize(I, (opt.input_col, opt.input_row))
        # The source images are 4-channel; collapse them to grayscale.
        I = cv2.cvtColor(I, cv2.COLOR_BGRA2GRAY)
        AllDataSub[i, 0, :, :] = torch.from_numpy(I)
    return AllDataSub
class Num_Reg(nn.Module):
    """Small CNN that classifies a 1x24x32 grayscale digit crop into 16 classes.

    Architecture: two Conv->ReLU->MaxPool stages followed by two fully
    connected layers. Attribute names (convA/convB/f1/f2) are part of the
    pickled model saved with torch.save, so they must not be renamed.
    """
    def __init__(self):
        super(Num_Reg, self).__init__()
        # Stage A: 1 -> 10 channels, 5x5 same-padding conv, then 2x2 pool
        # (24x32 -> 12x16).
        self.convA = nn.Sequential(
            nn.Conv2d(
                in_channels=opt.input_channel,
                out_channels=10,
                kernel_size=5,
                stride=1,
                padding=2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Stage B: 10 -> 30 channels, then another 2x2 pool (12x16 -> 6x8).
        self.convB = nn.Sequential(
            nn.Conv2d(
                in_channels=10,
                out_channels=30,
                kernel_size=5,
                stride=1,
                padding=2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Flattened feature size after stage B: 6*8 spatial * 30 channels = 1440.
        self.f1 = nn.Linear(8 * 6 * 1 * 30, 200)
        self.f2 = nn.Linear(200, 16)

    def forward(self, x):
        """Map a (batch, 1, 24, 32) tensor to (batch, 16) class scores."""
        x = self.convA(x)
        x = self.convB(x)
        x = x.view(x.size(0), -1)  # flatten: -1 infers the feature dimension
        x = self.f1(x)
        x = self.f2(x)
        return x
Recognizer = Num_Reg()
print("Your Model's structure: ", Recognizer)
# prepare the data: 16 classes = digits 1-8 from the "L" set + 1-8 from the "S" set
AllData1 = readDataL(1)
AllData2 = readDataL(2)
AllData3 = readDataL(3)
AllData4 = readDataL(4)
AllData5 = readDataL(5)
AllData6 = readDataL(6)
AllData7 = readDataL(7)
AllData8 = readDataL(8)
AllData1s = readDataS(1)
AllData2s = readDataS(2)
AllData3s = readDataS(3)
AllData4s = readDataS(4)
AllData5s = readDataS(5)
AllData6s = readDataS(6)
AllData7s = readDataS(7)
AllData8s = readDataS(8)
# Stack all class tensors along the sample axis (assumes 50 images per class).
AllData = torch.cat((AllData1, AllData2, AllData3, AllData4, AllData5, AllData6, AllData7, AllData8,
                     AllData1s, AllData2s, AllData3s, AllData4s, AllData5s, AllData6s, AllData7s, AllData8s), 0)
# prepare the label: 50 consecutive samples per class -> labels 0..15
label = np.zeros((50 * 16), int)
k = 0
for i in range(16):
    for j in range(50):
        label[k] = i
        k = k + 1
        # print(k, " ", label[k])
label = torch.from_numpy(label)
# test your label and your data: dump each sample as "<index>-<label>.jpg" for eyeballing
for j in range(800):
    cv2.imwrite("./TestImage/" + str(j) + "-" + str(label.numpy()[j]) + ".jpg", AllData[j, 0, :, :].numpy())
print("Here is your data's shape", AllData.shape)
print("Here is your label's shape", label.shape)
# exit(128)
# shuffle it: permute data and labels with the same random index array
shuffle_kernel = np.arange(0, 800, 1)
np.random.shuffle(shuffle_kernel)
labeltemp = label.numpy()
labeltemp1 = copy.deepcopy(labeltemp)
AllDataTemp = copy.deepcopy(AllData)
for n in range(800):
    AllDataTemp[n, :, :, :] = AllData[shuffle_kernel[n], :, :, :]
    labeltemp1[n] = labeltemp[shuffle_kernel[n]]
label = torch.from_numpy(labeltemp1)
# Second visual sanity dump, now in shuffled order.
for n in range(800):
    cv2.imwrite("./TestImage2/" + str(n) + "-" + str(label.numpy()[n]) + ".jpg", AllDataTemp[n, 0, :, :].numpy())
AllData = copy.deepcopy(AllDataTemp)
# print("labeltemp1", labeltemp1)
# print("label", label)
# exit(255)
# optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
optimizer = torch.optim.Adam(Recognizer.parameters(), lr=0.001)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                         # the target label is not one-hotted
loss_func.cuda(device)
Recognizer.cuda(device)
# Training: one sample at a time over the first 750 shuffled samples
# (a single pass; the last 50 samples are held out for evaluation below).
for num in range(750):
    xx = AllData[num, :, :, :]
    x = torch.rand(1, 1, 24, 32)
    x[0, 0, :, :] = xx
    # Scale raw 0-255 pixel values into [0, 1].
    x = x / 255.0
    x = x.cuda(device)
    # print(type(x), x.is_floating_point(), x.dtype)
    label = label.cuda(device)
    y = Recognizer(x)[0]
    y = y.reshape(1, 16)
    y = y.cuda(device)
    # print(type(y), y.is_floating_point(), y.dtype)
    labelv = Variable(label[num])
    labelv = labelv.reshape(1)
    # labelv = labelv.to(torch.int64)
    labelv = labelv.cuda(device)
    # print(type(labelv), labelv.is_floating_point(), labelv.dtype)
    # print(y.shape, labelv)
    loss = loss_func(y, labelv)   # cross entropy loss
    optimizer.zero_grad()         # clear gradients for this training step
    loss.backward()               # backpropagation, compute gradients
    optimizer.step()              # apply gradients
    print("num: ", num)
print("OVER! ")
# Evaluation: print predicted vs. real class for the 50 held-out samples.
for rr in range(50):
    rrr = rr+750
    xx = AllData[rrr, :, :, :]
    x = torch.rand(1, 1, 24, 32)
    x[0, 0, :, :] = xx
    x = x/255.0
    # print(x.shape)
    x = x.cuda()
    test_output = Recognizer(x)[0]
    test_output = test_output.cpu()
    test_output = test_output.reshape(1, 16)
    # print(test_output)
    # argmax over the 16 class scores is the predicted class.
    pred_y = torch.max(test_output, 1)[1].data.numpy()
    label = label.cpu()
    # print(rr, 'prediction number', pred_y, 'real number', label[rrr], test_output, pred_y == label[rrr].numpy())
    print(rr, 'prediction number', pred_y, 'real number', label[rrr], pred_y == label[rrr].numpy())
# Persist the whole model object (pickle via torch.save).
torch.save(Recognizer, "./RoboMaterData.t7")
print("OVER! ")
def reverse_string_1(s):
    """Return `s` with its whitespace-separated words in reverse order."""
    return ' '.join(s.split()[::-1])
def reverse_string(s):
    """Split `s` into its space-separated words and return them as a list.

    Manual scanner version: walks the string and collects each maximal run
    of non-space characters. Fix: the inner loop now tests the character
    `s[i]` against the space set; the original tested the integer index
    (`i not in spaces`), which is never true, so the whole remainder of the
    string came back as one "word".
    """
    length = len(s)
    words = list()
    spaces = [' ']
    i = 0
    while i < length:
        if s[i] not in spaces:
            word_start = i
            while i < length and s[i] not in spaces:
                i += 1
            words.append(s[word_start:i])
        i += 1
    return words
# print(reverse_string_1('Hello World this is Sam'))
# print(reverse_string('Hello World this is Sam'))
# st = []
# lst = 'abcdefghi'
# st.append(lst[0:2])
# st.append(lst[2:4])
# print(st)
# def rev(s):
# return s.split()[::-1]
# print(rev('Hello World this is Sam'))
# Count the distinct values in a list by converting it to a set.
ls = [1, 1, 2]
s = set(ls)
print(len(s))
| XingzheZhao/Coding_Docs | problems/reverseString.py | reverseString.py | py | 924 | python | en | code | 0 | github-code | 36 |
4500647945 | """
This unit test exercises the iXBRL transformation rules.
Inline XBRL facts carry a `format` attribute (e.g. "datedoteu", "numcomma")
defined by one of several transformation registries. The `normalize` function
must convert such locale-formatted raw values into their canonical XBRL
representation (ISO dates, plain decimal numbers, booleans, fixed values).
Each test case below checks one (registry, format, input) triple against its
expected canonical output.
"""
import logging
import sys
import unittest
from xbrl.transformations import normalize, TransformationException
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
testTransforms = {
"http://www.xbrl.org/inlineXBRL/transformation/2010-04-20": [
# [format,value,expected]
['datedoteu', '17.07.22', '2022-07-17'],
['datedoteu', '17.07.2022', '2022-07-17'],
['datedotus', '07.17.22', '2022-07-17'],
['datedotus', '07.17.2022', '2022-07-17'],
['datelonguk', '17 July 2022', '2022-07-17'],
['datelonguk', '17 July 22', '2022-07-17'],
['datelonguk', '7 February 2022', '2022-02-07'],
['datelongus', 'July 17, 2022', '2022-07-17'],
['datelongus', 'July 17, 22', '2022-07-17'],
['datelongus', 'February 7, 2022', '2022-02-07'],
['dateshortuk', '17 Jul. 2022', '2022-07-17'],
['dateshortuk', '17 Jul. 22', '2022-07-17'],
['dateshortuk', '7 Feb. 2022', '2022-02-07'],
['dateshortus', 'Jul. 17, 2022', '2022-07-17'],
['dateshortus', 'Jul. 17, 22', '2022-07-17'],
['dateshortus', 'Feb. 7, 2022', '2022-02-07'],
['dateslasheu', '17/07/2022', '2022-07-17'],
['dateslasheu', '17/07/22', '2022-07-17'],
['dateslashus', '07/17/2022', '2022-07-17'],
['dateslashus', '07/17/22', '2022-07-17'],
['datelongdaymonthuk', '17 July', '--07-17'],
['datelongdaymonthuk', '7 February', '--02-07'],
['datelongmonthdayus', 'July 17', '--07-17'],
['datelongmonthdayus', 'February 7', '--02-07'],
['dateshortdaymonthuk', '17 Jul.', '--07-17'],
['dateshortdaymonthuk', '7 Feb', '--02-07'],
['dateshortmonthdayus', 'Jul. 17', '--07-17'],
['dateshortmonthdayus', 'Feb 7', '--02-07'],
['dateslashdaymontheu', '7/2', '--02-07'],
['dateslashdaymontheu', '07/02', '--02-07'],
['dateslashmonthdayus', '2/7', '--02-07'],
['dateslashmonthdayus', '02/07', '--02-07'],
['datelongyearmonth', '22 February', '2022-02'],
['datelongyearmonth', '2022 February', '2022-02'],
['dateshortyearmonth', '22 Feb.', '2022-02'],
['dateshortyearmonth', '2022 Feb', '2022-02'],
['datelongmonthyear', 'February 22', '2022-02'],
['datelongmonthyear', 'February 2022', '2022-02'],
['dateshortmonthyear', 'Feb. 22', '2022-02'],
['dateshortmonthyear', 'Feb 2022', '2022-02'],
['numcomma', '1400,40', '1400.40'],
['numcomma', '1,40', '1.40'],
['numcommadot', '1,400.40', '1400.40'],
['numcommadot', '1,000,400.40', '1000400.40'],
['numdash', 'any value', '0'],
['numdotcomma', '123.123.123,12', '123123123.12'],
['numspacecomma', '123 123 123,12', '123123123.12'],
['numspacedot', '123 123 123.12', '123123123.12']
],
"http://www.xbrl.org/inlineXBRL/transformation/2011-07-31": [
# [format,value,expected]
['booleanfalse', 'any string', 'false'],
['booleantrue', 'any string', 'true'],
['datedaymonth', '2.1', '--01-02'],
['datedaymonth', '02*01', '--01-02'],
['datedaymonthen', '2. January', '--01-02'],
['datedaymonthen', '02*Jan', '--01-02'],
['datedaymonthyear', '2*1*22', '2022-01-02'],
['datedaymonthyear', '02 01 2022', '2022-01-02'],
['datedaymonthyearen', '2*Jan*22', '2022-01-02'],
['datedaymonthyearen', '02 January 2022', '2022-01-02'],
['datemonthday', '2 1', '--02-01'],
['datemonthday', '02-01', '--02-01'],
['datemonthdayen', 'Feb. 1', '--02-01'],
['datemonthdayen', 'February 1', '--02-01'],
['datemonthdayyear', '12.01.99', '1999-12-01'],
['datemonthdayyear', '12.01.1999', '1999-12-01'],
['datemonthdayyearen', 'Dec. 1 99', '1999-12-01'],
['datemonthdayyearen', 'December 01 99', '1999-12-01'],
['datemonthyearen', 'Dec. 99', '1999-12'],
['datemonthyearen', 'December 1999', '1999-12'],
['dateyearmonthen', '99 Dec.', '1999-12'],
['dateyearmonthen', '1999 December', '1999-12'],
['nocontent', 'any string', ''],
['numcommadecimal', '123 123 123,123', '123123123.123'],
['numcommadecimal', '123.123.123,123', '123123123.123'],
['numdotdecimal', '123 123 123.123', '123123123.123'],
['numdotdecimal', '123,123,123.123', '123123123.123'],
['zerodash', '-', '0'],
],
"http://www.xbrl.org/inlineXBRL/transformation/2015-02-26": [
# [format,value,expected]
['booleanfalse', 'nope', 'false'],
['booleantrue', 'yeah', 'true'],
['datedaymonth', '11.12', '--12-11'],
['datedaymonth', '1.2', '--02-01'],
['datedaymonthen', '2. December', '--12-02'],
['datedaymonthen', '2 Sept.', '--09-02'],
['datedaymonthen', '14. april', '--04-14'],
['datedaymonthyear', '2.12.2021', '2021-12-02'],
['datedaymonthyear', '1.1.99', '1999-01-01'],
['datedaymonthyear', '18. 02 2022', '2022-02-18'],
['datedaymonthyearen', '02. December 2021', '2021-12-02'],
['datedaymonthyearen', '13. Dec. 21', '2021-12-13'],
['datedaymonthyearen', '1 Feb 99', '1999-02-01'],
['datemonthday', '1.2', '--01-02'],
['datemonthday', '12-1', '--12-01'],
['datemonthday', '1.30', '--01-30'],
['datemonthdayen', 'Jan 02', '--01-02'],
['datemonthdayen', 'February 13', '--02-13'],
['datemonthdayen', 'sept. 1', '--09-01'],
['datemonthdayyear', '12-30-2021', '2021-12-30'],
['datemonthdayyear', '2-16-22', '2022-02-16'],
['datemonthdayyear', '2-1-2019', '2019-02-01'],
['datemonthdayyearen', 'March 31, 2021', '2021-03-31'],
['datemonthdayyearen', 'Dec. 31, 22', '2022-12-31'],
['datemonthdayyearen', 'april 12 2021', '2021-04-12'],
['datemonthyear', '12 2021', '2021-12'],
['datemonthyear', '1 22', '2022-01'],
['datemonthyear', '02-1999', '1999-02'],
['datemonthyearen', 'December 2021', '2021-12'],
['datemonthyearen', 'apr. 22', '2022-04'],
['datemonthyearen', 'Sept. 2000', '2000-09'],
['dateyearmonthday', '2021.12.31', '2021-12-31'],
['dateyearmonthday', '2021 1 31', '2021-01-31'],
['dateyearmonthday', '22-1-1', '2022-01-01'],
['dateyearmonthen', '2021 December', '2021-12'],
['dateyearmonthen', '22 sept.', '2022-09'],
['dateyearmonthen', '21.apr.', '2021-04'],
['nocontent', 'Bla bla', ''],
['numcommadecimal', '1.499,99', '1499.99'],
['numcommadecimal', '100*499,999', '100499.999'],
['numcommadecimal', '0,5', '0.5'],
['numdotdecimal', '1,499.99', '1499.99'],
['numdotdecimal', '1*499', '1499'],
['numdotdecimal', '1,000,000.5', '1000000.5'],
['zerodash', '--', '0'],
],
"http://www.xbrl.org/inlineXBRL/transformation/2020-02-12": [
# [format,value,expected]
['date-day-month', '1.1', '--01-01'],
['date-day-month', '31-12', '--12-31'],
['date-day-month', '27*2', '--02-27'],
['date-day-month-year', '1-2-20', '2020-02-01'],
['date-day-month-year', '1-02-20', '2020-02-01'],
['date-day-month-year', '01 02 2020', '2020-02-01'],
['date-day-monthname-en', '1. sept.', '--09-01'],
['date-day-monthname-en', '01. sep.', '--09-01'],
['date-day-monthname-en', '30 August', '--08-30'],
['date-day-monthname-year-en', '30 August 22', '2022-08-30'],
['date-day-monthname-year-en', '01 Aug 22', '2022-08-01'],
['date-day-monthname-year-en', '1 Aug 2022', '2022-08-01'],
['date-month-day', '1 31', '--01-31'],
['date-month-day', '01-31', '--01-31'],
['date-month-day', '12.1', '--12-01'],
['date-month-day-year', '12. 1 22', '2022-12-01'],
['date-month-day-year', '01/12/2022', '2022-01-12'],
['date-month-day-year', '01.12.2022', '2022-01-12'],
['date-month-year', '1*22', '2022-01'],
['date-month-year', '01 22', '2022-01'],
['date-month-year', '12.2022', '2022-12'],
['date-monthname-day-en', 'April/1', '--04-01'],
['date-monthname-day-en', 'Sept./20', '--09-20'],
['date-monthname-day-en', 'december 31', '--12-31'],
['date-monthname-day-year-en', 'december 31, 22', '2022-12-31'],
['date-monthname-day-year-en', 'dec. 31, 2022', '2022-12-31'],
['date-monthname-day-year-en', 'dec. 1, 2022', '2022-12-01'],
['date-year-month', '99/1', '1999-01'],
['date-year-month', '2022 - 12', '2022-12'],
['date-year-month', '2022 -/ 1', '2022-01'],
['date-year-month-day', ' 22-1-2 ', '2022-01-02'],
['date-year-month-day', ' 2022/1/2 ', '2022-01-02'],
['date-year-month-day', ' 22/01/02 ', '2022-01-02'],
['date-year-monthname-en', '22/december', '2022-12'],
['date-year-monthname-en', '22/dec.', '2022-12'],
['date-year-monthname-en', '2022-dec', '2022-12'],
['fixed-empty', 'some text', ''],
['fixed-false', 'some text', 'false'],
['fixed-true', 'some text', 'true'],
['fixed-zero', 'some text', '0'],
['num-comma-decimal', '1.499,99', '1499.99'],
['num-comma-decimal', '100*499,999', '100499.999'],
['num-comma-decimal', '0,5', '0.5'],
['num-dot-decimal', '1,499.99', '1499.99'],
['num-dot-decimal', '1*499', '1499'],
['num-dot-decimal', '1,000,000.5', '1000000.5'],
],
"http://www.sec.gov/inlineXBRL/transformation/2015-08-31": [
# [format,value,expected]
['duryear', '-22.3456', '-P22Y4M4D'],
['duryear', '21.84480', 'P21Y10M5D'],
['duryear', '+0.3456', 'P0Y4M4D'],
['durmonth', '22.3456', 'P22M10D'],
['durmonth', '-0.3456', '-P0M10D'],
['durwordsen', 'Five years, two months', 'P5Y2M0D'],
['durwordsen', '9 years, 2 months', 'P9Y2M0D'],
['durwordsen', '12 days', 'P0Y0M12D'],
['durwordsen', 'ONE MONTH AND THREE DAYS', 'P0Y1M3D'],
['numwordsen', 'no', '0'],
['numwordsen', 'None', '0'],
['numwordsen', 'nineteen hundred forty-four', '1944'],
['numwordsen', 'Seventy Thousand and one', '70001'],
['boolballotbox', '☐', 'false'],
['boolballotbox', '☐', 'false'],
['boolballotbox', '☑', 'true'],
['boolballotbox', '☑', 'true'],
['boolballotbox', '☒', 'true'],
['boolballotbox', '☒', 'true'],
['exchnameen', 'The New York Stock Exchange', 'NYSE'],
['exchnameen', 'New York Stock Exchange LLC', 'NYSE'],
['exchnameen', 'NASDAQ Global Select Market', 'NASDAQ'],
['exchnameen', 'The Nasdaq Stock Market LLC', 'NASDAQ'],
['exchnameen', 'BOX Exchange LLC', 'BOX'],
['exchnameen', 'Nasdaq BX, Inc.', 'BX'],
['exchnameen', 'Cboe C2 Exchange, Inc.', 'C2'],
['exchnameen', 'Cboe Exchange, Inc.', 'CBOE'],
['exchnameen', 'Chicago Stock Exchange, Inc.', 'CHX'],
['exchnameen', 'Cboe BYX Exchange, Inc.', 'CboeBYX'],
['exchnameen', 'Cboe BZX Exchange, Inc.', 'CboeBZX'],
['exchnameen', 'Cboe EDGA Exchange, Inc.', 'CboeEDGA'],
['exchnameen', 'Cboe EDGX Exchange, Inc.', 'CboeEDGX'],
['exchnameen', 'Nasdaq GEMX, LLC', 'GEMX'],
['exchnameen', 'Investors Exchange LLC', 'IEX'],
['exchnameen', 'Nasdaq ISE, LLC', 'ISE'],
['exchnameen', 'Miami International Securities Exchange', 'MIAX'],
['exchnameen', 'Nasdaq MRX, LLC', 'MRX'],
['exchnameen', 'NYSE American LLC', 'NYSEAMER'],
['exchnameen', 'NYSE Arca, Inc.', 'NYSEArca'],
['exchnameen', 'NYSE National, Inc.', 'NYSENAT'],
['exchnameen', 'MIAX PEARL, LLC', 'PEARL'],
['exchnameen', 'Nasdaq PHLX LLC', 'Phlx'],
['stateprovnameen', 'Alabama', 'AL'],
['stateprovnameen', 'Alaska', 'AK'],
['stateprovnameen', 'Arizona', 'AZ'],
['stateprovnameen', 'Arkansas', 'AR'],
['stateprovnameen', 'California', 'CA'],
['stateprovnameen', 'Colorado', 'CO'],
['stateprovnameen', 'Connecticut', 'CT'],
['stateprovnameen', 'Delaware', 'DE'],
['stateprovnameen', 'Florida', 'FL'],
['stateprovnameen', 'Georgia', 'GA'],
['stateprovnameen', 'Hawaii', 'HI'],
['stateprovnameen', 'Idaho', 'ID'],
['stateprovnameen', 'Illinois', 'IL'],
['stateprovnameen', 'Indiana', 'IN'],
['stateprovnameen', 'Iowa', 'IA'],
['stateprovnameen', 'Kansas', 'KS'],
['stateprovnameen', 'Kentucky', 'KY'],
['stateprovnameen', 'Louisiana', 'LA'],
['stateprovnameen', 'Maine', 'ME'],
['stateprovnameen', 'Maryland', 'MD'],
['stateprovnameen', 'Massachusetts', 'MA'],
['stateprovnameen', 'Michigan', 'MI'],
['stateprovnameen', 'Minnesota', 'MN'],
['stateprovnameen', 'Mississippi', 'MS'],
['stateprovnameen', 'Missouri', 'MO'],
['stateprovnameen', 'Montana', 'MT'],
['stateprovnameen', 'Nebraska', 'NE'],
['stateprovnameen', 'Nevada', 'NV'],
['stateprovnameen', 'New Hampshire', 'NH'],
['stateprovnameen', 'New Jersey', 'NJ'],
['stateprovnameen', 'New Mexico', 'NM'],
['stateprovnameen', 'New York', 'NY'],
['stateprovnameen', 'North Carolina', 'NC'],
['stateprovnameen', 'North Dakota', 'ND'],
['stateprovnameen', 'Ohio', 'OH'],
['stateprovnameen', 'Oklahoma', 'OK'],
['stateprovnameen', 'Oregon', 'OR'],
['stateprovnameen', 'Pennsylvania', 'PA'],
['stateprovnameen', 'Rhode Island', 'RI'],
['stateprovnameen', 'South Carolina', 'SC'],
['stateprovnameen', 'South dakota', 'SD'],
['stateprovnameen', 'Tennessee', 'TN'],
['stateprovnameen', 'Texas', 'TX'],
['stateprovnameen', 'Utah', 'UT'],
['stateprovnameen', 'Vermont', 'VT'],
['stateprovnameen', 'Virginia', 'VA'],
['stateprovnameen', 'Washington', 'WA'],
['stateprovnameen', 'Washington D.C.', 'DC'],
['stateprovnameen', 'West Virginia', 'WV'],
['stateprovnameen', 'Wisconsin', 'WI'],
['stateprovnameen', 'Wyoming', 'WY'],
['entityfilercategoryen', 'accelerated filer', 'Accelerated Filer']
]
}
class TransformationTest(unittest.TestCase):

    def test_normalize(self):
        """Run every (format, input, expected) triple of every registry
        through normalize() and compare against the expected output.
        """
        for registry, cases in testTransforms.items():
            for case in cases:
                fmt, raw_value, expected = case
                if expected == 'exception':
                    # normalize() must reject this value outright.
                    try:
                        result = normalize(registry, fmt, raw_value)
                    except TransformationException:
                        continue
                    self.fail('Expected Transformation Exception, received ' + result)
                else:
                    result = normalize(registry, fmt, raw_value)
                    self.assertEqual(expected, result, msg=f'Failed at test case {case} of registry {registry}')


if __name__ == '__main__':
    unittest.main()
| manusimidt/py-xbrl | tests/test_transformation.py | test_transformation.py | py | 15,723 | python | en | code | 78 | github-code | 36 |
34772355909 | #! //Users/tyt15771/miniconda3/envs/pymol/bin/python
from pymol import cmd
import json
import os
import argparse
# CLI: paths to the ligand and protein structures plus a target name used
# to build the output directory.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-l",
    "--lig_path", required=True,
)
parser.add_argument(
    "-p",
    "--prot_path", required=True,
)
parser.add_argument(
    "-t",
    "--target", required=True,
)
args = vars(parser.parse_args())
lig = args["lig_path"]
prot = args["prot_path"]
target = args["target"]
# bits.json maps each fingerprint bit to a list of
# [lig_atom, lig_depth, prot_atom, prot_depth] entries (see loop below).
bits = json.load(open('bits.json', 'r'))
dirname = f'{os.getcwd()}/{target}_complexes'
def extend_save(filepath, moltype, atom, depth, num, bit):
    """Select atom `atom` in the structure at `filepath`, grow the selection
    by `depth` bonds, and save it as a PDB under the bit's output folder.
    """
    structure_name = filepath.split('/')[-1].split('.')[0]
    cmd.reinitialize()
    cmd.load(filepath)
    # Grow outward from the root atom by `depth` bonds.
    cmd.select('root', f'index {atom}')
    cmd.select('ext', f'root extend {depth}')
    cmd.save(f'{dirname}/{bit}/{structure_name}_{moltype}{num}.pdb', 'ext')
# For each fingerprint bit, export every (ligand, protein) environment pair
# that contributes to the bit into its own subdirectory.
for bit in bits:
    num = 0
    for pair in bits[bit]:
        if not os.path.isdir(f'{dirname}/{bit}'):
            os.mkdir(f'{dirname}/{bit}')
        # pair layout: [lig_atom, lig_depth, prot_atom, prot_depth]
        lig_atom = pair[0]
        lig_depth = pair[1]
        prot_atom = pair[2]
        prot_depth = pair[3]
        extend_save(lig, 'lig', lig_atom, lig_depth, num, bit)
        extend_save(prot, 'prot', prot_atom, prot_depth, num, bit)
        num += 1
| xchem/PLEC | generate_complexes.py | generate_complexes.py | py | 1,280 | python | en | code | 0 | github-code | 36 |
19448716950 | import cv2
import os
import re
import mediapipe as mp
import pandas as pd
# Initialize mediapipe lib: pose detector plus drawing helpers.
mpPose = mp.solutions.pose
pose = mpPose.Pose()
mpDraw = mp.solutions.drawing_utils
# Accumulates one flattened landmark row per processed frame.
lm_list = []
# NOTE(review): `label` and `no_of_frames` are not referenced by the code
# visible here — presumably used when calling save_landmark_to_csv; confirm.
label = "FALLBACK"
no_of_frames = 200
def make_landmark_timestep(results):
    """Flatten one frame's pose landmarks into a single feature list.

    Each landmark contributes (x, y, z, visibility), in landmark order.
    """
    features = []
    for landmark in results.pose_landmarks.landmark:
        features.extend((landmark.x, landmark.y, landmark.z, landmark.visibility))
    return features
def draw_landmark_on_image(mpDraw, results, frame):
    """Overlay the detected pose skeleton and landmark dots on `frame`."""
    # Skeleton: lines between connected landmark pairs.
    mpDraw.draw_landmarks(frame, results.pose_landmarks, mpPose.POSE_CONNECTIONS)
    # Dots: one filled circle per landmark, scaled from normalized
    # coordinates to pixel coordinates.
    height, width, _ = frame.shape
    for landmark in results.pose_landmarks.landmark:
        px, py = int(landmark.x * width), int(landmark.y * height)
        cv2.circle(frame, (px, py), 5, (255, 0, 0), cv2.FILLED)
    return frame
# Set the path of all the videos, one folder per activity class.
# NOTE(review): only some of these are passed to read_video in the code
# visible here; the rest presumably feed later calls — confirm.
video_folder_fallback = '/home/yuu/Documents/PBL5-demo/Data/Fall_backwards'
video_folder_fallforward = '/home/yuu/Documents/PBL5-demo/Data/Fall_forward'
video_folder_fallleft = '/home/yuu/Documents/PBL5-demo/Data/Fall_left'
video_folder_fallright = '/home/yuu/Documents/PBL5-demo/Data/Fall_right'
video_folder_fallsitting = '/home/yuu/Documents/PBL5-demo/Data/Fall_sitting'
video_folder_walk = '/home/yuu/Documents/PBL5-demo/Data/Walk'
def read_video(video_folder):
    """Run pose detection over every video in `video_folder`.

    For each decodable frame, appends the flattened landmark features to the
    module-level `lm_list` and shows the annotated frame; pressing 'q' skips
    to the next video. Idiom fixes only: `not cap.isOpened()` instead of
    `== False`, and a guard-clause `break` instead of a trailing else.
    """
    for video in os.listdir(video_folder):
        video_path = os.path.join(video_folder, video)
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print("Error opening video stream or file")
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # Mediapipe expects RGB input; OpenCV decodes frames as BGR.
            frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = pose.process(frameRGB)
            if results.pose_landmarks:
                # Collect this frame's features and draw the skeleton overlay.
                lm_list.append(make_landmark_timestep(results))
                frame = draw_landmark_on_image(mpDraw, results, frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
# Save the landmarks to a csv file
def save_landmark_to_csv(label):
    """Write the collected landmark rows (module-level lm_list) to '<label>.csv'."""
    pd.DataFrame(lm_list).to_csv(label + '.csv')
| nt-myduyen/demo-human-detection | read-data.py | read-data.py | py | 2,663 | python | en | code | 0 | github-code | 36 |
27078162265 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from score.items import ScoreItem
from sunburnt import SolrInterface
class ScoreSpider(CrawlSpider):
    """Crawl yesterday's football results from matchendirect.fr and index
    them into a local Solr instance (Python 2 / old Scrapy codebase).

    parse_start_url handles the listing page (teams + final score);
    parse_score handles each match detail page (scorers, goal times,
    stadium, referee).
    """
    name = 'score'
    allowed_domains = ['matchendirect.fr']
    start_urls = ['http://www.matchendirect.fr/hier/']
    # Follow only match detail pages from the listing.
    rules = [Rule(SgmlLinkExtractor(allow=(r'/live-score/[a-z0-9\-]+\.html$', r'/foot-score/[a-z0-9\-]+\.html$')), 'parse_score')]

    # init solr instance
    def __init__(self, *args, **kwargs):
        super(ScoreSpider, self).__init__(*args, **kwargs)
        self.si = SolrInterface('http://localhost:8080/solr')

    # called on start urls
    # get host, visitor, scores
    def parse_start_url(self, response):
        """Extract one Solr doc per finished match on the listing page."""
        sel = Selector(response)
        # Each league section is an <h3> followed by its results table.
        leagues = sel.xpath('//h3')
        docs = []
        for league in leagues:
            table = league.xpath('following-sibling::table[@class="tableau"][1]')
            rows = table.xpath('tr')
            for row in rows:
                # if match has has started & is finished
                scoring = row.xpath('td[@class="lm4"]/a[not(span)]/text()').extract()
                isPlaying = row.xpath('td[@class="lm2_1"]').extract()
                if scoring and not isPlaying:
                    score = ScoreItem()
                    # The detail-page URL doubles as the Solr document id.
                    score['id'] = 'http://www.matchendirect.fr' + row.xpath('td[@class="lm4"]/a/@href').extract().pop()
                    score['host'] = row.xpath('td[@class="lm3"]/a/text()').extract().pop()
                    score['visitor'] = row.xpath('td[@class="lm5"]/a/text()').extract().pop()
                    # Score cell looks like "2 - 1".
                    scoringArr = scoring.pop().split(' - ')
                    score['scorehost'] = int(scoringArr[0])
                    score['scorevisitor'] = int(scoringArr[1])
                    if score['scorehost'] > score['scorevisitor']:
                        score['winner'] = score['host']
                    elif score['scorehost'] < score['scorevisitor']:
                        score['winner'] = score['visitor']
                    # Section heading looks like "Country : League".
                    leagueArr = league.xpath('a[1]/text()').extract().pop().split(' : ')
                    score['country'] = leagueArr[0]
                    score['league'] = leagueArr[1]
                    docs.append(dict(score))
        # index crawled games
        self.si.add(docs)
        self.si.commit()

    # called on followed urls
    # get game details (goal scorer & time)
    def parse_score(self, response):
        """Enrich the already-indexed match doc with goal and venue details."""
        sel = Selector(response)
        # if match has started & is finished
        scorehost = sel.xpath('//div[@id="match_score"]/div[@class="col2"]/text()').extract().pop().strip()
        scorevisitor = sel.xpath('//div[@id="match_score"]/div[@class="col3"]/text()').extract().pop().strip()
        isPlaying = sel.xpath('//div[@id="match_entete_2"]/img').extract()
        if scorehost and scorevisitor and not isPlaying:
            score = ScoreItem()
            # get already indexed data
            solr_doc = self.si.query(id=response.url).execute()
            if list(solr_doc):
                doc = solr_doc[0]
            else:
                doc = {}
            score['id'] = response.url
            # get goals
            table = sel.xpath('//table[@class="tableau match_evenement"]')
            rows = table.xpath('tr')
            score['goalscorershost'], score['goalscorersvisitor'], score['goaltimeshost'], score['goaltimesvisitor'] = ([], [], [], [])
            score['penaltytimeshost'], score['penaltytimesvisitor'], score['ogtimeshost'], score['ogtimesvisitor'] = ([], [], [], [])
            for row in rows:
                # Event icons: evenement1 = goal, evenement2 = penalty,
                # evenement7 = own goal; c1 = host column, c3 = visitor column.
                tdgoalhost = row.xpath('td[@class="c1" and span[@class="ico_evenement1"]]')
                tdpenaltyhost = row.xpath('td[@class="c1" and span[@class="ico_evenement2"]]')
                tdowngoalhost = row.xpath('td[@class="c1" and span[@class="ico_evenement7"]]')
                tdgoalvisitor = row.xpath('td[@class="c3" and span[@class="ico_evenement1"]]')
                tdpenaltyvisitor = row.xpath('td[@class="c3" and span[@class="ico_evenement2"]]')
                tdowngoalvisitor = row.xpath('td[@class="c3" and span[@class="ico_evenement7"]]')
                # Collapse the three event kinds into one "scoring event" cell.
                tdgoalhost = tdgoalhost or tdpenaltyhost or tdowngoalhost
                tdgoalvisitor = tdgoalvisitor or tdpenaltyvisitor or tdowngoalvisitor
                if tdgoalhost:
                    # Minute cell sits between the two team columns; strip the trailing "'".
                    time = tdgoalhost.xpath('following-sibling::td[@class="c2"][1]/text()').extract().pop().rstrip("'")
                    if tdpenaltyhost:
                        score['penaltytimeshost'].append(time)
                    elif tdowngoalhost:
                        score['ogtimeshost'].append(time)
                    score['goaltimeshost'].append(time)
                    score['goalscorershost'].append(tdgoalhost.xpath('a/text()').extract().pop())
                elif tdgoalvisitor:
                    time = tdgoalvisitor.xpath('preceding-sibling::td[@class="c2"][1]/text()').extract().pop().rstrip("'")
                    if tdpenaltyvisitor:
                        score['penaltytimesvisitor'].append(time)
                    elif tdowngoalvisitor:
                        score['ogtimesvisitor'].append(time)
                    score['goaltimesvisitor'].append(time)
                    score['goalscorersvisitor'].append(tdgoalvisitor.xpath('a/text()').extract().pop())
            # get time, refree & stadium
            matchinfos = sel.xpath('//table[@id="match_entete_1"]/tr/td[@class="info"]/text()').extract()
            matchinfos.pop()
            matchinfos = [x.lstrip('\n\t\r') for x in matchinfos]
            # Drop the placeholder entry shown when no referee is listed.
            if u'Arbitre : - ' in matchinfos:
                matchinfos.remove(u'Arbitre : - ')
            date = format_date(matchinfos[0])
            # Kickoff like "20h45" -> "20:45:00".
            time = matchinfos[1].split(' ')[-1].replace('h', ':') + ':00'
            score['date'] = "%sT%sZ" % (date, time)
            if len(matchinfos) >= 3:
                score['stadium'] = matchinfos[2]
            if len(matchinfos) == 4:
                score['referee'] = matchinfos[3].split(' : ')[1]
            # index all datas
            # Python 2: dict.items() returns lists, so + merges the two docs
            # (score fields override nothing since keys are disjoint or newer).
            doc = dict(doc.items() + dict(score).items())
            self.si.add(doc)
            self.si.commit()
def format_date(date):
    """Convert a French long date such as 'samedi 12 mars 2016' to 'YYYY-MM-DD'.

    The leading weekday token is discarded; the day is kept as-is (no
    zero-padding), matching the site's date format.
    """
    months = {
        u'janvier': '01', u'février': '02', u'mars': '03', u'avril': '04',
        u'mai': '05', u'juin': '06', u'juillet': '07', u'août': '08',
        u'septembre': '09', u'octobre': '10', u'novembre': '11',
        u'décembre': '12',
    }
    # drop the weekday, leaving exactly [day, month name, year]
    day, month_name, year = date.split(' ')[1:]
    return '-'.join([year, months[month_name], day])
| h4k1m0u/matchendirect-crawl | score/score/spiders/score_spider.py | score_spider.py | py | 6,947 | python | en | code | 0 | github-code | 36 |
29871880593 | from fastapi import APIRouter, HTTPException, status, Body
from fastapi.param_functions import Depends
from fastapi.security.oauth2 import OAuth2PasswordRequestForm
from sqlalchemy.orm.session import Session
from db.database import get_db
from auth import oauth2
from utils.hash import Hash
from jose import jwt
from jose.exceptions import JWTError
from db.db_role import get_role_by_userID
from db.db_function import get_func_by_roleID
from db.db_user import get_user_by_username
from schemas import RefreshTokenRequest
# Router exposing the authentication endpoints under the /auth prefix.
router = APIRouter(
    prefix='/auth',
    tags=['authentication']
)
@router.post('/login')
def login(request: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
    """Authenticate a user and issue an access/refresh JWT pair.

    Returns the tokens plus the user's name and the list of API paths the
    user's role is allowed to call. Invalid credentials map to 404; a
    database connectivity failure maps to 500.
    """
    connection = None
    cursor = None
    try:
        # sanity-check that the raw DB connection is usable before authenticating
        connection = db.connection().connection
        cursor = connection.cursor()
    except Exception:
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail='Cannot connect to database.')
    try:
        user = get_user_by_username(request.username, db)
        if not user or not Hash.verify(user.PASSWORD, request.password):
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials")
        user_role = get_role_by_userID(user, db)
        result = get_func_by_roleID(user_role, db)
        access_token = oauth2.create_access_token(data={'sub': user.USER_NAME, 'role_id': user_role.ROLE_ID})
        refresh_token = oauth2.create_refresh_token(data={'sub': user.USER_NAME, 'role_id': user_role.ROLE_ID})
        return {
            'accessToken': access_token,
            'refreshToken': refresh_token,
            'username': user.USER_NAME,
            'fullName': user.FULL_NAME,
            'acceptUrl': [func.API_PATH for func in result]
        }
    except JWTError:
        # covers token creation failures from the jose library
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail='Refresh token has expired or invalid')
    finally:
        # guard the closes: the original unconditionally called cursor.close(),
        # raising NameError when cursor creation itself had failed
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
@router.post('/refresh-token')
def refresh_token(request_body: RefreshTokenRequest , db: Session = Depends(get_db)):
    """Validate a refresh token and issue a fresh access/refresh token pair.

    Returns the same payload shape as /login. A missing user or any JWT
    decode failure maps to a 404 with a generic message.

    NOTE(review): the refresh token is decoded with oauth2.SECRET_KEY_ACCESS —
    confirm that create_refresh_token signs with this same key, otherwise
    decoding would always fail.
    """
    try:
        payload = jwt.decode(request_body.refresh_Token, oauth2.SECRET_KEY_ACCESS, algorithms=[oauth2.ALGORITHM])
        username: str = payload.get("sub")
        user = get_user_by_username(username, db)
        if not user:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Refresh token has expired or invalid")
        user_role = get_role_by_userID(user, db)
        result = get_func_by_roleID(user_role, db)
        access_token = oauth2.create_access_token(data={'sub': user.USER_NAME, 'role_id': user_role.ROLE_ID})
        # local name shadows this endpoint function; harmless here, but worth renaming
        refresh_token = oauth2.create_refresh_token(data={'sub': user.USER_NAME, 'role_id': user_role.ROLE_ID})
        return {
            'accessToken': access_token,
            'refreshToken': refresh_token,
            'username': user.USER_NAME,
            'fullName': user.FULL_NAME,
            'acceptUrl': [
                func.API_PATH
                for func in result
            ]
        }
    except JWTError:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f'Refresh token has expired or invalid')
| DaXuaBa/mobirace_be | auth/authentication.py | authentication.py | py | 3,009 | python | en | code | 0 | github-code | 36 |
21624620290 | import re
import duckduckgo
def duckduck_regex(message):
    """Return the query text of a '/duckduck <query>' command, or None if it
    does not match.

    Only alphanumeric characters and whitespace are accepted in the query.
    """
    # raw string: the original plain literal used '\/' and '\s', which are
    # invalid escape sequences in a non-raw string (SyntaxWarning on 3.12+)
    pattern = r"^/duckduck\s+(?P<data>[a-zA-Z0-9\s]+)"
    m = re.match(pattern, message)
    return m.group("data") if m else None
def ddg(query):
    """
    DuckDuckGo search: return the zero-click-info answer for `query`.

    NOTE(review): this file is itself named duckduckgo.py and does
    `import duckduckgo`; depending on sys.path this may import *this* module
    instead of the third-party `duckduckgo` package — verify the import
    resolves to the package providing get_zci.
    """
    return duckduckgo.get_zci(query)
| 0x00-0x00/gadreel-bot | src/duckduckgo.py | duckduckgo.py | py | 317 | python | en | code | 2 | github-code | 36 |
74072355624 | import moviepy.editor as mp
from assembly_api import *
def convert(name, output="converted.wav"):
    """Extract the audio track of the video file `name` into `output` (WAV).

    :param name: path of the source video file (e.g. "test.mp4")
    :param output: destination audio file; defaults to "converted.wav",
        which is what start() later reads
    """
    video = mp.VideoFileClip(name)
    try:
        video.audio.write_audiofile(output)
    finally:
        # release the reader processes/file handles held by moviepy
        video.close()
# Prompt for the source video and extract its audio track to converted.wav.
print("Please enter the name of video file you want to summarize along with its type...... eg: test.mp4")
name = input()
convert(name)
def start():
    """Upload the extracted audio and save its transcript (assembly_api helpers)."""
    filename = "converted.wav"
    # upload/save_transcript come from assembly_api and perform network I/O
    audio_url = upload(filename)
    save_transcript(audio_url, "output1")
    # NOTE(review): message mentions conversion, but this step produces the transcript
    print("The video has been converted to wav format successfully.")
start() | SiddheshJawadi/NLP_Webinar_Summarization | NLP/main.py | main.py | py | 487 | python | en | code | 0 | github-code | 36 |
43697079041 | from libqtile.lazy import lazy
from libqtile.config import Key
mod = "mod4"
terminal = "kitty"
filemanager = "thunar"
browser = "brave"
keys = [
# window controls
Key([mod], "j", lazy.layout.down(), desc="Move focus down"),
Key([mod], "k", lazy.layout.up(), desc="Move focus up"),
Key([mod, "shift"], "j", lazy.layout.shuffle_down(), lazy.layout.section_down(), desc="Move windows down in current stack"),
Key([mod, "shift"], "k", lazy.layout.shuffle_up(), lazy.layout.section_up(), desc="Move windows up in current stack"),
Key([mod], "h", lazy.layout.shrink(), lazy.layout.decrease_nmaster(), desc="Shrink window (MonadTall), decrease number in master pane (Tile)"),
Key([mod], "l", lazy.layout.grow(), lazy.layout.increase_nmaster(), desc="Expand window (MonadTall), increase number in master pane (Tile)"),
Key([mod, "shift"], "f", lazy.window.toggle_floating(), desc="toggle floating"),
Key([mod], "f", lazy.window.toggle_fullscreen(), desc="toggle fullscreen"),
Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"),
Key([mod], "space", lazy.layout.next(), desc="Move window focus to other window"),
Key([mod], "q", lazy.window.kill(), desc="Kill focused window"),
# media bindings
Key([], "XF86AudioRaiseVolume", lazy.spawn("amixer set Master 3%+")),
Key([], "XF86AudioLowerVolume", lazy.spawn("amixer set Master 3%-")),
Key([], "XF86AudioMute", lazy.spawn("amixer set Master toggle")),
Key([], "XF86AudioPlay", lazy.spawn("playerctl play-pause"), desc="Play/Pause player"),
Key([], "XF86AudioNext", lazy.spawn("playerctl next"), desc="Skip to next"),
Key([], "XF86AudioPrev", lazy.spawn("playerctl previous"), desc="Skip to previous"),
# launchers
Key([mod], "r", lazy.spawn("rofi -show combi"), desc="spawn rofi"),
Key([mod, "shift"], "r", lazy.spawncmd(), desc="Spawn a command using a prompt widget"),
# applications
Key([mod], "F12", lazy.spawn(terminal), desc="Launch terminal"),
Key([mod], "F1", lazy.spawn(browser), desc="Launches web browser"),
Key([mod], "F2", lazy.spawn(filemanager), desc="Launches File Manager"),
# system bindings
Key([mod], "F10", lazy.spawn("xscreensaver-command -lock"), desc="Locks screen"),
Key([mod], "F11", lazy.spawn("clipmenu"), desc="Show clipboard history"),
Key([mod, "shift"], "F10", lazy.spawn("~/.config/rofi/powermenu.sh"), desc="Suspends system"),
Key([mod, "control"], "r", lazy.restart(), desc="Restart Qtile"),
Key([mod, "control"], "q", lazy.shutdown(), desc="Shutdown Qtile"),
# layouts
Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
Key([mod, "shift", "control"], "h", lazy.layout.swap_column_left()),
Key([mod, "shift", "control"], "l", lazy.layout.swap_column_right()),
Key([mod, "shift"], "space", lazy.layout.flip()),
# Monitors
Key([mod], "w", lazy.to_screen(0), desc="Keyboard focus to monitor 1"),
Key([mod], "e", lazy.to_screen(1), desc="Keyboard focus to monitor 2"),
Key([mod], "period", lazy.next_screen(), desc="Move focus to next monitor"),
Key([mod], "comma", lazy.prev_screen(), desc="Move focus to prev monitor"),
]
| daddyhacker18/dotfiles-laptop | .config/qtile/modules/keys.py | keys.py | py | 3,211 | python | en | code | 0 | github-code | 36 |
8170098557 | import pickle
import numpy as np
import matplotlib.pyplot as plt
import os
# Grade one file of predicted vs. reference systolic pressures:
# BHS-style counters (a/b/c) and an AAMI-style check, then print the tallies.
data_dir = './diadata/'
# original spelled out data1.txt ... data30.txt explicitly
filelist = ['data%d.txt' % i for i in range(1, 31)]

# grade counters; kept as separate names to preserve the printed output
a = 0
b = 0
c = 0
aami = 0

# for filename in filelist:  # original evaluated only this single file
filepath = os.path.join(data_dir, 'data30.txt')
# 'rb' is sufficient (the original opened 'rb+' and closed manually);
# the original also clobbered its own path variable with the file bytes
with open(filepath, 'rb') as f:
    data = pickle.loads(f.read())
pred_sys = data["pred"]
real_sys = data["real"]
error = np.array(pred_sys).reshape(-1) - np.array(real_sys).reshape(-1)

# single vectorized pass over |error| instead of three separate loops
abs_error = np.abs(error)
num = len(error)
rate_5 = np.count_nonzero(abs_error <= 5) / num
rate_10 = np.count_nonzero(abs_error <= 10) / num
rate_15 = np.count_nonzero(abs_error <= 15) / num

# BHS-style grade thresholds on the cumulative error rates
if rate_5 >= 0.65 and rate_10 >= 0.85 and rate_15 >= 0.95:
    a += 1
elif rate_5 >= 0.5 and rate_10 >= 0.75 and rate_15 >= 0.90:
    b += 1
elif rate_5 >= 0.4 and rate_10 >= 0.65 and rate_15 >= 0.85:
    c += 1

# AAMI-style check: mean error <= 5 and SD <= 8
# NOTE(review): AAMI usually requires |mean| <= 5; kept the original signed test
mean_val = np.mean(error)
std_val = np.std(error)
if mean_val <= 5 and std_val <= 8:
    aami += 1

print(a, b, c, aami)
| WangboML/BP_estimation | sys_process.py | sys_process.py | py | 1,484 | python | en | code | 2 | github-code | 36 |
38394545817 | import pandas as pd
#%%
# Option 1: most-used import/export routes.
synergy_dataframe = pd.read_csv('synergy_logistics_database.csv',
                                index_col=0, encoding='utf-8',
                                parse_dates=[4, 5])
# Group every (direction, origin, destination, transport mode) route.
combinaciones1 = synergy_dataframe.groupby(by=['direction', 'origin',
                                               'destination', 'transport_mode'])
# describe() on total_value yields per-route stats; 'count' is the number of
# shipments, used to rank the top import/export routes.
descripcion = combinaciones1.describe()['total_value']
# print(descripcion)
count = descripcion['count']
# Sort descending and turn the series back into a dataframe.
count_sort = count.sort_values(ascending=False)
count_sort = count_sort.to_frame().reset_index()
# print(count_sort)

#%%
# Option 2: mean shipment value per transport mode.
synergy_dataframe2 = pd.read_csv('synergy_logistics_database.csv',
                                 index_col=0, encoding='utf-8',
                                 parse_dates=[4, 5])
# Dropping 'direction' from the keys would give an overall per-mode score.
combinaciones2 = synergy_dataframe2.groupby(by=['direction',
                                                'transport_mode'])
descripcion2 = combinaciones2.describe()['total_value']
# Rank transport modes by their mean shipment value.
count2 = descripcion2['mean']
count_sort2 = count2.sort_values(ascending=False)
count_sort2 = count_sort2.to_frame().reset_index()
# print(count_sort2)

#%%
# Option 3: total import/export value per origin country.
synergy_dataframe3 = pd.read_csv('synergy_logistics_database.csv',
                                 index_col=0, encoding='utf-8',
                                 parse_dates=[4, 5])
exports = synergy_dataframe3[synergy_dataframe3['direction'] == 'Exports']
imports = synergy_dataframe3[synergy_dataframe3['direction'] == 'Imports']
# sol_3 (below) returns the countries making up the requested cumulative
# percentage of total value.
def sol_3(df, p):
    """Return the origin countries whose cumulative share of total_value,
    taken in descending order of share, stays below `p` percent.

    The result keeps the columns origin, total_value, percent and cumsum.
    """
    totals = df.groupby('origin', as_index=False)['total_value'].sum()
    grand_total = totals['total_value'].sum()
    totals['percent'] = 100 * totals['total_value'] / grand_total
    totals = totals.sort_values(by='percent', ascending=False)
    totals['cumsum'] = totals['percent'].cumsum()
    return totals[totals['cumsum'] < p]
# Countries that make up 85% of export / import value.
res_exports = sol_3(exports, 85)
res_imports = sol_3(imports, 85)

# Pie charts, labelled by each row's index position.
plot = res_exports.plot.pie(y='total_value')
# print(plot)
plot2 = res_imports.plot.pie(y='total_value')
# print(plot2)

# Total value per origin country, imports and exports combined.
combinaciones3 = (synergy_dataframe3.groupby(by=['origin'])
                  .sum().groupby(level=[0]).cumsum())
# print(combinaciones3)
plot3 = combinaciones3.plot.pie(y='total_value')
# print(plot3)
4723479414 | # -*- coding: utf-8 -*-
"""
Custom scripts to plot DISCO profiles to a passed plot axis.
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import numpy as np
from utils.plotting_helpers import annotate_sig_buildup_points
from utils.wrangle_data import flatten_multicolumns, calculate_abs_buildup_params
# set default style params (SciencePlots 'science' style + project 'discolib' style)
plt.style.use(['science', 'discolib'])
plt.rcParams.update({'font.family': 'sans-serif'})
plt.rcParams.update({'font.size': 6})
# for use in more specific plotting: one distinct colour per proton peak
custom_colors = ['#377eb8', '#984ea3', '#ff7f00', '#e41a1c', '#f781bf',
                 '#ffff33', '#4daf4a', '#a65628', '#999999']
# must install LaTeX before you can use Science Plots; path is macOS-specific (MacTeX)
os.environ["PATH"] += os.pathsep + '/Library/TeX/texbin'
def add_fingerprint_toax(df, ax, **kwargs):
    '''Add the DISCO AF0 fingerprint of all binding protons for one polymer
    to the plot axis passed.

    Parameters:
    -----------
    df: pandas.DataFrame
        "replicate" raw data for one polymer (all technical AF0 replicates for
        only the polymer's binding protons)
    ax: matplotlib axis
        axis of the plot to contain the DISCO fingerprint
    **kwargs: dict {"custom_palette": [list, of, colourcodes]} (optional)
        custom plotting palette containing a unique colour code per proton

    Returns:
    --------
    None, adds the DISCO AF0 fingerprint (bar + strip plot, x axis inverted
    to match the NMR spectrum convention) to the plot axis passed
    '''
    # work on a copy: the original version wrote the remapped ppm column back
    # into the caller's dataframe
    df = df.copy()
    ppm = np.round(df.groupby(by='proton_peak_index')['ppm'].mean(), 2)
    ppm_ix = np.unique(df['proton_peak_index'].values)

    # map ppm to proton peak index in case of multiple ppms per proton peak index
    ppm_mapper = dict(zip(ppm_ix, ppm))
    df['ppm'] = df['proton_peak_index'].map(ppm_mapper)

    # remove duplicates and plot absolute AF0 values
    df_plot = df[['ppm', 'AFo']].drop_duplicates()
    df_plot['AFo'] = df_plot['AFo'].abs()

    # optional custom palette; None falls back to seaborn's default colours
    # (replaces the original try/except-KeyError-as-control-flow)
    custom_palette = kwargs.pop("custom_palette", None)
    sns.barplot(data=df_plot, x='ppm', y='AFo', ax=ax,
                edgecolor='k', errcolor='black', palette=custom_palette)
    sns.stripplot(data=df_plot, x='ppm', y='AFo', ax=ax, edgecolor='k',
                  linewidth=0.5, jitter=False, palette=custom_palette)

    # invert to match NMR spectrum
    ax.invert_xaxis()
    return
def add_buildup_toax(df, ax):
    '''Adds the Absolute DISCO Effect(t) buildup curves for all protons in
    a polymer to an existing plot axis.

    Parameters:
    -----------
    df: pandas.DataFrame
        mean raw data file for one polymer, containing mean DISCO Effects(t)
        (corr % attenuation) for /only/ the binding protons; may have flat or
        MultiIndex columns
    ax: matplotlib axis
        plot axis to contain the buildup curves

    Returns:
    --------
    None, adds one curve per ppm group (marker series plus a shaded band —
    presumably the error band from calculate_abs_buildup_params; confirm
    there) and a dashed zero line.
    '''
    if type(df.columns) == pd.MultiIndex:
        df = flatten_multicolumns(df) # make data indexable if it is not already
    ppm_labels = np.round(df['ppm'], 2)
    df_plot = df.copy()
    # one group (= one curve) per rounded ppm value
    groups = df_plot.groupby([ppm_labels])
    # plot DISCO effect build up curve, absolute values
    for ppm, group in groups:
        sat_time, disco_effect, y1, y2 = calculate_abs_buildup_params(group)
        ax.plot(sat_time, disco_effect, markeredgecolor='k', markeredgewidth=0.35,
                marker='o', linestyle='', ms=5, label="%.2f" % ppm)
        ax.fill_between(sat_time, y1, y2, alpha=0.25)
    ax.legend(loc='lower right', title="Peak ($\delta$, ppm)")
    ax.axhline(y=0.0, color="0.8", linestyle='dashed')
    return
def add_overlaid_buildup_toax_customlabels(df_list, ax, **kwargs):
    '''Overlay buildup curve(s) from several polymers/peaks on one plot, with
    per-curve colours/labels and change-significance annotations.

    Parameters:
    -----------
    df_list: list
        list of one "mean" raw dataframe per polymer whose data is to be
        plotted (typically pre-subset to a single peak)
    ax: matplotlib axis
        location for the plot
    **kwargs: dict (all keys REQUIRED — popped without defaults)
        "labels": list of curve labels, one per dataframe in df_list
        "dx", "dy": x/y offsets for the significance markers
        "change_significance": dataframe with per-(proton_peak_index, point)
            'changed_significantly' flags
        "custom_colors": one colour per plotted curve
        "annot_color": colour for the significance annotations

    Returns:
    --------
    None, adds the overlaid curves to `ax`.
    '''
    # extract custom properties (KeyError here means a required kwarg is missing)
    custom_labels = kwargs.pop("labels")
    dx = kwargs.pop("dx")
    dy = kwargs.pop("dy")
    change_sig_df = kwargs.pop("change_significance")
    buildup_colors = kwargs.pop("custom_colors")
    annot_color = kwargs.pop("annot_color")
    # plot overlaid buildups using the correct custom properties;
    # color_count advances once per plotted curve across all dataframes
    color_count = 0
    for ix, df in enumerate(df_list):
        plot_label = custom_labels[ix]
        if type(df.columns) == pd.MultiIndex:
            df = flatten_multicolumns(df) # make data indexable if it is not already
        ppm_labels = np.round(df['ppm'], 2)
        df_plot = df.copy()
        groups = df_plot.groupby([ppm_labels])
        # plot DISCO effect build up curve, absolute values
        for _, group in groups:
            sat_time, disco_effect, y1, y2 = calculate_abs_buildup_params(group)
            full_plot_label = f"{plot_label}"
            ax.plot(sat_time, disco_effect, markeredgecolor='k', markeredgewidth=0.35, color=buildup_colors[color_count],
                    marker='o', linestyle='', ms=5, label=full_plot_label)
            ax.fill_between(sat_time, y1, y2, color=buildup_colors[color_count],
                            alpha=0.25)
            ax.axhline(y=0.0, color="0.8", linestyle='dashed')
            color_count += 1
            # annotate significance of change in disco effect (NOT disco adhesion interaction significance)
            key = group['proton_peak_index'].unique()[0]
            change_sig_subset = change_sig_df.loc[change_sig_df['proton_peak_index'] == key]
            # annotate change sig points
            significance = change_sig_subset['changed_significantly']
            annotate_sig_buildup_points(ax, significance, sat_time, disco_effect, dx, dy, color=annot_color)
    return
def add_difference_plot(df, ax, dy, **kwargs):
    '''Add a proton-wise difference profile plot to a plot axis
    (Vertical Orientation: effect size on x, one row per proton on y).

    Parameters:
    -----------
    df: Pandas.DataFrame
        "subset_sattime_df" for a polymer comparison; contains the change
        effect data computed at the single saturation time chosen for the plot
        (columns used: effect_size, effect_sem_lower/upper, ppm,
        changed_significantly)
    ax: matplotlib axis
        plot destination
    dy: float
        y offset of the significance marker from its datapoint
    **kwargs: dict {"custom_colors": [one, colour, per, proton]} (REQUIRED —
        popped without a default)

    Returns:
    --------
    None, adds the difference profile (dashed zero line, SEM error bars,
    colour-coded points, '*' markers on significant changes) to the plot.

    NOTE(review): `df['annotation'] = ...` below writes into the caller's
    dataframe — copy first if that side effect is unwanted.
    '''
    custom_colors = kwargs.pop("custom_colors")
    # one y position per proton, 1..n
    plot_range = range(1, (df.shape[0])+1)
    # zero line
    ax.axvline(x=0.0, color="0.8", linestyle='dashed')
    # error bars
    ax.hlines(y=plot_range, xmin=df['effect_sem_lower'],
              xmax=df['effect_sem_upper'], color='black', linewidth=2, zorder=1)
    # data
    ax.scatter(df['effect_size'], plot_range, s=(40,), color=custom_colors[:df.shape[0]],
               alpha=1, label='Effect Size', marker='o', linewidths=0.35, edgecolors='k', zorder = 2)
    ax.set_yticks(plot_range, np.round(df['ppm'].values, 2))
    # annotate significance
    df['annotation'] = df['changed_significantly'].map({True: "*", False: ""})
    for ix, value in enumerate(list(plot_range)):
        x = df['effect_size'].iloc[ix]
        y = value + dy
        marker = df['annotation'].iloc[ix]
        ax.annotate(marker, (x, y), c='#000000')
    return
def add_difference_plot_transposed(df, ax, dy, **kwargs):
    '''Add a proton-wise difference profile plot to a plot axis
    (Horizontal Orientation: one column per proton on x, effect size on y).

    Parameters:
    -----------
    df: Pandas.DataFrame
        "subset_sattime_df" for a polymer comparison; contains the change
        effect data computed at the single saturation time chosen for the plot
        (columns used: effect_size, effect_sem_lower/upper, ppm,
        changed_significantly)
    ax: matplotlib axis
        plot destination
    dy: float
        y offset of the significance marker from its datapoint
    **kwargs: dict {"custom_colors": [one, colour, per, proton]} (REQUIRED —
        popped without a default)

    Returns:
    --------
    None, adds the transposed difference profile (dashed zero line, SEM error
    bars, colour-coded points, '*' markers on significant changes) to the plot.

    NOTE(review): `df['annotation'] = ...` below writes into the caller's
    dataframe — copy first if that side effect is unwanted.
    '''
    custom_colors = kwargs.pop("custom_colors")
    # one x position per proton, 1..n
    plot_domain = range(1, df.shape[0]+1)
    # zero line
    ax.axhline(y=0.0, color="0.8", linestyle='dashed')
    # error bars
    ax.vlines(x=plot_domain, ymin=df['effect_sem_lower'],
              ymax=df['effect_sem_upper'], color='black', linewidth=2, zorder = 1)
    # data
    ax.scatter(plot_domain, df['effect_size'], s = (40,), color=custom_colors[:df.shape[0]],
               alpha=1, label='effect size', marker = 'o', linewidths = 0.35, edgecolors = 'k', zorder = 2)
    ax.set_xticks(plot_domain, np.round(df['ppm'].values,2))
    # annotate significance
    df['annotation'] = df['changed_significantly'].map({True: "*", False: ""})
    for ix, value in enumerate(list(plot_domain)):
        y = df['effect_size'].iloc[ix] + dy
        # small fixed x offset so the '*' sits beside the point
        x = value + 0.05
        marker = df['annotation'].iloc[ix]
        ax.annotate(marker, (x,y), c = '#000000')
    return
| Frank-Gu-Lab/disco-figures-template | notebooks/utils/plotting.py | plotting.py | py | 15,812 | python | en | code | 2 | github-code | 36 |
15778610136 | import os
import sys
from typing import Callable
from utils.inputs import int_input
from utils.prints import Color, print_line, print_result
def get_exercices_count() -> int:
    """
    Return the number of exercises.

    Counts only files strictly matching "ex<number>.py" in the src/ directory.
    The previous startswith/endswith test also counted unrelated files such as
    "example.py", which would then break get_exercice() in the menu loop.

    :return: The number of exercises.
    :rtype: int
    """
    # strict pattern: 'ex' + digits + '.py'
    return len([name for name in os.listdir('src')
                if name.startswith('ex') and name.endswith('.py')
                and name[2:-3].isdigit()])
def get_exercice(number: int) -> tuple[str, str, Callable[[], None]]:
    """
    Return the (title, docstring, callable) triple for exercise `number`.

    :param number: The exercise number.
    :type number: int
    :return: The exercise title (module __title__), the function's docstring,
        and the function to execute.
    :rtype: tuple[str, str, Callable[[], None]]
    """
    # __import__("src.exN") returns the top-level package; drill down to the module
    file: object = __import__(f"src.ex{number}")
    module: object = getattr(file, f"ex{number}")
    function: Callable[[], None] = getattr(module, f"ex{number}")
    return getattr(module, "__title__"), str(function.__doc__), function
def get_instructions_from_docstring(function: tuple[str, str, Callable[[], None]]) -> str:
    """
    Extract the exercise instructions from its docstring.

    Takes everything before the ':return:' marker, then strips surrounding
    whitespace and removes tab characters.

    :param function: The exercise (title, docstring, callable) triple.
    :type function: tuple[str, str, Callable[[], None]]
    :return: The instruction text.
    :rtype: str
    """
    docstring = function[1]
    instructions, _, _ = docstring.partition(':return:')
    return instructions.strip().replace('\t', '')
def print_menu() -> None:
    """
    Print the exercise-selection menu: one line per exercise plus the STOP hint.

    :return: None
    :rtype: None
    """
    print_line("Menu", color = Color.YELLOW)
    # load (title, docstring, callable) for every exercise found in src/
    exercices: list[tuple[str, str, Callable[[], None]]] = [get_exercice(i) for i in range(1, get_exercices_count() + 1)]
    list_exercices: str = '\n'.join([f"Exercice {Color.cyan(i + 1)} - {Color.green(exercices[i][0])}" for i in range(len(exercices))])
    list_exercices += Color.red('\n\nSTOP pour arrêter.')
    print(list_exercices)
def menu() -> None:
    """
    Run the interactive exercise-selection loop.

    Repeatedly prints the menu, prompts for an exercise number, shows its
    instructions and runs it, until an exception occurs (e.g. the user stops
    via STOP or Ctrl+C), then exits.

    :return: None
    :rtype: None
    """
    print(Color.red('Bienvenue dans le menu du TP 3'))
    while True:
        print_menu()
        try:
            input1: int = int_input(Color.blue('Veuillez choisir un exercice : '), 1, lambda _: get_exercices_count(), True)
            exercice: tuple[str, str, Callable[[], None]] = get_exercice(input1)
            # NOTE(review): debug print of the raw tuple — intended?
            print(exercice)
            print_line(f"Exercice n°{input1}", color = Color.YELLOW)
            print_result(f"{get_instructions_from_docstring(exercice)}\n", Color.BLUE)
            exercice[2]()
            input(Color.cyan('Appuyez sur ENTRÉE pour continuer...'))
        except:
            # NOTE(review): bare except also swallows errors raised *inside*
            # an exercise and exits — consider narrowing the exception types.
            print(Color.cyan('\nAu revoir :)'))
            sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    menu()
| Ayfri/Python-TP3 | menu/menu.py | menu.py | py | 2,550 | python | fr | code | 0 | github-code | 36 |
15827646142 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import itertools
import logging
import pymongo
import arrow
import pandas as pd
import emission.storage.timeseries.timequery as estt
import emission.core.get_database as edb
import emission.core.wrapper.rawtrip as ecwrt
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.userinput as ecwui
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.cache_series as estsc
import emission.storage.decorations.timeline as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
EPOCH_MINIMUM = 0
EPOCH_MAXIMUM = 2**31 - 1

# helpers for getting start/enter and end/exit times of a trip/place
# (named functions instead of lambda assignments, per PEP 8 E731)
def begin_of(te):
    """Return the start_ts (trip) or enter_ts (place) of a timeline entry,
    or None if neither is present."""
    return te['data'].get('start_ts', te['data'].get('enter_ts'))

def end_of(te):
    """Return the end_ts (trip) or exit_ts (place) of a timeline entry,
    or None if neither is present."""
    return te['data'].get('end_ts', te['data'].get('exit_ts'))
def get_raw_sections_for_trip(user_id, trip_id):
    """Raw (segmentation/raw_section) sections for the trip, ordered by start_ts."""
    return get_sections_for_trip("segmentation/raw_section", user_id, trip_id)

def get_cleaned_sections_for_trip(user_id, trip_id):
    """Cleaned (analysis/cleaned_section) sections for the trip, ordered by start_ts."""
    return get_sections_for_trip("analysis/cleaned_section", user_id, trip_id)

def get_raw_stops_for_trip(user_id, trip_id):
    """Raw (segmentation/raw_stop) stops for the trip, ordered by enter_ts."""
    return get_stops_for_trip("segmentation/raw_stop", user_id, trip_id)

def get_cleaned_stops_for_trip(user_id, trip_id):
    """Cleaned (analysis/cleaned_stop) stops for the trip, ordered by enter_ts."""
    return get_stops_for_trip("analysis/cleaned_stop", user_id, trip_id)
def get_raw_timeline_for_trip(user_id, trip_id):
    """
    Get an ordered sequence of raw sections and stops corresponding to this
    trip, wrapped in a Timeline.
    """
    return esdt.Timeline(esda.RAW_STOP_KEY, esda.RAW_SECTION_KEY,
                         get_raw_stops_for_trip(user_id, trip_id),
                         get_raw_sections_for_trip(user_id, trip_id))

def get_cleaned_timeline_for_trip(user_id, trip_id):
    """
    Get an ordered sequence of cleaned sections and stops corresponding to
    this trip, wrapped in a Timeline.
    """
    return esdt.Timeline(esda.CLEANED_STOP_KEY, esda.CLEANED_SECTION_KEY,
                         get_cleaned_stops_for_trip(user_id, trip_id),
                         get_cleaned_sections_for_trip(user_id, trip_id))
def _get_ordered_trip_children(key, user_id, trip_id, sort_field):
    """Query the analysis timeseries for entries of `key` that are children of
    the given trip, sorted ascending by `sort_field`.

    Shared implementation for get_sections_for_trip / get_stops_for_trip,
    which previously duplicated this query logic.
    """
    query = {"user_id": user_id, "data.trip_id": trip_id,
             "metadata.key": key}
    logging.debug("About to execute query %s with sort_key %s" % (query, sort_field))
    doc_cursor = edb.get_analysis_timeseries_db().find(query).sort(
        sort_field, pymongo.ASCENDING)
    return [ecwe.Entry(doc) for doc in doc_cursor]

def get_sections_for_trip(key, user_id, trip_id):
    """
    Get the set of sections that are children of this trip,
    ordered by start timestamp.
    """
    return _get_ordered_trip_children(key, user_id, trip_id, "data.start_ts")

def get_stops_for_trip(key, user_id, trip_id):
    """
    Get the set of stops that are children of this trip,
    ordered by enter timestamp.
    """
    return _get_ordered_trip_children(key, user_id, trip_id, "data.enter_ts")
def _get_next_cleaned_timeline_entry(ts, tl_entry):
    """
    Find the next trip or place in the timeline.

    If tl_entry is a trip (has an "end_place"), the next entry is that place;
    if it is a place (has a "starting_trip"), the next entry is that trip,
    falling back to the untracked-time entry with the same id when there is
    no cleaned trip. Returns None if neither reference is present.
    """
    if ("end_place" in tl_entry.data):
        return ts.get_entry_from_id(esda.CLEANED_PLACE_KEY, tl_entry.data.end_place)
    elif ("starting_trip" in tl_entry.data):
        starting_trip = ts.get_entry_from_id(esda.CLEANED_TRIP_KEY, tl_entry.data.starting_trip)
        # if there is no cleaned trip, fall back to untracked time
        if starting_trip is None:
            logging.debug("Starting trip %s is not tracked, checking untracked time..." % tl_entry.data.starting_trip)
            starting_trip = ts.get_entry_from_id(esda.CLEANED_UNTRACKED_KEY, tl_entry.data.starting_trip)
        return starting_trip
    else:
        return None
def get_user_input_for_trip(trip_key, user_id, trip_id, user_input_key):
    """Look up the trip entry and return the matching user input for it."""
    time_series = esta.TimeSeries.get_time_series(user_id)
    trip_entry = time_series.get_entry_from_id(trip_key, trip_id)
    return get_user_input_for_timeline_entry(time_series, trip_entry, user_input_key)
# Additional checks to be consistent with the phone code
# www/js/diary/services.js
# Since that has been tested the most
# If we no longer need these checks (maybe with trip editing), we can remove them
def valid_user_input_for_timeline_entry(ts, tl_entry, user_input):
    """
    Return True if user_input plausibly belongs to the timeline entry
    tl_entry (a trip or place).

    Rules (mirroring the phone implementation):
      - the input must start within [entry_start, entry_end)
      - the input must end before entry_end, OR within 15 minutes after it;
        in the latter corner case we additionally require that the input end
        before the *next* timeline entry ends and that the input overlaps
        more than half of tl_entry's duration.
    """
    # we know that the trip is cleaned so we can use the fmt_time
    # but the confirm objects are not necessarily filled out
    fmt_ts = lambda ts, tz: arrow.get(ts).to(tz)
    entry_start = begin_of(tl_entry)
    entry_end = end_of(tl_entry)
    if entry_start is None:
        # a place will have no enter time if it is the first place in the timeline
        # so we will set the start time as low as possible for the purpose of comparison
        entry_start = EPOCH_MINIMUM
    if entry_end is None:
        # a place will have no exit time if the user hasn't left there yet
        # so we will set the end time as high as possible for the purpose of comparison
        entry_end = EPOCH_MAXIMUM
    # logging.warn("Comparing user input %s (%s) of type %s: %s -> %s, trip of type %s %s (%s) -> %s (%s)" %
    #     (user_input.data.label, user_input.get_id(), user_input.metadata.key,
    #     fmt_ts(user_input.data.start_ts, user_input.metadata.time_zone),
    #     fmt_ts(user_input.data.end_ts, user_input.metadata.time_zone),
    #     tl_entry.get_id(),
    #     fmt_ts(entry_start, user_input.metadata.time_zone), entry_start,
    #     fmt_ts(entry_end, user_input.metadata.time_zone), entry_end))
    logging.debug("Comparing user input %s: %s -> %s, trip %s -> %s, start checks are (%s && %s) and end checks are (%s || %s)" % (
        user_input.data.label,
        fmt_ts(user_input.data.start_ts, user_input.metadata.time_zone),
        fmt_ts(user_input.data.end_ts, user_input.metadata.time_zone),
        fmt_ts(entry_start, user_input.metadata.time_zone), fmt_ts(entry_end, user_input.metadata.time_zone),
        (user_input.data.start_ts >= entry_start),
        (user_input.data.start_ts < entry_end),
        (user_input.data.end_ts <= entry_end),
        ((user_input.data.end_ts - entry_end) <= 15 * 60)
    ))
    start_checks = (user_input.data.start_ts >= entry_start and
        user_input.data.start_ts < entry_end)
    # second clause: allow the input to spill at most 15 minutes past the entry end
    end_checks = (user_input.data.end_ts <= entry_end or
        ((user_input.data.end_ts - entry_end) <= 15 * 60))
    if start_checks and not end_checks:
        logging.debug("Handling corner case where start check matches, but end check does not")
        next_entry_obj = _get_next_cleaned_timeline_entry(ts, tl_entry)
        if next_entry_obj is not None:
            next_entry_end = end_of(next_entry_obj)
            if next_entry_end is None: # the last place will not have an exit_ts
                end_checks = True # so we will just skip the end check
            else:
                end_checks = user_input.data.end_ts <= next_entry_end
                logging.debug("Second level of end checks when the next trip is defined (%s <= %s) = %s" % (
                    user_input.data.end_ts, next_entry_end, end_checks))
        else:
            end_checks = True
            logging.debug("Second level of end checks when the next trip is not defined = %s" % end_checks)
        if end_checks:
            # If we have flipped the values, check to see that there is sufficient overlap
            # https://github.com/e-mission/e-mission-docs/issues/476#issuecomment-747587041
            overlapDuration = min(user_input.data.end_ts, entry_end) - max(user_input.data.start_ts, entry_start)
            logging.debug("Flipped endCheck, overlap(%s)/trip(%s) = %s" %
                (overlapDuration, tl_entry.data.duration, (overlapDuration / tl_entry.data.duration)));
            end_checks = (overlapDuration/tl_entry.data.duration) > 0.5;
    return start_checks and end_checks
def valid_user_input(ts, trip_obj):
    """Return a one-argument predicate that validates a user input against trip_obj."""
    return lambda user_input: valid_user_input_for_timeline_entry(ts, trip_obj, user_input)
def final_candidate(filter_fn, potential_candidates):
    """
    Wrap the raw candidate docs as entries, keep the ones accepted by
    filter_fn, and return the most recently written survivor (or None if
    nothing matches).
    """
    wrapped_candidates = [ecwe.Entry(cand) for cand in potential_candidates]
    matching = [entry for entry in wrapped_candidates if filter_fn(entry)]
    if not matching:
        return None
    # Candidates generally share the same start_ts, so we order by write time
    # instead; this also handles the case where the user labels before the
    # pipeline runs and then overwrites the label afterwards.
    by_write_ts = sorted(matching, key=lambda entry: entry["metadata"]["write_ts"])
    # For debug logs, show the label if present, else a start or enter time.
    def entry_detail(entry):
        return getattr(entry.data, "label",
                       getattr(entry.data, "start_fmt_time",
                               getattr(entry.data, "enter_fmt_time", None)))
    logging.debug("sorted candidates are %s" %
                  [{"write_fmt_time": e.metadata.write_fmt_time, "detail": entry_detail(e)} for e in by_write_ts])
    winner = by_write_ts[-1]
    logging.debug("most recent entry is %s, %s" %
                  (winner.metadata.write_fmt_time, entry_detail(winner)))
    return winner
def get_not_deleted_candidates(filter_fn, potential_candidates):
    """
    Wrap and filter the candidates like final_candidate, but instead of
    picking one winner, return every ACTIVE entry whose match_id has not
    also been DELETED. Returns [] when nothing passes filter_fn.
    """
    potential_candidate_objects = [ecwe.Entry(c) for c in potential_candidates]
    extra_filtered_potential_candidates = list(filter(filter_fn, potential_candidate_objects))
    if len(extra_filtered_potential_candidates) == 0:
        logging.debug(f"in get_not_deleted_candidates, no candidates, returning []")
        return []
    # We want to retain all ACTIVE entries that have not been DELETED
    # Entries with no "status" field predate the status flag and count as ACTIVE.
    all_active_list = [efpc for efpc in extra_filtered_potential_candidates if "status" not in efpc.data or efpc.data.status == ecwui.InputStatus.ACTIVE]
    all_deleted_id = [efpc["data"]["match_id"] for efpc in extra_filtered_potential_candidates if "status" in efpc.data and efpc.data.status == ecwui.InputStatus.DELETED]
    # TODO: Replace this with filter and a lambda if we decide not to match by ID after all
    not_deleted_active = [efpc for efpc in all_active_list if efpc["data"]["match_id"] not in all_deleted_id]
    logging.info(f"Found {len(all_active_list)} active entries, {len(all_deleted_id)} deleted entries -> {len(not_deleted_active)} non deleted active entries")
    return not_deleted_active
def get_time_query_for_timeline_entry(timeline_entry, force_start_end=True):
    """
    Build a TimeQuery spanning the duration of this timeline entry.

    :param timeline_entry: a trip/section (start_ts/end_ts) or
        stop/place (enter_ts/exit_ts) entry
    :param force_start_end: if True (default), always query on
        "data.start_ts"; otherwise pick the field that matches the entry type
    """
    begin_of_entry = begin_of(timeline_entry)
    end_of_entry = end_of(timeline_entry)
    if force_start_end:
        timeType = "data.start_ts"
    else:
        # BUGFIX: this branch previously assigned the selector lambda object
        # itself to timeType instead of calling it, producing an invalid
        # field name whenever force_start_end was False.
        timeType = "data.start_ts" if "start_ts" in timeline_entry.data else "data.enter_ts"
    if begin_of_entry is None:
        # a place will have no enter time if it is the first place in the timeline
        # so we will set the start time as low as possible for the purpose of comparison
        # BUGFIX: previously assigned EPOCH_MINIMUM to an unused local
        # (entry_start), so the TimeQuery was built with a None start.
        begin_of_entry = EPOCH_MINIMUM
    if end_of_entry is None:
        # the last place (user's current place) will not have an exit_ts, so
        # every input from its enter_ts onward is fair game
        end_of_entry = EPOCH_MAXIMUM
    return estt.TimeQuery(timeType, begin_of_entry, end_of_entry)
def get_user_input_for_timeline_entry(ts, timeline_entry, user_input_key):
    """
    Return the single most recent valid user input (of user_input_key) for
    this timeline entry, or None if there is none.
    """
    # When we start supporting user inputs for places, we need to decide whether they will have
    # start/end or enter/exit. Depending on the decision, we can either remove support for
    # force_start_end (since we always use start/end) or pass in False (so we
    # use start/end or enter/exit appropriately)
    tq = get_time_query_for_timeline_entry(timeline_entry)
    potential_candidates = ts.find_entries([user_input_key], tq)
    return final_candidate(valid_user_input(ts, timeline_entry), potential_candidates)
# This is almost an exact copy of get_user_input_for_trip_object, but it
# retrieves an interable instead of a dataframe. So almost everything is
# different and it is hard to unify the implementations. Switching the existing
# function from get_data_df to find_entries may help us unify in the future
def get_user_input_from_cache_series(user_id, timeline_entry, user_input_key):
    """
    Like get_user_input_for_timeline_entry, but reads the candidates from
    the usercache series instead of the long-term timeseries.
    """
    # When we start supporting user inputs for places, we need to decide whether they will have
    # start/end or enter/exit. Depending on the decision, we can either remove support for
    # force_start_end (since we always use start/end) or pass in False (so we
    # use start/end or enter/exit appropriately)
    ts = esta.TimeSeries.get_time_series(user_id)
    tq = get_time_query_for_timeline_entry(timeline_entry)
    potential_candidates = estsc.find_entries(user_id, [user_input_key], tq)
    return final_candidate(valid_user_input(ts, timeline_entry), potential_candidates)
def get_additions_for_timeline_entry_object(ts, timeline_entry):
    """
    Return all non-deleted trip/place "addition" inputs that match this
    timeline entry (multiple additions per entry are allowed, unlike labels).
    """
    addition_keys = ["manual/trip_addition_input", "manual/place_addition_input"]
    # This should always be start/end
    # https://github.com/e-mission/e-mission-docs/issues/880#issuecomment-1509875714
    tq = get_time_query_for_timeline_entry(timeline_entry)
    potential_candidates = ts.find_entries(addition_keys, tq)
    return get_not_deleted_candidates(valid_user_input(ts, timeline_entry), potential_candidates)
def valid_timeline_entry(ts, user_input):
    """Return a predicate testing whether a confirmed object matches user_input."""
    return lambda confirmed_obj: valid_user_input_for_timeline_entry(ts, confirmed_obj, user_input)
# Find the trip or place that the user input belongs to to
def get_confirmed_obj_for_user_input_obj(ts, ui_obj):
    """
    Reverse lookup: given a user input entry, find the confirmed trip or
    place it belongs to (or None).
    """
    # the match check that we have is:
    # user input can start after trip/place start
    # user input can end before trip/place end OR within 15 minutes after
    # Given those considerations, there is no principled query for trip/place data
    # that fits into our query model
    # the trip/place start is before the user input start, but that can go until eternity
    # and the trip/place end can be either before or after the user input end
    # we know that the trip/place end is after the user input start, but again, that
    # can go on until now.
    # As a workaround, let us assume that the trip/place start is no more than a day
    # before the start of the ui object, which seems like a fairly conservative
    # assumption
    ONE_DAY = 24 * 60 * 60
    tq = estt.TimeQuery("data.start_ts", ui_obj.data.start_ts - ONE_DAY,
                        ui_obj.data.start_ts + ONE_DAY)
    # iff the input's key is one of these, the input belongs on a place
    # all other keys are only used for trip inputs
    place_keys = ["manual/place_user_input", "manual/place_addition_input"]
    if ui_obj['metadata']['key'] in place_keys:
        # if place, we'll query the same time range, but with 'enter_ts'
        tq.timeType = "data.enter_ts"
        potential_candidates = ts.find_entries(["analysis/confirmed_place"], tq)
    else:
        potential_candidates = ts.find_entries(["analysis/confirmed_trip"], tq)
    return final_candidate(valid_timeline_entry(ts, ui_obj), potential_candidates)
def filter_labeled_trips(mixed_trip_df):
    """
    mixed_trip_df: a dataframe with mixed labeled and unlabeled entries
    Returns only the rows whose user_input is non-empty.
    """
    if len(mixed_trip_df) == 0:
        return mixed_trip_df
    has_label_mask = mixed_trip_df.user_input != {}
    labeled_ct = mixed_trip_df[has_label_mask]
    logging.debug("After filtering, found %s labeled trips" % len(labeled_ct))
    logging.debug(labeled_ct.head())
    return labeled_ct
def expand_userinputs(labeled_ct):
    """
    labeled_ct: a dataframe that contains potentially mixed trips.
    Returns a dataframe in which each key of the user_input dicts becomes
    its own column (simple single-level kv labels index like any other
    column; trips without a given label get N/A).
    TODO: Replace by pandas.io.json.json_normalize?
    TODO: Currently duplicated from
    https://github.com/e-mission/em-public-dashboard/blob/main/viz_scripts/scaffolding.py
    Should remove it from there
    """
    if len(labeled_ct) == 0:
        return labeled_ct
    labels_df = pd.DataFrame(labeled_ct.user_input.to_list(), index=labeled_ct.index)
    logging.debug(labels_df.head())
    expanded_ct = pd.concat([labeled_ct, labels_df], axis=1)
    assert len(expanded_ct) == len(labeled_ct), \
        ("Mismatch after expanding labels, expanded_ct.rows = %s != labeled_ct.columns %s" %
            (len(expanded_ct), len(labeled_ct)))
    logging.debug("After expanding, columns went from %s -> %s" %
        (len(labeled_ct.columns), len(expanded_ct.columns)))
    logging.debug(expanded_ct.head())
    return expanded_ct
def has_final_labels(confirmed_trip_data):
    """True if the trip already has a user label, or is not expected to be labeled."""
    if confirmed_trip_data["user_input"] != {}:
        return True
    return confirmed_trip_data["expectation"]["to_label"] == False
# Vectorized, column-wise alternative to has_final_labels for whole
# dataframes, avoiding a slow per-row apply.
def has_final_labels_df(df):
    """Return the rows of df that have a user label or are not expected to be labeled."""
    expectation_df = pd.DataFrame(df.expectation.to_list(), index=df.index)
    labeled_mask = df.user_input != {}
    not_to_label_mask = expectation_df.to_label == False
    return df[labeled_mask | not_to_label_mask]
def get_max_prob_label(inferred_label_list):
    """
    Return the "labels" value of the most probable entry in
    inferred_label_list (a list of {"labels": ..., "p": ...} dicts).
    """
    # Two columns: "labels" and "p"
    label_prob_df = pd.DataFrame(inferred_label_list)
    # idxmax returns the index corresponding to the max data value in each column
    max_p_idx = label_prob_df.p.idxmax()
    # now we look up the labels for that index
    return label_prob_df.loc[max_p_idx].labels

def expand_finallabels(labeled_ct):
    """
    labeled_ct: a dataframe that contains potentially mixed trips.
    Returns a dataframe with the user input labels and the high confidence
    inferred labels expanded into the main dataframe. If the labels are
    simple, single level kv pairs (e.g. {mode_confirm: bike}), the expanded columns
    can be indexed very simply, like the other columns in the dataframe. Trips
    without labels are represented by N/A
    """
    if len(labeled_ct) == 0:
        return labeled_ct
    user_input_only = pd.DataFrame(labeled_ct.user_input.to_list(), index=labeled_ct.index)
    # Drop entries that are blank so we don't end up with duplicate entries in the concatenated dataframe.
    # without this change, concat might involve N/A rows from user entries,
    # inserted because the index is specified manually
    # then if they have high confidence entries, we will end up with
    # duplicated entries for N/A and the yellow labels
    # BUGFIX: pandas 2.0 removed positional axis arguments, so the previous
    # `dropna('index', ...)` call raises a TypeError; pass axis by keyword.
    user_input_only.dropna(axis="index", how="all", inplace=True)
    logging.debug("user_input_only %s" % user_input_only.head())
    # see testExpandFinalLabelsPandasFunctionsNestedPostFilter for a step by step
    # walkthrough of how this section works. Note that
    # testExpandFinalLabelsPandasFunctionsNestedPostFilter has an alternate
    # implementation that we don't choose because it generates a UserWarning
    # Note that we could have entries that have both user inputs and high
    # confidence inferred values. This could happen if the user chooses to go
    # into "All Labels" and label high-confidence values. That's why the
    # algorithm
    # https://github.com/e-mission/e-mission-docs/issues/688#issuecomment-1000981037
    # specifies that we look for inferred values only if the user input does
    # not exist
    expectation_expansion = pd.DataFrame(labeled_ct.expectation.to_list(), index=labeled_ct.index)
    high_confidence_no_userinput_df = labeled_ct[
        (labeled_ct.user_input == {}) & (expectation_expansion.to_label == False)
    ]
    # BUGFIX: same positional-axis fix as above; also reassign instead of
    # dropping in place, since this dataframe is a slice of labeled_ct and
    # in-place mutation triggers pandas chained-assignment warnings.
    high_confidence_no_userinput_df = high_confidence_no_userinput_df.dropna(axis="index", how="all")
    if len(high_confidence_no_userinput_df) > 0:
        high_confidence_inferred_labels = high_confidence_no_userinput_df.inferred_labels
        high_confidence_max_p_inferred_labels = high_confidence_inferred_labels.apply(get_max_prob_label)
        high_confidence_max_p_inferred_labels_only = pd.DataFrame(
            high_confidence_max_p_inferred_labels.to_list(),
            index=high_confidence_inferred_labels.index)
        logging.debug("high confidence inferred %s" % high_confidence_max_p_inferred_labels_only.head())
        assert pd.Series(labeled_ct.loc[
            high_confidence_max_p_inferred_labels_only.index].user_input == {}).all(), \
            ("Did not filter out all user inputs before expanding high confidence labels %s" %
                labeled_ct.loc[high_confidence_max_p_inferred_labels_only.index].user_input)
    else:
        high_confidence_max_p_inferred_labels_only = pd.DataFrame()
    # see testExpandFinalLabelsPandasFunctions for a step by step walkthrough of this section
    naive_concat = pd.concat([user_input_only, high_confidence_max_p_inferred_labels_only], axis=0)
    # Re-align the combined labels to the original index so unlabeled trips
    # come back as N/A rows before attaching the label columns.
    label_only = naive_concat.reindex(labeled_ct.index)
    expanded_ct = pd.concat([labeled_ct, label_only], axis=1)
    assert len(expanded_ct) == len(labeled_ct), \
        ("Mismatch after expanding labels, expanded_ct.rows = %s != labeled_ct.columns %s" %
            (len(expanded_ct), len(labeled_ct)))
    logging.debug("After expanding, columns went from %s -> %s" %
        (len(labeled_ct.columns), len(expanded_ct.columns)))
    logging.debug(expanded_ct.head())
    return expanded_ct
| e-mission/e-mission-server | emission/storage/decorations/trip_queries.py | trip_queries.py | py | 22,026 | python | en | code | 22 | github-code | 36 |
import json
import ptvsd
import sys
import requests
import boto3
# Block startup until a remote debugger (e.g. VS Code) attaches on port 5678.
# NOTE(review): this makes every invocation hang until a debugger attaches —
# suitable for local debugging only, not for a deployed Lambda.
ptvsd.enable_attach(address=('0.0.0.0', 5678), redirect_output=True)
print("waiting for debugger to attach...")
sys.stdout.flush()
ptvsd.wait_for_attach()
print("attached")
# import requests
def create_dynamo_table(event, context, table_name_value, enable_streams=False, read_capacity=1, write_capacity=1, region='us-west-2'):
    """
    Create a DynamoDB table keyed by EventId (hash) and EventDay (range).

    Returns an API-Gateway-style response dict: statusCode 200 with the
    JSON-encoded create_table response on success, or statusCode 500 with
    the exception docstring on failure.
    NOTE(review): `event`, `context` and `enable_streams` are currently
    unused; kept for interface compatibility with the Lambda handler.
    """
    table_name = table_name_value
    print('creating table: ' + table_name)
    responseObject = {}
    responseObject['headers'] = {}
    responseObject['headers']['content-type'] = 'application/json'
    try:
        client = boto3.client(service_name='dynamodb', region_name=region)
        table = client.create_table(
            TableName=table_name,
            AttributeDefinitions=[
                {'AttributeName': 'EventId', 'AttributeType': 'S'},
                {'AttributeName': 'EventDay', 'AttributeType': 'N'}
            ],
            KeySchema=[
                {'AttributeName': 'EventId', 'KeyType': 'HASH'},
                {'AttributeName': 'EventDay', 'KeyType': 'RANGE'},
            ],
            ProvisionedThroughput={'ReadCapacityUnits': read_capacity, 'WriteCapacityUnits': write_capacity})
        print(table)
        responseObject['statusCode'] = 200
        # BUGFIX: the create_table response contains datetime objects
        # (TableDescription.CreationDateTime), which json.dumps cannot
        # serialize; without default=str every *successful* create raised
        # TypeError and fell into the except branch, returning a 500.
        responseObject['body'] = json.dumps(table, default=str)
    except Exception as e:
        print(str(type(e)))
        print(e.__doc__)
        responseObject['statusCode'] = 500
        responseObject['body'] = json.dumps(e.__doc__)
    return responseObject
def create_handler(event, context):
    """Lambda entry point: create the 'user-visits' table with default capacity."""
    return create_dynamo_table(event, context, 'user-visits', False, 1, 1)
| tclarkston/debug-lambda | dynamo_db/app.py | app.py | py | 1,888 | python | en | code | 0 | github-code | 36 |
#
# Day 3: Rucksack Reorganization
# https://adventofcode.com/2022/day/3
#
import string
def calculate(lines: list[str]):
    """
    Sum the priorities of the badge item shared by each group of three
    rucksacks (Advent of Code 2022, day 3, part 2).

    Priorities: a-z -> 1..26, A-Z -> 27..52.
    """
    score = 0
    for i in range(0, len(lines), 3):
        group = [set(line.strip()) for line in lines[i:i + 3]]
        # The badge is the single item type present in all three sacks
        common = set.intersection(*group).pop()
        # string.ascii_letters is exactly lowercase + uppercase, so its
        # index + 1 is the priority (replaces the hand-built list whose
        # list.index was an O(52) scan per group).
        score += string.ascii_letters.index(common) + 1
    return score
# Entry point: read the puzzle input and print the total badge priority.
with open("input.txt") as f:
    lines = f.readlines()
print(calculate(lines))
| jeffharrington/advent-of-code-2022 | day03/day3b.py | day3b.py | py | 564 | python | en | code | 0 | github-code | 36 |
"""Console script for bgcflow."""
import subprocess
import sys
from pathlib import Path
import click
import bgcflow
from bgcflow.bgcflow import cloner, deployer, get_all_rules, snakemake_wrapper
from bgcflow.mkdocs import generate_mkdocs_report
from bgcflow.projects_util import copy_final_output, projects_util
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# Root click group; individual subcommands are registered via @main.command().
# NOTE: click uses the docstring as the CLI help text, so it is left unchanged.
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=bgcflow.__version__)
def main():
    """
    A snakemake wrapper and utility tools for BGCFlow (https://github.com/NBChub/bgcflow)
    """
    pass
@main.command()
@click.argument("destination")
@click.option("--branch", default="main", help="BGCFlow branch/release to use")
def deploy(**kwargs):
    """
    [EXPERIMENTAL] Deploy BGCFlow locally using snakedeploy.
    DESTINATION: path to deploy BGCFlow
    """
    # Thin CLI wrapper; all work happens in bgcflow.bgcflow.deployer
    deployer(**kwargs)
@main.command()
@click.argument("destination")
@click.option(
    "--branch",
    default="main",
    help="BGCFlow branch. (DEFAULT: `main`)",
)
def clone(**kwargs):
    """
    Use git to clone BGCFlow to local directory.
    DESTINATION: path to clone BGCFlow
    BRANCH: BGCFlow branch to clone. (DEFAULT: `main`)
    """
    # BUGFIX (help text): the option help and docstring previously claimed the
    # default branch was `dev-snakemake-wrapper`, but the actual default is
    # `main` — the stale text misled users about what gets cloned.
    cloner(**kwargs)
@main.command()
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory.)",
)
@click.option(
    "--snakefile",
    default="workflow/Snakefile",
    help="Location of the Snakefile relative to BGCFlow directory. (DEFAULT: workflow/Snakefile)",
)
@click.option(
    "--wms-monitor",
    default="http://127.0.0.1:5000",
    help="Panoptes address. (DEFAULT: http://127.0.0.1:5000)",
)
@click.option(
    "-c",
    "--cores",
    default=8,
    help="Use at most N CPU cores/jobs in parallel. (DEFAULT: 8)",
)
@click.option("-n", "--dryrun", is_flag=True, help="Test run.")
@click.option(
    "-t",
    "--touch",
    is_flag=True,
    help="Touch output files (mark them up to date without really changing them).",
)
def run(**kwargs):
    """
    A snakemake CLI wrapper to run BGCFlow. Automatically run panoptes.
    """
    # All option values are forwarded verbatim to the wrapper, which builds
    # and executes the snakemake command line.
    snakemake_wrapper(**kwargs)
@main.command()
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory)",
)
@click.option("--describe", help="Get description of a given rule.")
@click.option("--cite", help="Get citation of a given rule.")
def rules(**kwargs):
    """
    Get description of available rules from BGCFlow.
    """
    # With no --describe/--cite, lists all rules; otherwise shows details
    # for the requested rule (handled inside get_all_rules).
    get_all_rules(**kwargs)
@main.command()
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory)",
)
@click.option(
    "--project",
    help="Initiate a new BGCFlow project. Insert project name: `bgcflow init --project <TEXT>`",
)
@click.option(
    "--use_own_rules",
    is_flag=True,
    help="Generate rule selection template in PEP file instead of using Global rules. Use with `--project` option.",
)
@click.option(
    "--prokka_db", help="Path to custom reference file. Use with `--project` option."
)
@click.option(
    "--gtdb_tax", help="Path to custom taxonomy file. Use with `--project` option."
)
@click.option(
    "--samples_csv", help="Path to samples file. Use with `--project` option."
)
def init(**kwargs):
    """
    Create projects or initiate BGCFlow config. Use --project to create a new BGCFlow project.
    Usage:
    bgcflow init --> check current directory for existing config dir. If not found, generate from template.
    bgcflow init --project <TEXT> --> generate a new BGCFlow project in the config directory.
    """
    # projects_util raises FileNotFoundError when bgcflow_dir does not look
    # like a BGCFlow checkout; translate that into a friendly hint instead
    # of a traceback.
    try:
        projects_util(**kwargs)
    except FileNotFoundError as e:
        click.echo(
            "ERROR: Cannot find BGCFlow directory.\nPoint to the right directory using `--bgcflow_dir <destination>` or clone BGCFlow using `bgcflow clone <destination>`"
        )
        print(e)
@main.command()
@click.argument("project")
@click.option("--copy", help="Destination path to copy results.")
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory)",
)
# @click.option('--tree', is_flag=True, help='Show output directory tree structure of a given project.)')
def get_result(**kwargs):
    """
    View a tree of a project results or get a copy using Rsync.
    PROJECT: project name
    """
    # Without --copy, just show the first two levels of the results tree;
    # with --copy, delegate to the rsync-based copier.
    if kwargs["copy"] is None:
        project_dir = (
            Path(kwargs["bgcflow_dir"]) / f"data/processed/{kwargs['project']}"
        )
        subprocess.call(["tree", "-L", "2", project_dir])
    else:
        copy_final_output(**kwargs)
@main.command()
@click.option("--port", default=8001, help="Port to use. (DEFAULT: 8001)")
@click.option(
    "--file_server",
    default="http://localhost:8002",
    help="Port to use for fileserver. (DEFAULT: http://localhost:8002)",
)
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory)",
)
@click.option("--project", help="Name of the project. (DEFAULT: all)")
def serve(**kwargs):
    """
    Generate static HTML report for BGCFlow run(s)
    """
    # Three modes: no --project (just print usage hints), the special
    # project name "snakemake_report" (build and serve the snakemake run
    # summary), or a real project name (build an mkdocs report for it).
    if kwargs["project"] is None:
        click.echo(
            "Use `bgcflow serve --project <project name>` to generate report for each project.\nTo see Snakemake run summary, use `bgcflow serve --project snakemake_report`."
        )
    elif kwargs["project"] == "snakemake_report":
        output_dir = Path(kwargs["bgcflow_dir"]) / "data"
        workflow_dir = Path(kwargs["bgcflow_dir"]) / "workflow"
        assert (
            output_dir.is_dir()
        ), "ERROR: Cannot find BGCFlow directory. Use --bgcflow_dir to set the right location."
        # Build the snakemake HTML report, render the entry-point notebook,
        # then serve the whole directory over a simple HTTP server.
        subprocess.call(
            f"(cd {workflow_dir.parent.resolve()} && snakemake --report index.html)",
            shell=True,
        )
        subprocess.call(
            f"(cd {workflow_dir.resolve()} && jupyter nbconvert --execute --to html --output {output_dir.resolve()}/processed/index.html {workflow_dir.resolve()}/notebook/99-entry_point.ipynb --no-input --template classic)",
            shell=True,
        )
        subprocess.call(
            [
                "python",
                "-m",
                "http.server",
                "--directory",
                kwargs["bgcflow_dir"],
                str(kwargs["port"]),
            ]
        )
    else:
        bgcflow_dir = kwargs["bgcflow_dir"]
        project_name = kwargs["project"]
        port_id = kwargs["port"]
        file_server = kwargs["file_server"]
        generate_mkdocs_report(
            bgcflow_dir, project_name, port_id, file_server, ipynb=False
        )
# Consistency fix: @main.command() is now the outermost decorator, matching
# every other command in this module. (click tolerates options stacked above
# the command decorator, but the mixed order here was inconsistent.)
@main.command()
@click.option(
    "--bgcflow_dir",
    default=".",
    help="Location of BGCFlow directory. (DEFAULT: Current working directory.)",
)
@click.option(
    "-c",
    "--cores",
    default=8,
    help="Use at most N CPU cores/jobs in parallel. (DEFAULT: 8)",
)
@click.option("-n", "--dryrun", is_flag=True, help="Test run.")
def build(**kwargs):
    """
    Use DBT to build DuckDB database from BGCFlow results.
    """
    dryrun = ""
    bgcflow_dir = Path(kwargs["bgcflow_dir"])
    if kwargs["dryrun"]:
        dryrun = "--dryrun"
    # Run the dedicated Database snakefile from inside the BGCFlow checkout.
    subprocess.call(
        f"cd {bgcflow_dir.resolve()} && snakemake --use-conda -c {kwargs['cores']} --snakefile workflow/Database --keep-going {dryrun}",
        shell=True,
    )
# Allow `python cli.py` execution in addition to the console-script entry point.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| matinnuhamunada/bgcflow_wrapper | src/bgcflow/cli.py | cli.py | py | 7,639 | python | en | code | 2 | github-code | 36 |
import setuptools
import version
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata; the version string lives in the sibling version module.
setuptools.setup(
    name="uncertainty-wizard",
    version=version.RELEASE,
    author="Michael Weiss",
    author_email="code@mweiss.ch",
    description="Quick access to uncertainty and confidence of Keras networks.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/testingautomated-usi/uncertainty_wizard",
    packages=setuptools.find_packages(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| testingautomated-usi/uncertainty-wizard | setup.py | setup.py | py | 838 | python | en | code | 41 | github-code | 36 |
# -*-coding:utf-8-*-
"""
File Name: mouse_response_and_type_conversion.py
Program IDE: PyCharm
Date: 10:04
Create File By Author: Hong
"""
import cv2 as cv
import numpy as np
# Draw a rectangle on the image by dragging the mouse
# (-1, -1) marks "no drag in progress"
x1 = -1
y1 = -1
x2 = -1
y2 = -1
# canvas = np.zeros((300, 300, 3), dtype=np.uint8)
# NOTE(review): imread returns None if images/2.png is missing — verify the path
canvas = cv.imread('images/2.png', cv.IMREAD_COLOR)
img = np.copy(canvas)
# OpenCV invokes this callback for us on every mouse event in the window.
# Mouse callback with the fixed signature: event type, x/y coordinates,
# event flags, and an optional user parameter.
def mouse_drawing(event, x, y, flags, param):
    """Rubber-band rectangle drawing: press to anchor, drag to preview, release to commit."""
    # print(x, y)
    global x1, y1, x2, y2
    if event == cv.EVENT_LBUTTONDOWN:
        x1 = x
        y1 = y
    if event == cv.EVENT_MOUSEMOVE:
        # Ignore moves when no drag is in progress
        if x1 < 0 or y1 < 0:
            return
        x2 = x
        y2 = y
        dx = x2 - x1
        dy = y2 - y1
        # Only draw when dragging down-right (positive width and height)
        if dx > 0 and dy > 0:
            # Erase the previous preview by restoring the clean copy
            # canvas[:, :] = 0
            canvas[:, :, :] = img[:, :, :]
            cv.rectangle(canvas, (x1, y1), (x2, y2), (255, 0, 0), 2, 8, 0)
    if event == cv.EVENT_LBUTTONUP:
        x2 = x
        y2 = y
        dx = x2 - x1
        dy = y2 - y1
        if dx > 0 and dy > 0:
            # canvas[:, :] = 0
            canvas[:, :, :] = img[:, :, :]
            cv.rectangle(canvas, (x1, y1), (x2, y2), (255, 0, 0), 2, 8, 0)
        # Reset the anchor so the next press starts a fresh rectangle
        x1 = -1
        y1 = -1
        x2 = -1
        y2 = -1
def mouse_response():
    """Open a window, register the mouse callback, and redraw until ESC is pressed."""
    cv.namedWindow('Mouse Response', cv.WINDOW_AUTOSIZE)
    # Register the mouse callback on that window
    cv.setMouseCallback('Mouse Response', mouse_drawing)
    while True:
        cv.imshow('Mouse Response', canvas)
        c = cv.waitKey(1)
        # 27 == ESC key
        if c == 27:
            break
    cv.destroyAllWindows()
# Image pixel type conversion and normalization
def pixel_normalization(image_path: str):
    """Show the image normalized to [0, 1] in two equivalent ways."""
    img = cv.imread(image_path, cv.IMREAD_COLOR)
    # imshow can display both integer and floating-point pixel images
    # Image normalization
    # Method 1: plain division maps uint8 [0, 255] to float [0, 1]
    # print(img/255.0)
    cv.imshow('input', img / 255.0)
    # Method 2: cv.normalize with NORM_MINMAX stretches min..max into [0, 1]
    result = np.zeros_like(np.float32(img))
    cv.normalize(img, result, 0, 1, cv.NORM_MINMAX, dtype=cv.CV_32F)
    print(result)
    cv.imshow('result', result)
    cv.waitKey(0)
    cv.destroyAllWindows()
# Demo entry point: run the interactive rectangle drawer by default;
# uncomment the last line to run the normalization demo instead.
if __name__ == '__main__':
    path = 'images/2.png'
    mouse_response()
    # pixel_normalization(path)
| YouthJourney/Computer-Vision-OpenCV | mouse_response_and_type_conversion.py | mouse_response_and_type_conversion.py | py | 2,441 | python | en | code | 21 | github-code | 36 |
import numpy as np
import copy
import json
import imageio
import math
import os
import functools
import torch
from torch import nn
from torch.utils.data import Dataset
# Code based on:
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
    """FIFO transition buffer with uniform-random batch sampling."""

    def __init__(self, max_size=1e6):
        self.storage = []
        self.max_size = max_size

    def add(self, data):
        # Once full, evict the oldest transition before appending.
        if len(self.storage) == self.max_size:
            self.storage.pop(0)
        self.storage.append(data)

    def sample(self, batch_size):
        """Return stacked (x, y, u, r, d) arrays for a uniform random batch."""
        ind = np.random.randint(0, len(self.storage), size=batch_size)
        batch = [self.storage[i] for i in ind]
        # [(x,y,u,r,d), ...] -> ((x,...), (y,...), (u,...), (r,...), (d,...))
        xs, ys, us, rs, ds = zip(*batch)
        return (np.stack([np.array(v, copy=False) for v in xs]),
                np.stack([np.array(v, copy=False) for v in ys]),
                np.stack([np.array(v, copy=False) for v in us]),
                np.stack([np.array(v, copy=False) for v in rs]).reshape(-1, 1),
                np.stack([np.array(v, copy=False) for v in ds]).reshape(-1, 1))

    def __len__(self):
        return len(self.storage)

    def sample_seq(self, batch_size, seq_len):
        """Sample batch_size contiguous transition windows of length seq_len."""
        start_indices = np.random.randint(0, len(self.storage) - seq_len + 1, size=batch_size)
        columns = ([], [], [], [], [])
        for start in start_indices:
            window = self.storage[start:start + seq_len]
            # transpose the window: list of transitions -> tuple per field
            for col, values in zip(columns, zip(*window)):
                col.append(np.array(values, copy=False))
        x, y, u, r, d = columns
        return (np.stack(x),
                np.stack(y),
                np.stack(u),
                np.stack(r).reshape(batch_size, seq_len),
                np.stack(d).reshape(batch_size, seq_len))
class ReplayDataset(Dataset):
    """
    FIFO replay buffer exposed as a torch Dataset, with memmap-based
    persistence: transitions are stored as (state, next_state, action,
    reward, done) tuples with the first three fields cast to float32.
    """
    def __init__(self, max_size=1e6):
        self.storage = []
        self.max_size = max_size

    def add(self, data):
        # Evict the oldest transition once full, then append with the
        # array fields normalized to float32. reward/done (data[3], data[4])
        # are stored as-is.
        if len(self.storage) == self.max_size:
            self.storage.pop(0)
        self.storage.append((
            data[0].astype('float32'),
            data[1].astype('float32'),
            data[2].astype('float32'),
            data[3],
            data[4]))

    def save(self, path):
        """Persist the buffer as five float32 memmap files plus a manifest of shapes."""
        os.makedirs(path, exist_ok=True)
        # One stacked array per transition field (x, y, u, r, d).
        stacked_arrays = [np.stack([d[i] for d in self.storage]) for i in range(5)]
        # NOTE(review): the manifest records float32 for all five arrays, so
        # reward/done are coerced to float32 on disk even if stored as ints.
        manifest = {
            'type': 'float32',
            'shapes': [array.shape for array in stacked_arrays]
        }
        torch.save(manifest, "{}/manifest.pt".format(path))
        saved_arrays = [np.memmap("{}/{}.npmm".format(path, i), dtype='float32', mode='write', shape=stacked_arrays[i].shape)
                        for i in range(5)]
        for saved_array, stacked_array in zip(saved_arrays, stacked_arrays):
            saved_array[:] = stacked_array[:]
            saved_array.flush()

    def load(self, path):
        """Rebuild self.storage from memmap files written by save()."""
        manifest = torch.load("{}/manifest.pt".format(path))
        saved_arrays = [np.memmap("{}/{}.npmm".format(path, i), mode='r', dtype=manifest['type'], shape=manifest['shapes'][i])
                        for i in range(5)]
        # Re-split the five field arrays back into per-transition tuples,
        # copying out of the memmaps so the files can be closed safely.
        self.storage = [tuple((np.array(saved_array[i]) for saved_array in saved_arrays))
                        for i in range(saved_arrays[0].shape[0])]
        self.storage = list(self.storage)

    def __len__(self):
        return len(self.storage)

    def __getitem__(self, i):
        return self.storage[i]
class DiskReplayDataset(Dataset):
    """Replay dataset backed by on-disk numpy memmaps with ring-buffer writes."""
    def __init__(self, path, max_size=1e6):
        self.storages = None  # lazily created on the first add()
        self.path = path
        self.max_size = int(max_size)
        self.pointer = 0  # total number of transitions ever added
    def create_disk_arrays(self, data):
        """Allocate one memmap per field of `data` and write a manifest."""
        def type_for(d):
            # Normalize any float dtype to float32 to save disk space.
            d_type = np.array(d).dtype
            if 'float' in str(d_type): d_type = 'float32'
            return d_type
        def shape_for(d): return (self.max_size,) + d.shape if isinstance(d, np.ndarray) else (self.max_size,)
        types = [type_for(d) for d in data]
        shapes = [shape_for(d) for d in data]
        os.makedirs(self.path, exist_ok=True)
        manifest = {
            'types': types,
            'shapes': shapes
        }
        torch.save(manifest, "{}/manifest.pt".format(self.path))
        self.storages = [np.memmap("{}/{}.npmm".format(self.path, i), dtype=types[i], mode='write', shape=shapes[i])
                         for i in range(len(data))]
        return self.storages
    def add(self, data):
        """Write one transition at the ring-buffer slot, allocating lazily.

        (Removed dead code: a `sizes` tuple was previously computed here and
        never used.)
        """
        if self.storages is None:
            self.create_disk_arrays(data)
        address = self.pointer % self.max_size
        for storage, data_elem in zip(self.storages, data):
            storage[address] = data_elem
        self.pointer += 1
    def save(self, path):
        """Flush the memmaps and rewrite the manifest with the pointer.

        NOTE(review): the manifest is written to self.path, not the `path`
        argument — kept as-is for compatibility; confirm intent with callers.
        """
        manifest = {
            'types': [s.dtype for s in self.storages],
            'shapes': [s.shape for s in self.storages],
            'pointer': self.pointer,
        }
        torch.save(manifest, "{}/manifest.pt".format(self.path))
        for storage in self.storages:
            storage.flush()
    def load(self, path):
        """Re-attach (read-only) to memmaps previously written under `path`."""
        manifest = torch.load("{}/manifest.pt".format(path))
        self.storages = [np.memmap("{}/{}.npmm".format(path, i), mode='r',
                                   dtype=manifest['types'][i],
                                   shape=manifest['shapes'][i])
                         for i in range(5)]
        self.pointer = manifest['pointer']
    def __len__(self):
        # Length saturates at capacity once the ring buffer wraps.
        return min(self.max_size, self.pointer)
    def __getitem__(self, i):
        # Copy each field out of the memmap so callers own the memory.
        return tuple(np.array(storage[i]) for storage in self.storages)
class EmbeddedReplayDataset(Dataset):
    """Dataset of stored (state, next_state, action, embedded_plan, plan_step,
    reward, done) tuples, served as contiguous trajectories of `traj_len`."""
    def __init__(self, max_size=1e6, traj_len=4):
        self.storage = []
        self.max_size = max_size
        self.traj_len = traj_len
    def add(self, data):
        """Append one transition, dropping the oldest once at capacity.

        The first four fields are stored as float32.
        """
        if len(self.storage) == self.max_size:
            self.storage.pop(0)
        float_fields = tuple(field.astype('float32') for field in data[:4])
        self.storage.append(float_fields + (data[4], data[5], data[6]))
    def __len__(self):
        # Only indices with a full trajectory ahead of them are valid.
        # NOTE(review): this can go negative while storage is nearly empty —
        # presumably callers only index after warm-up; confirm.
        return len(self.storage) - self.traj_len
    def __getitem__(self, i):
        window = self.storage[i:i + self.traj_len]
        # Transpose the list of tuples so each field becomes one sequence:
        # [(x, y, ...), ...] -> ((x, x, ...), (y, y, ...), ...)
        return tuple(np.array(field, copy=False) for field in zip(*window))
# Expects tuples of (state, next_state, action, embedded_plan, plan_step, reward, done)
class EmbeddedReplayBuffer(object):
    """FIFO replay buffer over embedded-plan transitions."""
    def __init__(self, max_size=1e6):
        self.storage = []
        self.max_size = max_size
    def add(self, data):
        """Append one transition, evicting the oldest when at capacity."""
        if len(self.storage) == self.max_size:
            self.storage.pop(0)
        self.storage.append(data)
    def sample(self, batch_size):
        """Uniformly sample `batch_size` independent transitions.

        Returns (x, y, u, e, i, r, d) with i, r, d shaped (batch_size, 1).
        """
        picks = np.random.randint(0, len(self.storage), size=batch_size)
        columns = ([], [], [], [], [], [], [])
        for pick in picks:
            for column, field in zip(columns, self.storage[pick]):
                column.append(np.array(field, copy=False))
        x, y, u, e, i, r, d = columns
        return (np.array(x),
                np.array(y),
                np.array(u),
                np.array(e),
                np.array(i).reshape(-1, 1),
                np.array(r).reshape(-1, 1),
                np.array(d).reshape(-1, 1))
    def __len__(self):
        return len(self.storage)
    def sample_seq(self, batch_size, seq_len):
        """Sample `batch_size` contiguous windows of length `seq_len`.

        Returns (x, y, u, e, i, r, d) with i, r, d shaped (batch_size, seq_len).
        """
        starts = np.random.randint(0, len(self.storage) - seq_len + 1, size=batch_size)
        columns = ([], [], [], [], [], [], [])
        for start in starts:
            window = self.storage[start:start + seq_len]
            # [(x, y, ...), ...] -> ((x, x, ...), (y, y, ...), ...)
            for column, field in zip(columns, zip(*window)):
                column.append(np.array(field, copy=False))
        x, y, u, e, i, r, d = columns
        return (np.stack(x),
                np.stack(y),
                np.stack(u),
                np.stack(e),
                np.stack(i).reshape(batch_size, seq_len),
                np.stack(r).reshape(batch_size, seq_len),
                np.stack(d).reshape(batch_size, seq_len))
def serialize_opt(opt):
    """Serialize an argparse-style namespace to pretty-printed, key-sorted JSON."""
    options = copy.deepcopy(vars(opt))
    return json.dumps(options, indent=4, sort_keys=True)
def write_options(opt, location):
    """Serialize `opt`, echo it to stdout, and persist it to <location>/opt.json."""
    with open(location + "/opt.json", 'w') as handle:
        serial_opt = serialize_opt(opt)
        print(serial_opt)
        handle.write(serial_opt)
        handle.flush()
def save_gif(filename, inputs, bounce=False, color_last=False, duration=0.05):
    """Write a sequence of image tensors to `filename` as an animated GIF.

    Tensors are moved to CPU, converted to HWC layout unless `color_last`,
    clamped to [0, 1], and quantized to uint8.  With `bounce` the animation
    plays forward then backward.  (`duration` is currently unused.)
    """
    def to_frame(tensor):
        frame = tensor.cpu()
        if not color_last:
            # CHW -> HWC
            frame = frame.transpose(0, 1).transpose(1, 2)
        frame = frame.clamp(0, 1)
        return (frame.cpu().numpy() * 255).astype('uint8')

    images = [to_frame(tensor) for tensor in inputs]
    if bounce:
        images = images + list(reversed(images[1:-1]))
    imageio.mimsave(filename, images)
def conv_out_dim(in_planes, out_planes, in_height, in_width, kernel_size,
                 stride=1, padding=0, dilation=1):
    """Spatial output size of a Conv2d with the given geometry.

    `in_planes`/`out_planes` are accepted for signature compatibility but do
    not affect the spatial dimensions.
    """
    effective_kernel = dilation * (kernel_size - 1)
    def one_dim(size):
        return math.floor((size + 2 * padding - effective_kernel - 1) / stride + 1)
    return one_dim(in_height), one_dim(in_width)
def conv_in_dim(out_height, out_width, kernel_size,
                stride=1, padding=0, dilation=1):
    """Input size a Conv2d needs to produce the given output size
    (inverse of `conv_out_dim`)."""
    effective_kernel = dilation * (kernel_size - 1)
    # (out - 1) * stride = in + 2 * padding - effective_kernel - 1
    def one_dim(size):
        return math.ceil((size - 1) * stride - 2 * padding + effective_kernel + 1)
    return one_dim(out_height), one_dim(out_width)
def conv_transpose_in_dim(out_height, out_width, kernel_size,
                          stride=1, padding=0, dilation=1):
    """Input size a ConvTranspose2d needs for the given output size.

    Uses the raw kernel size; `dilation` is accepted but ignored, matching
    the forward formula in `conv_transpose_out_dim`.
    """
    def one_dim(size):
        return math.ceil((size - kernel_size + 2 * padding) / stride + 1)
    return one_dim(out_height), one_dim(out_width)
def conv_transpose_out_dim(in_height, in_width, kernel_size,
                           stride=1, padding=0, dilation=1):
    """Spatial output size of a ConvTranspose2d with the given geometry.

    Uses the raw kernel size; `dilation` is accepted but ignored.  The
    expression is integral for integer inputs, so the previous mismatched
    math.ceil (height) / math.floor (width) pair has been unified — no
    behavior change for integer arguments, but the latent inconsistency
    for non-integer inputs is gone.
    """
    def one_dim(size):
        return (size - 1) * stride - 2 * padding + kernel_size
    return one_dim(in_height), one_dim(in_width)
def conv_list_out_dim(conv_layers, in_width, in_height):
    """Trace an (in_width, in_height) image through every Conv2d in `conv_layers`.

    Returns (out_width, out_height, out_channels).  `out_channels` is None
    when the list contains no Conv2d — previously this path raised
    UnboundLocalError because `last_channels` was never initialized.
    """
    last_channels = None
    for layer in conv_layers:
        if isinstance(layer, nn.Conv2d):
            in_height, in_width = conv_out_dim(layer.in_channels, layer.out_channels,
                in_height, in_width, layer.kernel_size[0], layer.stride[0],
                layer.padding[0], layer.dilation[0])
            last_channels = layer.out_channels
    return in_width, in_height, last_channels
def prod(l):
    """Product of all elements of `l` (left fold; errors on empty input)."""
    return functools.reduce(lambda acc, value: acc * value, l)
def flat_str(x):
    """Render a tensor as one tab-separated line of signed 3-decimal floats."""
    values = x.cpu().detach().view([-1]).numpy()
    template = "{:+06.3f}\t" * len(values)
    return template.format(*values)
| willwhitney/dynamics-aware-embeddings | rl/utils.py | utils.py | py | 14,213 | python | en | code | 42 | github-code | 36 |
37862197413 | import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
import torch
from typing import List
from patent_phrase_similarity.data.analysis.visualize_datasets import plot_score_vs_test_score
from patent_phrase_similarity.data.transformation.cpc_datasets import Datasets, CPCDatasets
def get_similar_score_v1(model, context: str, similar_sentences: List[str]) -> List[float]:
    """Cosine similarity between `context` and each candidate sentence,
    using the sentence-transformer `model` for embeddings."""
    embeddings = model.encode([context] + similar_sentences)
    print("Sentences Shape", embeddings.shape)
    context_embedding = embeddings[0]
    candidate_embeddings = embeddings[1:]
    return cosine_similarity([context_embedding], candidate_embeddings)[0]
def get_similar_score_v2(tokenizer, model, context: str, similar_sentences: List[str]) -> List[float]:
    """Cosine similarity between `context` and each candidate sentence using a
    raw transformer: tokenize, mean-pool the last hidden state over the
    attention mask, then compare embeddings.
    """
    tokens = {'input_ids': [], 'attention_mask': []}
    _sentences = [context] + similar_sentences
    # BUG FIX: iterate the local list, not the module-level `sentences`
    # global — previously this silently embedded unrelated sentences and
    # ignored the function's own arguments.
    for sentence in _sentences:
        # encode each sentence and append to dictionary
        new_tokens = tokenizer.encode_plus(sentence, max_length=128,
                                           truncation=True, padding='max_length',
                                           return_tensors='pt')
        tokens['input_ids'].append(new_tokens['input_ids'][0])
        tokens['attention_mask'].append(new_tokens['attention_mask'][0])
    # reformat list of tensors into single tensor
    tokens['input_ids'] = torch.stack(tokens['input_ids'])
    tokens['attention_mask'] = torch.stack(tokens['attention_mask'])
    outputs = model(**tokens)
    embeddings = outputs.last_hidden_state
    attention_mask = tokens['attention_mask']
    print("Attention Mask Tensor", attention_mask.shape)
    # Zero out padding positions before pooling.
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    masked_embeddings = embeddings * mask
    # Sum the remaining embeddings along the sequence axis...
    summed = torch.sum(masked_embeddings, 1)
    # ...and count the attended positions (clamped to avoid division by zero).
    summed_mask = torch.clamp(mask.sum(1), min=1e-9)
    # Mean pooling: summed activations / number of attended positions.
    mean_pooled = summed / summed_mask
    # convert from PyTorch tensor to numpy array
    mean_pooled = mean_pooled.detach().numpy()
    similarity_distance = cosine_similarity(
        [mean_pooled[0]],
        mean_pooled[1:]
    )[0]
    return similarity_distance
if __name__ == '__main__':
    # Two flavors of sentence encoder over the same pretrained weights:
    # a SentenceTransformer wrapper (v1) and a raw transformer + manual
    # mean pooling (v2).
    model_V1 = SentenceTransformer('bert-base-nli-mean-tokens')
    tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
    model_v2 = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
    # Smoke-test sentences, kept for manual experimentation.
    # (Large blocks of commented-out scoring experiments were removed.)
    sentences = [
        "Three years later, the coffin was still full of Jello.",
        "The fish dreamed of escaping the fishbowl and into the toilet where he saw his friend go.",
        "The person box was packed with jelly many dozens of months later.",
        "He found a leprechaun in his walnut shell."
    ]
    # Score every anchor/target pair of the training set with the v1 model
    # and compare against the labelled similarity score.
    datasets = Datasets()
    cpc_datasets = CPCDatasets()
    train_df = datasets.get_train_df()
    cpc_train_df = cpc_datasets.merge_with_df(train_df)
    cpc_train_df['test_score'] = cpc_train_df.apply(lambda row: get_similar_score_v1(model_V1, row['anchor'], [row['target']])[0], axis=1)
    plot_score_vs_test_score(cpc_train_df, column_a='score', column_b='test_score')
| vquilon/kaggle-competitions | patent-phrase-to-phrase-matching/models/transformers_similarity.py | transformers_similarity.py | py | 6,450 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    def buildTree(self, preorder, inorder):
        """Rebuild a binary tree from its preorder and inorder traversals."""
        if not preorder or not inorder:
            return None
        # The first preorder element is the root; its position in the
        # inorder list splits the remaining values into left/right subtrees.
        root_value = preorder[0]
        split = inorder.index(root_value)
        root = TreeNode(root_value)
        # The left subtree occupies the next `split` preorder elements.
        root.left = self.buildTree(preorder[1:split + 1], inorder[:split])
        root.right = self.buildTree(preorder[split + 1:], inorder[split + 1:])
        return root
| ZhengLiangliang1996/Leetcode_ML_Daily | Tree/105_ConstructBinaryTreeFromPreorderandInorder.py | 105_ConstructBinaryTreeFromPreorderandInorder.py | py | 571 | python | en | code | 1 | github-code | 36 |
# Desafio 31 (Débora Janini)
# Reads a count and a list of integers, then prints the smallest value
# and the index of its first occurrence.
N = int(input())
values = list(map(int, input().split()))
smallest = min(values)            # min returns the smallest element
position = values.index(smallest) # index of the first occurrence
print(f'Menor valor: {smallest}')
print(f'Posicao: {position}')
29465284343 | ## This module aims to analyze the effect of angle distribuiton
import numpy as np
import math as mt
import matplotlib.pyplot as plt
import Kinematics
import scipy.integrate as integrate
import scipy.optimize as opt
# --- Physical constants and model parameters (CGS-flavored units) ---
# kiloparsec expressed in centimeters
kpc_in_cm = 3.08567758e21
# speed of light (cm/s)
vc = 3e10
# Neutrino
M_nu = 0.32 # Unit:ev/c2
E_total_nu = 3.6e53*6.24150913e11 #Total energy of neutrinos #Transfer 2e51erg to unit of MeV
E_per_nu = 10e6 #Mean energy of each neutrino #estimated value
# Dark-matter particle mass (eV/c^2)
M_DM = 1e03
# NFW halo profile parameters: characteristic density and scale radius
rho_s = 0.184e9
rs=24.42*kpc_in_cm
# neutrino-DM scattering cross section (cm^2)
cs = 1e-30
def angle_norm(beta,r,R):
    """Normalization integral of the boosted scattering-angle distribution.

    Integrates over the line-of-sight angle psi in [0, psi_max] for a
    scatterer at radius `r` seen by an observer at distance `R`; `beta`
    is the particle velocity in units of c.
    NOTE(review): formulas transcribed from the physics derivation in this
    module; not independently verified here.
    """
    def f(psi):
        # rp: distance from the scattering point to the observer.
        rp = (R**2+r**2-2*R*r*np.cos(psi))**0.5
        cos_theta_psi = ((R**2+(r*np.cos(psi))**2-2*R*r*np.cos(psi))/(R**2+r**2-2*R*r*np.cos(psi)))**0.5
        def g(psi):
            def theta(psi):
                # Scattering angle theta corresponding to line-of-sight angle psi.
                return np.arctan(1/(1/(np.tan(psi))-r/(R*np.sin(psi))))
            theta = theta(psi)
            sec = 1/np.cos(theta)
            # Relativistically boosted angular weight.
            return 4*np.tan(theta)*(sec**2)*(1-beta*beta)/((sec**2-beta**2)**2)/np.sin(theta)
        return np.sin(psi)*g(psi)*cos_theta_psi/(rp**2)/2.
    theta_max = np.pi/2
    def psi(theta):
        # Geometric relation between scattering angle and line-of-sight angle.
        return theta - np.arcsin(r/R*np.sin(theta))
    psi_max = psi(theta_max)
    return integrate.nquad(f, [[0,psi_max]])[0]
def DM_number(m_dm,e_per_nu,start,end,n_total):
    """Column number of boosted DM particles (per cm^2) between `start` and
    `end`, integrating the NFW profile weighted by the boosted angular
    distribution over (r, theta).

    NOTE(review): physics formulas taken as-is from the derivation; `gamma`,
    `time_delay` and `l` are computed but unused here.
    """
    beta = (E_per_nu**2- M_nu**2)**0.5/(E_per_nu+M_DM)
    gamma = (1-beta**(2))**0.5
    time_delay = np.sum((start-end)**2)**0.5*(1/beta-1)/(vc)
    R = (np.sum((start-end)**2))**0.5
    l = end -start
    def f(r,theta):
        # psi: line-of-sight angle for a scatterer at radius r.
        psi = theta - np.arcsin(r/R*np.sin(theta))
        dpsi_dtheta = 1- r/R*np.cos(theta) /(1-(r/R*np.sin(theta))**2)**0.5
        # x: galactocentric radius of the scattering point in units of rs.
        x=(np.sum((start + np.array([r*np.cos(psi),0,r*np.sin(psi)]))**2))**0.5/rs
        cos_theta_psi=((R**2+(r*np.cos(psi))**2-2*R*r*np.cos(psi))/(R**2+r**2-2*R*r*np.cos(psi)))**0.5
        theta = np.arctan(1/(1/(np.tan(psi))-r/(R*np.sin(psi))))
        sec = 1/np.cos(theta)
        # Boosted per-solid-angle distribution.
        dn_domega= (sec**3)*(1-beta*beta)/((sec**2-beta**2)**2)/np.pi
        # Integrand: geometry x angular weight x NFW density profile 1/(x(1+x)^2).
        return np.sin(psi) *dpsi_dtheta *R/(R**2+r**2-2*R*r*np.cos(psi))*dn_domega/(x*(1+x)*(1+x))
    k = n_total*rho_s*cs/m_dm/(4*np.pi)
    r0 = 0.01*rs
    result =integrate.dblquad(f, 0, np.pi/2., lambda theta: r0, lambda theta: R)
    print(result[1])
    L_dm = result[0]*2*np.pi*k/R
    print("DM Number(1/cm^2):"+str(L_dm))
    return L_dm
def DM_flux(m_dm,e_per_nu,start,end,n_total,t):
    """Time-dependent boosted-DM flux (#/s/cm^2) at the observer at time `t`
    after the burst, accounting for the velocity-dependent arrival delay.

    NOTE(review): physics formulas taken as-is; `a`, `gamma`, `time_delay`
    and `l` (outer scope) are computed but unused.
    """
    beta = (E_per_nu**2- M_nu**2)**0.5/(E_per_nu+M_DM)
    a =(vc)*t
    #print(beta)
    gamma = (1-beta**(2))**0.5
    time_delay = np.sum((start-end)**2)**0.5*(1/beta-1)/(vc)
    R = (np.sum((start-end)**2))**0.5
    l = end -start
    def get_dtheta_dt(r,t):
        # For a scatterer at radius r, find the scattering angle theta whose
        # arrival time matches t, then return d(theta)/dt at that point.
        def get_theta():
            def f(theta):
                #v = vc *beta *((1-beta**2)/(np.cos(theta)**(-2)-beta**2))**0.5
                eta = 2* np.arctan(np.tan(theta)/(1-beta**2)**0.5)
                root = ((1+np.cos(eta))**2 + (np.sin(eta)**2) *(1-beta**2))**0.5
                v = vc *beta *root/(1+beta**2*np.cos(eta))
                l = v*(t-r/vc)
                # Time-of-flight closure condition (law of cosines).
                return l**2+ r**2 - R**2 + 2*r*l*np.cos(theta)
            sol = opt.fsolve(f, np.pi/4)
            return sol[0]
        theta = np.abs(get_theta())
        psi = theta - np.arcsin(r/R*np.sin(theta))
        eta = 2* np.arctan(np.tan(theta)/(1-beta**2)**0.5)
        l = R*np.sin(psi)/np.sin(theta)
        #v = vc *beta *((1-beta**2)/(np.cos(theta)**(-2)-beta**2))**0.5
        eta = 2* np.arctan(np.tan(theta)/(1-beta**2)**0.5)
        root = ((1+np.cos(eta))**2 + (np.sin(eta)**2) *(1-beta**2))**0.5
        v = vc *beta *root/(1+beta**2*np.cos(eta))
        sec = np.cos(theta)**(-1)
        # Derivatives needed for dt/dtheta via the chain rule.
        dl_dtheta = r*np.sin(theta)-r**2*np.sin(2*theta)/((R**2-r**2*np.sin(theta)**2)**0.5)
        #dv_dtheta = -vc*beta*np.sin(eta/2.)/2. *2*sec**2*(1-beta*beta)**0.5/(sec**2-beta**2)
        d_eta_dtheta = 2*sec**2*(1-beta*beta)**0.5/(sec**2-beta**2)
        eba = (1+beta**2*np.cos(eta))
        dv_dtheta = vc*beta *d_eta_dtheta* ( root*(beta**2*np.sin(eta)) - eba/root*(np.sin(eta)+beta**2*np.sin(eta)*np.cos(eta)) )/eba**2
        dt_dtheta = dl_dtheta/v -l/(v**2)*dv_dtheta
        return 1/dt_dtheta, theta, psi
    def f(r):
        # Flux integrand over scatterer radius r at fixed observation time t.
        dtheta_dt, theta, psi = get_dtheta_dt(r,t)
        dpsi_dtheta = 1- r/R*np.cos(theta) /(1-(r/R*np.sin(theta))**2)**0.5
        # x: galactocentric radius of the scattering point in units of rs.
        x=(np.sum((start + np.array([r*np.cos(psi),0,r*np.sin(psi)]))**2))**0.5/rs
        cos_theta_psi=((R**2+(r*np.cos(psi))**2-2*R*r*np.cos(psi))/(R**2+r**2-2*R*r*np.cos(psi)))**0.5
        sec = 1/np.cos(theta)
        dn_domega= (sec**3)*(1-beta*beta)/((sec**2-beta**2)**2)/np.pi
        return np.sin(psi) *dpsi_dtheta *R/(R**2+r**2-2*R*r*np.cos(psi))*dn_domega/(x*(1+x)*(1+x)) *dtheta_dt
    k = n_total*rho_s*cs/m_dm/(4*np.pi)
    r0 = 0.01*rs
    result = integrate.quad(f,r0,R)[0]
    # Clamp numerically-negative results to zero.
    if result<0:
        return 0
    L_dm = result*2*np.pi*k/R
    return L_dm
def DM_number_original(m_dm,e_per_nu,start,end,n_total):
    """Reference (no angular-distribution) DM column number per cm^2:
    a straight line-of-sight integral of the NFW profile from `start` to
    `end`, used as a baseline for the angle-resolved calculations.
    """
    beta = (E_per_nu**2- M_nu**2)**0.5/(E_per_nu+M_DM)
    gamma = (1-beta**(2))**0.5
    time_delay = np.sum((start-end)**2)**0.5*(1/beta-1)/(vc)
    R = (np.sum((start-end)**2))**0.5
    l = end -start
    def f(t):
        # t parametrizes the straight path; r is the galactocentric radius.
        r=(np.sum((start+l*t)**2))**0.5
        x= r/rs
        # NFW density profile 1/(x(1+x)^2).
        return 1/(x*(1+x)*(1+x))
    k = n_total*rho_s*cs/m_dm/(4*np.pi) /R
    r0 = 0.01*rs/R
    L_dm = integrate.nquad(f, [[r0,1.]])[0]*k
    print("DM Number original(1/cm^2):"+str(L_dm))
    return L_dm
def Spectrum(m_dm,e_per_nu,start,end,n_total):
    """Plot the normalized boosted-DM energy spectrum dL/dE.

    Computes the normalization over (r, theta), then evaluates dL/dE on a
    theta grid, maps theta to DM energy, and shows a matplotlib plot.
    NOTE(review): physics formulas taken as-is; `time_delay`, `l`, `E_0`,
    `E_f` and the inner `dE_dtheta` of dL_dEdR are computed but unused.
    """
    beta = (E_per_nu**2- M_nu**2)**0.5/(E_per_nu+M_DM)
    gamma = (1-beta**(2))**0.5
    time_delay = np.sum((start-end)**2)**0.5*(1/beta-1)/(vc)
    R = (np.sum((start-end)**2))**0.5
    l = end -start
    def dL_dEdR(r,theta):
        # Differential contribution at (r, theta); used for normalization.
        dE_dtheta = M_DM *(beta**2)*np.sin(theta)/(1-beta**2)
        def f(r):
            psi = theta - np.arcsin(r/R*np.sin(theta))
            x=(np.sum((start + np.array([r*np.cos(psi),0,r*np.sin(psi)]))**2))**0.5/rs
            cos_theta_psi= np.cos(theta-psi)
            sec = 1/np.cos(theta)
            dpsi_dtheta = 1- r/R*np.cos(theta) /(1-(r/R*np.sin(theta))**2)**0.5
            dn_domega= (sec**3)*(1-beta*beta)/((sec**2-beta**2)**2)/np.pi
            return np.sin(psi)*cos_theta_psi*R /(R**2+r**2-2*R*r*np.cos(psi))*dn_domega*dpsi_dtheta/(x*(1+x)*(1+x))
        return f(r)
    E_0 = M_DM*1.001
    E_f = M_DM/(1-beta**2)*(1+(beta**2))*0.999
    norm = integrate.dblquad(dL_dEdR, 0, np.pi/2, lambda theta: 0, lambda theta: R)[0]*2*np.pi
    print(norm)
    def dL_dE(theta):
        # Spectrum at a given theta: radial integral divided by the
        # Jacobian dE/dtheta of the theta -> energy mapping.
        sec = 1/np.cos(theta)
        dE_dtheta = M_DM *(beta**2)/(1-beta**2) *4*(gamma**2)*np.tan(theta)*(sec**2) /(1+(gamma**2)*(np.tan(theta)**2))
        def f(r):
            psi = theta - np.arcsin(r/R*np.sin(theta))
            x=(np.sum((start + np.array([r*np.cos(psi),0,r*np.sin(psi)]))**2))**0.5/rs
            cos_theta_psi= np.cos(theta-psi)
            sec = 1/np.cos(theta)
            dpsi_dtheta = 1- r/R*np.cos(theta) /(1-(r/R*np.sin(theta))**2)**0.5
            dn_domega= (sec**3)*(1-beta*beta)/((sec**2-beta**2)**2)/np.pi
            return np.sin(psi)*cos_theta_psi*R /(R**2+r**2-2*R*r*np.cos(psi))*dn_domega*dpsi_dtheta/(x*(1+x)*(1+x))
        return integrate.nquad(f, [[0.01*R,R]])[0] /dE_dtheta
    theta =np.linspace(0.001,np.pi/2,1000)
    # Map each theta to the corresponding boosted DM energy.
    E = [M_DM/(1-beta**2)*(1+(beta**2)*(1-(gamma**2)*(np.tan(theta[i])**2)) /(1+(gamma**2)*(np.tan(theta[i])**2)))for i in range(0,1000)]
    spec = [dL_dE(theta[i])/norm for i in range(0,1000)]
    plt.plot(E, spec, color ='blue')
    plt.xlabel('E(eV)')
    plt.ylabel('dL_chi/dE(1/cm**2 eV)')
    plt.show()
if __name__== '__main__':
    # Driver: compute the reference column number, then plot the flux
    # light-curve over one of two time windows selected by `mode`.
    beta = (E_per_nu**2- M_nu**2)**0.5/(E_per_nu+M_DM)
    gamma = (1-beta**(2))**0.5
    print(beta)
    print("Angle Norm:"+str(angle_norm(beta,0.5,1)))
    # Source at the galactic center, observer 8.7 kpc away.
    start=np.array([8.7*0.0*kpc_in_cm,0,0.*3.08567758e18])
    end =np.array([8.7*kpc_in_cm,0,0.*3.08567758e18])
    ref = DM_number(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu)
    # t0: light-travel time for 1 kpc.
    t0 = kpc_in_cm/(vc)
    mode = 1
    if mode==0:
        # Three piecewise-linear time grids covering successively later epochs.
        N = 100
        s = np.linspace(-3,-0.5,100)
        #t = [8.7*(0.99+10**(s[i]))*t0 for i in range(len(s))]
        t = np.linspace(8.7*(0.99+9e-3),8.7*(0.99+1e-2),N)*t0
        flux = [DM_flux(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu,t[i]) for i in range(len(t))]
        t2 = np.linspace(8.7*(0.99+1e-2),8.7*(0.99+1e-1),N)*t0
        flux2 = [DM_flux(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu,t2[i]) for i in range(len(t))]
        t3 = np.linspace(8.7*(0.99+1e-1),8.7*(0.99+1e-0),N)*t0
        flux3 = [DM_flux(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu,t3[i]) for i in range(len(t))]
        #ref = DM_number_original(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu)
        # Trapezoid-rule time integrals of each flux segment.
        inter = [(flux[i+1]+flux[i])/2*(t[i+1]-t[i])for i in range(len(t)-1)]
        inter2 = [(flux2[i+1]+flux2[i])/2*(t2[i+1]-t2[i])for i in range(len(t)-1)]
        inter3 = [(flux3[i+1]+flux3[i])/2*(t3[i+1]-t3[i])for i in range(len(t)-1)]
        t=np.concatenate((t, t2), axis=None)
        t=np.concatenate((t, t3), axis=None)
        flux=np.concatenate((flux, flux2), axis=None)
        flux=np.concatenate((flux, flux3), axis=None)
        # Sanity check: integrated flux should approach the column number.
        print("ratio:"+str(np.sum(inter+inter2+inter3)/ref))
        plt.plot(t,flux)
        plt.xscale('log')
        plt.yscale('log')
        plt.show()
    elif mode==1:
        # Log-spaced offsets shortly after the photon arrival time delta_t.
        s = np.linspace(-8,-4,100)
        r0 = 0.01*rs
        delta_t = 8.7*t0 #- r0/vc
        t = [ delta_t+ 8.7*(10**(s[i]))*t0 for i in range(len(s))]
        flux = [DM_flux(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu,t[i]) for i in range(len(t))]
        plt.plot(t-delta_t*np.ones(len(t)),flux)
        plt.xscale('log')
        plt.yscale('log')
        plt.xlabel('t(s)')
        plt.ylabel('Flux (#/s/cm^2)')
        plt.show()
        # Trapezoid-rule integral compared with the reference column number.
        inter = [(flux[i+1]+flux[i])/2*(t[i+1]-t[i])for i in range(len(t)-1)]
        print("ratio:"+str(np.sum(inter)/ref))
16379803548 | # %% preliminaries
import sys, os
from pylab import *
from treefarm import *
from treefarm.minimizers.flatgp import FlatGPMinimizer, expected_improvement
from treefarm.core.utils import get_minium_states
from treefarm.core.space_utils import get_subspace, to_space
from time import sleep
import GPy
# Mapping of kernel names to GPy kernel classes used in the search grammar.
kernel_dict = {
    'RBF' : GPy.kern.RBF,
    'EXP' : GPy.kern.Exponential,
    'BIAS': GPy.kern.Bias,
    'LINEAR' : GPy.kern.Linear,
}
# K: primitive search-space node — pick one kernel name, look up its class,
# and instantiate it with input dimension 1 (treefarm operator DSL).
K = convert(set(kernel_dict.keys()))
K.symbol = 'K'
K = op(kernel_dict.get)(K)(1)
# G: recursive grammar over kernel compositions — sums, products, or a
# single kernel (G ::= K+G | K*G | K).
G = join()
G <<= K+G | K*G | K
G = simplify(G)
# Resume previously recorded kernel names, if any.
if os.path.exists('wild_kernel_names.npy'):
    names = list(np.load('wild_kernel_names.npy'))
else:
    names = []
# Resume previously recorded performance curves, if any.
if os.path.exists('wild_kernel_search.npy'):
    perf_stor = list(np.load('wild_kernel_search.npy'))
else:
    perf_stor = []
def str_deep(kern):
    """Recursively render a GPy kernel tree as 'name(child,child,...)'."""
    try:
        # Composite kernels expose their children via `.parts`.
        parts = [str_deep(child) for child in kern.parts]
        return kern.name + '(' + ','.join(parts) + ')'
    except AttributeError:
        # Leaf kernels have no `.parts`.  A bare `except:` here previously
        # also swallowed KeyboardInterrupt/SystemExit and genuine bugs.
        return kern.name
@operation
def test_kernel(kernel):
    """Score a candidate GP kernel by averaging minimization traces.

    Runs `outer_iterations` independent GP-based minimizations of a fixed
    1-D test function with the given kernel, accumulates the running-minimum
    curves, appends the averaged curve to the module-level `perf_stor`/`names`
    records, and returns the final averaged minimum (lower is better).
    """
    def f(x):
        # 1-D test objective with a narrow dip near x = 10.
        return sin(x) * x / 2 + x**2 / 20 - 10*exp(-(x - 10)**2) + 8
    search_space = op(f)(R[-1000:1000])
    print('test kernel:', str_deep(kernel))
    outer_iterations = 10
    for i in range(outer_iterations):
        print('iteration', i, end='')
        obj = minimize(
            search_space = search_space,
            max_iter = 30,
            minimizer = FlatGPMinimizer(search_space, kernel=kernel),
        )
        obj.run()
        # Accumulate running-minimum traces across iterations; the
        # `locals()` test initializes the accumulator on the first pass.
        if 'min_perfs' not in locals():
            min_perfs = np.array(get_minium_states(obj.minimizer.protocol))
        else:
            min_perfs += np.array(get_minium_states(obj.minimizer.protocol))
        print(':', min_perfs[-1]/(i+1))
    out = min_perfs / outer_iterations
    perf_stor.append(out)
    names.append(str_deep(kernel))
    return out[-1]
# Outer search: minimize the kernel-scoring operation over the grammar G.
kernel_ss = test_kernel(G)
min_obj = minimize(kernel_ss, max_iter=10)
#min_obj.run()
def plot_and_dump():
    """Persist the recorded search results and plot performance curves.

    Dumps `perf_stor`/`names` to the .npy files read at module import, then
    draws linear and log-scale performance plots (pylab star imports).
    """
    _perf_stor = np.array(perf_stor)
    _perf_stor.dump('wild_kernel_search.npy')
    np.array(names).dump('wild_kernel_names.npy')

    # %%
    figure("Performances")
    for k, mperfs in zip(names, _perf_stor):
        plot(mperfs, label=str(k))
    legend()
    figure("log-Performances")
    for k, mperfs in zip(names, _perf_stor):
        plot(np.log(mperfs), label=str(k))
    legend()
    show()
| Mome/baumschule | examples/test_different_kernels.py | test_different_kernels.py | py | 2,382 | python | en | code | 0 | github-code | 36 |
16655733054 | from hypothesis import given, assume, strategies as st
from quadratic import quadratic
import cmath
@given(a = st.floats(min_value=-10000, max_value=10000),
       b = st.floats(min_value=-10000, max_value=10000),
       c = st.floats(min_value=-10000, max_value=10000))
def test_quad(a, b, c):
    """Property test: both roots returned by quadratic() satisfy the equation.

    `a` is bounded away from zero so the equation stays quadratic.
    """
    assume(abs(a) >= 0.0001)
    x1, x2 = quadratic(a, b, c)
    assert cmath.isclose(a*x1**2 + b*x1 + c, 0.0, abs_tol=0.0001)
    # BUG FIX: the second assertion previously re-checked x1, leaving x2
    # entirely unverified.
    assert cmath.isclose(a*x2**2 + b*x2 + c, 0.0, abs_tol=0.0001)
if __name__ == '__main__':
    # Run the property test directly (hypothesis drives the examples).
    test_quad()
| rhettinger/modernpython | test_quadratic.py | test_quadratic.py | py | 535 | python | en | code | 438 | github-code | 36 |
# Copyright (C) 2020-2021 Burak Martin (see 'AUTHOR' for full notice)
"""
Enable/disable parallelism, caching and nogil. The program needs to be restarted to take effect since these options
only affect numba functions, which need to be recompiled.
"""
parallel = True  # numba: compile with parallel execution support
cache = True  # numba: cache compiled functions on disk
nogil = True  # numba: release the GIL inside compiled functions
| pymatting/pymatting-interactive-tool | config/config.py | config.py | py | 308 | python | en | code | 5 | github-code | 36 |
16662671 | load("@obazl_rules_ocaml//ocaml:providers.bzl", "BuildConfig", "OpamConfig")
# Opam package constraints for the build: values are either a constraint
# string (opam filter syntax) or a pinned-version list.
opam_pkgs = {
    "ocaml": ">= 4.04.0",
    "dune": ">= 1.2.0",
    "ounit": "with-test & >= 1.0.2",
    "ppx_sexp_conv": "with-test & >= v0.9.0",
    "stringext": ["1.4.0"],
    "angstrom": ["0.14.0"],
}

# Opam configuration: a single default build on switch/compiler 4.12 that
# installs the packages above.
opam = OpamConfig(
    version = "2.0",
    builds = {
        "4.12": BuildConfig(
            default = True,
            switch = "4.12",
            compiler = "4.12",
            packages = opam_pkgs,
            install = True,
        ),
    },
)
| tweag/ocaml-uri-bazel | bzl/opam.bzl | opam.bzl | bzl | 541 | python | en | code | 0 | github-code | 36 |
def gcd(n1, n2):
    """Greatest common divisor of n1 and n2 via Euclid's algorithm."""
    if n2 > 0:
        return gcd(n2, n1 % n2)
    return n1
def solution(denum1, num1, denum2, num2):
    """Add the fractions denum1/num1 + denum2/num2 and return the reduced
    [numerator, denominator].

    Uses math.gcd and integer division so the result stays integral: gcd
    divides both terms evenly, and the previous float `/` division returned
    e.g. [5.0, 4.0] instead of [5, 4].
    """
    import math
    # 1. Sum the two fractions over a common denominator.
    denominator = num1 * num2
    numerator = denum1 * num2 + denum2 * num1
    # 2. Reduce by the greatest common divisor.
    divisor = math.gcd(numerator, denominator)
    return [numerator // divisor, denominator // divisor]
19691521791 | import torch
from torch import nn
import torch.nn.functional as F
from utils import gelu, LayerNorm
from transformer import TransformerLayer, Embedding, LearnedPositionalEmbedding, SelfAttentionMask
from label_smoothing import LabelSmoothing
class BIGLM(nn.Module):
def __init__(self, local_rank, vocab, embed_dim, ff_embed_dim, num_heads, dropout, layers, smoothing_factor, approx=None):
super(BIGLM, self).__init__()
self.vocab = vocab
self.embed_dim = embed_dim
self.tok_embed = Embedding(self.vocab.size, embed_dim, self.vocab.padding_idx)
self.pos_embed = LearnedPositionalEmbedding(embed_dim, device=local_rank)
self.layers = nn.ModuleList()
for i in range(layers):
self.layers.append(TransformerLayer(embed_dim, ff_embed_dim, num_heads, dropout, with_external=True))
self.emb_layer_norm = LayerNorm(embed_dim)
self.one_more = nn.Linear(embed_dim, embed_dim)
self.one_more_layer_norm = LayerNorm(embed_dim)
self.out_proj = nn.Linear(embed_dim, self.vocab.size)
self.attn_mask = SelfAttentionMask(device=local_rank)
self.smoothing = LabelSmoothing(local_rank, self.vocab.size, self.vocab.padding_idx, smoothing_factor)
self.dropout = dropout
self.device = local_rank
self.approx = approx
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.one_more.bias, 0.)
nn.init.normal_(self.one_more.weight, std=0.02)
nn.init.constant_(self.out_proj.bias, 0.)
nn.init.normal_(self.out_proj.weight, std=0.02)
def label_smotthing_loss(self, y_pred, y, y_mask, avg=True):
seq_len, bsz = y.size()
y_pred = torch.log(y_pred.clamp(min=1e-8))
loss = self.smoothing(y_pred.view(seq_len * bsz, -1), y.view(seq_len * bsz, -1))
if avg:
return loss / torch.sum(y_mask)
else:
return loss / bsz
def nll_loss(self, y_pred, y, y_mask, avg=True):
cost = -torch.log(torch.gather(y_pred, 2, y.view(y.size(0), y.size(1), 1)))
cost = cost.view(y.shape)
y_mask = y_mask.view(y.shape)
if avg:
cost = torch.sum(cost * y_mask, 0) / torch.sum(y_mask, 0)
else:
cost = torch.sum(cost * y_mask, 0)
cost = cost.view((y.size(1), -1))
ppl = 2 ** cost
return cost.sum().item(), ppl.sum().item()
def work_incremental(self, enc, src_padding_mask, ys_inp, ys_tpl, ys_seg, ys_pos, incremental_state=None):
seq_len, bsz = ys_inp.size()
x = self.tok_embed(ys_inp) + self.pos_embed(ys_inp) + self.tok_embed(ys_tpl) + self.tok_embed(ys_seg) + self.tok_embed(ys_pos)
x = self.emb_layer_norm(x)
padding_mask = torch.eq(ys_inp, self.vocab.padding_idx)
if not padding_mask.any():
padding_mask = None
if incremental_state is None:
self_attn_mask = self.attn_mask(seq_len)
incremental_state = {}
else:
x = x[-1, :, :].unsqueeze(0)
self_attn_mask = None
for layer in self.layers:
x, _ ,_ = layer.work_incremental(x, self_padding_mask=padding_mask, \
self_attn_mask=self_attn_mask, \
external_memories = enc, \
external_padding_mask = src_padding_mask, \
incremental_state = incremental_state)
x = self.one_more_layer_norm(gelu(self.one_more(x)))
probs = torch.softmax(self.out_proj(x), -1)
_, pred_y = probs.max(-1)
return probs, pred_y, incremental_state
def work(self, enc, src_padding_mask, ys_inp, ys_tpl, ys_seg, ys_pos):
seq_len, bsz = ys_inp.size()
self_attn_mask = self.attn_mask(seq_len)
x = self.tok_embed(ys_inp) + self.pos_embed(ys_inp) + self.tok_embed(ys_tpl) + self.tok_embed(ys_seg) + self.tok_embed(ys_pos)
x = self.emb_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
padding_mask = torch.eq(ys_inp, self.vocab.padding_idx)
if not padding_mask.any():
padding_mask = None
for layer in self.layers:
x, _ ,_ = layer(x, self_padding_mask=padding_mask, \
self_attn_mask = self_attn_mask, \
external_memories = enc, \
external_padding_mask = src_padding_mask,)
x = self.one_more_layer_norm(gelu(self.one_more(x)))
probs = torch.softmax(self.out_proj(x), -1)
_, pred_y = probs.max(-1)
return probs, pred_y
def encode(self, xs_tpl, xs_seg, xs_pos):
padding_mask = torch.eq(xs_tpl, self.vocab.padding_idx)
x = self.tok_embed(xs_tpl) + self.tok_embed(xs_seg) + self.tok_embed(xs_pos)
x = self.emb_layer_norm(x)
return x, padding_mask
    def ppl(self, xs_tpl, xs_seg, xs_pos, ys_truth, ys_inp, ys_tpl, ys_seg, ys_pos, msk):
        """Teacher-forced evaluation pass.

        Encodes the template side, decodes ys_inp against it, and scores the
        predictions against ys_truth under mask msk.

        Returns (nll, ppl, bsz) as computed by self.nll_loss.
        """
        enc, src_padding_mask = self.encode(xs_tpl, xs_seg, xs_pos)
        seq_len, bsz = ys_inp.size()
        self_attn_mask = self.attn_mask(seq_len)
        x = self.tok_embed(ys_inp) + self.pos_embed(ys_inp) + self.tok_embed(ys_tpl) + self.tok_embed(ys_seg) + self.tok_embed(ys_pos)
        x = self.emb_layer_norm(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # NOTE: the padding mask is derived from ys_truth here, whereas work()
        # derives it from ys_inp.
        padding_mask = torch.eq(ys_truth, self.vocab.padding_idx)
        if not padding_mask.any():
            padding_mask = None
        for layer in self.layers:
            x, _ ,_ = layer(x, self_padding_mask=padding_mask, \
                    self_attn_mask = self_attn_mask, \
                    external_memories = enc, \
                    external_padding_mask = src_padding_mask,)
        # Output head: linear -> GELU -> layer norm, then softmax over the vocab.
        x = self.one_more_layer_norm(gelu(self.one_more(x)))
        pred = torch.softmax(self.out_proj(x), -1)
        nll, ppl = self.nll_loss(pred, ys_truth, msk)
        return nll, ppl, bsz
    def forward(self, xs_tpl, xs_seg, xs_pos, ys_truth, ys_inp, ys_tpl, ys_seg, ys_pos, msk):
        """Training forward pass: teacher-forced decode plus loss and metrics.

        Returns ((pred_y, ys_truth), loss, acc, nll, ppl, tot_tokens, bsz):
        loss is the label-smoothed training loss, acc the count of correct
        (masked) tokens, nll/ppl the evaluation scores, tot_tokens the number
        of unmasked tokens.
        """
        enc, src_padding_mask = self.encode(xs_tpl, xs_seg, xs_pos)
        seq_len, bsz = ys_inp.size()
        self_attn_mask = self.attn_mask(seq_len)
        x = self.tok_embed(ys_inp) + self.pos_embed(ys_inp) + self.tok_embed(ys_tpl) + self.tok_embed(ys_seg) + self.tok_embed(ys_pos)
        x = self.emb_layer_norm(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Padding mask comes from ys_truth (the gold tokens), not ys_inp.
        padding_mask = torch.eq(ys_truth, self.vocab.padding_idx)
        if not padding_mask.any():
            padding_mask = None
        for layer in self.layers:
            x, _ ,_ = layer(x, self_padding_mask=padding_mask, \
                    self_attn_mask = self_attn_mask, \
                    external_memories = enc, \
                    external_padding_mask = src_padding_mask,)
        x = self.one_more_layer_norm(gelu(self.one_more(x)))
        pred = torch.softmax(self.out_proj(x), -1)
        # Label-smoothing loss (method name is spelled this way in the class).
        loss = self.label_smotthing_loss(pred, ys_truth, msk)
        _, pred_y = pred.max(-1)
        # Token-level accuracy over the unmasked positions only.
        tot_tokens = msk.float().sum().item()
        acc = (torch.eq(pred_y, ys_truth).float() * msk).sum().item()
        nll, ppl = self.nll_loss(pred, ys_truth, msk)
        return (pred_y, ys_truth), loss, acc, nll, ppl, tot_tokens, bsz
| lipiji/SongNet | biglm.py | biglm.py | py | 7,558 | python | en | code | 227 | github-code | 36 |
29791048785 | import requests
from lxml import etree
import json
from pyecharts import Map # 0.1.9.4
class nCoV_2019:
    """Scrapes per-province 2019-nCoV statistics from the dxy.cn mobile page."""

    def __init__(self):
        # Mobile (iPhone) user agent: the target URL serves the h5 mobile view.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
        }
        self.url = "https://ncov.dxy.cn/ncovh5/view/pneumonia_peopleapp"

    def parse_url(self):
        """Fetch the page and return the raw per-province JSON fragments.

        The statistics are embedded in the script tag with id
        'getListByCountryTypeService1' as a JavaScript array literal.
        Splitting the text on '}' yields one brace-stripped object fragment
        per province; the last three pieces are the tail of the JS statement,
        not data, and are dropped.
        """
        r = requests.get(url=self.url, headers=self.headers)
        # Fail loudly on anything but a 200. (The original used `assert`,
        # which is silently stripped when Python runs with -O.)
        if r.status_code != 200:
            raise RuntimeError(f'Unexpected HTTP status {r.status_code} for {self.url}')
        html = etree.HTML(r.content.decode())
        results = html.xpath('//*[@id="getListByCountryTypeService1"]//text()')[0].split('}')[:-3]
        return results

    def getDataList(self, results):
        """Convert the raw fragments into dicts of confirmed/dead/cured counts.

        results: list of strings produced by parse_url().
        Returns a list of dicts with keys provinceShortName, confirmedCount,
        deadCount and curedCount.
        """
        data_list = []
        # enumerate instead of results.index(result): index() is O(n) per
        # iteration and returns the wrong position for duplicate fragments.
        for index, result in enumerate(results):
            # The very first fragment still carries the JS prologue; strip it.
            if index == 0:
                result = result.replace('try { window.getListByCountryTypeService1 = [', '')
            # Each fragment lost its closing brace in split('}') and (except
            # the first) starts with a separating comma; restore valid JSON.
            result = json.loads(result.lstrip(',') + '}')
            data_list.append({
                'provinceShortName': result['provinceShortName'],  # province name
                'confirmedCount': result['confirmedCount'],        # confirmed cases
                'deadCount': result['deadCount'],                  # deaths
                'curedCount': result['curedCount'],                # recoveries
            })
        return data_list

    def main(self):
        """Fetch, parse, and return the list of per-province dicts."""
        results = self.parse_url()
        data_list = self.getDataList(results)
        return data_list
# Run the crawler once and collect the per-province records.
# (Renamed from `nCoV_2019 = nCoV_2019()`, which shadowed the class itself.)
crawler = nCoV_2019()
data_list = crawler.main()
# Column-wise lists for plotting.
provinceShortName_list = []   # province names
confirmedCount_list = []      # confirmed cases
deadCount_list = []           # deaths
curedCount_list = []          # recoveries
for record in data_list:
    provinceShortName_list.append(record['provinceShortName'])
    confirmedCount_list.append(record['confirmedCount'])
    deadCount_list.append(record['deadCount'])
    curedCount_list.append(record['curedCount'])
# Draw the map. `china_map` avoids shadowing the builtin `map`.
china_map = Map("中国疫情分布图", '', width=1980, height=1024, title_text_size=35)
# is_label_show: label every province; is_visualmap: show the colour legend;
# maptype selects the China map.
china_map.add("", provinceShortName_list, confirmedCount_list, visual_range=[0, 1000], maptype='china', is_visualmap=True,
              visual_text_color='#000', is_label_show=True)
china_map.show_config()
china_map.render(path='./中国疫情图.html')
| Ramelon/Python- | 疫情图.py | 疫情图.py | py | 3,469 | python | zh | code | 7 | github-code | 36 |
34246961090 | """
The directories module: Provides paths to all directories used in the
package.
This module helps with the relative paths of the directories in this
package. It helps with overcoming the hassle of re-writing & hardcoding
paths used for reference.
At a glance, the structure of the module is following:
- ai_dir{}: Dictionary of all the important directories used
in the package.
- local_dir{}: Dictionary of all directories in `D:/` drive.
See https://github.com/xames3/charlotte for cloning the repository.
"""
# History:
#
# < Checkout my github repo for history and latest stable build >
#
# 1.1.1 - Improved the type hints by using the typing module.
# Made the code more* PEP-8 compliant.
# Updated paths as per new configuration.
# 1.1.0 - Stories are now part of `./data/stories/` directory.
# 1.0.0 - First code.
from os.path import join
from pathlib import Path
from typing import Text
from win32api import GetLogicalDriveStrings
# Root directory of the package; every `ai_dir` path below is anchored here.
PARENT = Path.cwd()
def _drives(drive_letter: Text) -> Text:
    """Returns the partition root for a drive letter.

    drive_letter: Drive letter to be searched for; only its first character
                  is used, case-insensitively.

    Returns the matching partition root (e.g. 'D:\\') from all valid and
    present partitions, so the caller cannot reference a drive letter that
    is not mounted on the system. Prints a warning and implicitly returns
    None when the drive does not exist.
    """
    # '\000' (NUL) separates the entries returned by the Win32 API; the
    # trailing NUL leaves an empty last element, which is dropped.
    partitions = GetLogicalDriveStrings().split('\000')[:-1]
    drive = {}
    # Iterate the partitions directly instead of indexing by range(len(...)).
    for partition in partitions:
        # 'D:\\' -> 'd', making lookups case-insensitive.
        key = Path(partition).parts[0].split(':\\')[0].lower()
        drive[key] = partition
    # Keep the try body minimal: only the lookup can raise KeyError.
    try:
        return drive[drive_letter[0].lower()]
    except KeyError as error:
        # NOTE(review): callers receive None here; `join` on the result will
        # then raise TypeError. Kept as-is for backward compatibility.
        missing_drive = str(error).strip('\'')
        print(f'Could not find {missing_drive.title()}:\\ drive.')
# Directories used inside the package, all relative to PARENT.
ai_dir = {
    'data': PARENT/'data/',
    'logs': PARENT/'logs',
    'models': PARENT/'models',
    'backup': PARENT/'backup',
    'temp': PARENT/'temp',
    'tests': PARENT/'tests',
    'utils': PARENT/'utils',
    'actions': PARENT/'utils/actions',
    'assists': PARENT/'utils/assists',
    'knowledge': PARENT/'data/knowledge',
    'stories': PARENT/'data/stories',
    'csv': PARENT/'data/knowledge/csv'
}

# Resolve the local drive once instead of once per entry (the original
# called _drives('d') eight times for the same answer).
_LOCAL_DRIVE = _drives('d')

# Personal directories on the local D: drive.
local_dir = {
    'bittorrent': join(_LOCAL_DRIVE, 'Bittorrents'),
    'documents': join(_LOCAL_DRIVE, 'Documents'),
    'films': join(_LOCAL_DRIVE, 'Films'),
    'photos': join(_LOCAL_DRIVE, 'Photos'),
    'music': join(_LOCAL_DRIVE, 'Music'),
    'xa': join(_LOCAL_DRIVE, 'XA'),
    'videos': join(_LOCAL_DRIVE, 'Videos'),
    'web': join(_LOCAL_DRIVE, 'Web')
}
| ganyavhad/charlotte | utils/paths/directories.py | directories.py | py | 2,597 | python | en | code | 0 | github-code | 36 |
37222098464 | #!/usr/bin/env python
# coding: utf-8
# In[143]:
#Natural Language Processing with the DIJA and Reddit Headlines
#Classification Predictions on Stock Market from Headlines
#Classification includes Overall Up or Down, Market Volitality, and Measure of Strong and Poor Days
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
#cross validation
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
#accuracy
from sklearn.metrics import accuracy_score
#train a perceptron model
from sklearn.linear_model import Perceptron
#plot decision regions to visualize
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
from wordcloud import WordCloud,STOPWORDS
import re
import nltk
from nltk.corpus import stopwords
#estimators
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn import linear_model
#model metrics
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
# In[436]:
#Import Data for Viz
df = pd.read_csv('Combined_News_DJIA.csv')
CATdf = pd.read_csv('Corrected_Combined_DJIA_cat.csv')
#Import Data for Modeling
data = pd.read_csv('Combined_News_DJIA.csv')
dataCAT = pd.read_csv('Corrected_Combined_DJIA_cat.csv')
DJIAdf = pd.read_csv('DJIA_table.csv')
#corrCATdf was read twice in the original; a single read is sufficient.
corrCATdf = pd.read_csv('corrCATdf.csv')
# In[145]:
CATdf.info()
# In[4]:
#Define features and Dependent Variable
CATdf_features = CATdf.iloc[:,2:35]
depVar = CATdf['NetUpDown']
# In[5]:
#Define X, Y using features and dependent variable
X = CATdf_features
y = depVar
# In[6]:
#Number of unique y values
print('Class labels:', np.unique(y))
# In[7]:
# Create a default pairplot
sns.pairplot(DJIAdf)
# In[8]:
# Take the log of Volume and Close
DJIAdf['log_vol'] = np.log10(DJIAdf['Volume'])
DJIAdf['log_close'] = np.log10(DJIAdf['Close'])
sns.pairplot(DJIAdf)
# Drop the non-transformed columns
DJIAdf = DJIAdf.drop(columns = ['Volume'])
DJIAdf = DJIAdf.drop(columns = ['Adj Close'])
DJIAdf = DJIAdf.drop(columns = ['Close'])
DJIAdf = DJIAdf.drop(columns = ['Open'])
DJIAdf = DJIAdf.drop(columns = ['High'])
DJIAdf = DJIAdf.drop(columns = ['Low'])
# In[9]:
# Create a pairplot with Volume/Close
sns.pairplot(DJIAdf)
# In[10]:
#Correlation heat map
#Visulaize highly correlated features (quantitative realtion between two features)
corrCATdf_table = CATdf.corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(corrCATdf_table, cmap='coolwarm', vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,len(corrCATdf_table.columns),1)
ax.set_xticks(ticks)
plt.xticks(rotation=90)
ax.set_yticks(ticks)
ax.set_xticklabels(corrCATdf_table.columns)
ax.set_yticklabels(corrCATdf_table.columns)
plt.show()
# In[11]:
#Correlation Table
#View numeric values of correlation
print(corrCATdf_table)
# In[13]:
#Covariance Heat Map
#visualize coavriance (measure of how two features change together)
covCATdf_table = CATdf.cov()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(covCATdf_table,cmap='coolwarm', vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0,len(covCATdf_table.columns),1)
ax.set_xticks(ticks)
plt.xticks(rotation=90)
ax.set_yticks(ticks)
ax.set_xticklabels(covCATdf_table.columns)
ax.set_yticklabels(covCATdf_table.columns)
plt.show()
# In[14]:
#Covariance Table
#View numeric values of Covariance
covCATdf_table = CATdf.cov()
print(covCATdf_table)
# In[16]:
#Factor Plot of NetUpDown
sns.catplot('NetUpDown', data = CATdf, kind = 'count')
# In[17]:
#Count Net totals
NetUpDown_totals = CATdf.groupby('NetUpDown')['NetUpDown'].count()
NetUpDown_totals
#More Positive Days than Negative days
# In[18]:
#Factor Plot of HLcat
sns.catplot('HLcat', data = CATdf, kind = 'count')
# In[19]:
#Count Net totals
HLcat_totals = CATdf.groupby('HLcat')['HLcat'].count()
HLcat_totals
#1 = 0-100 in points swing
#2 = 100 - 250
#3 = 250+
# In[20]:
#Another way to plot histogram of HLdifference
CATdf['HLdifference'].hist(bins = 100)
#Shows numbers of days of HLdifference
# In[434]:
#Guassian curve manufactured for HLdifference
# histogram plot of a low res sample
from numpy.random import seed
from numpy.random import randn
from matplotlib import pyplot
from numpy import exp
from scipy.stats import boxcox
# seed the random number generator
seed(1)
#define data
data2 = CATdf.HLdifference
#power transform
data1 = boxcox(data2, 0)
pyplot.hist(data1)
pyplot.show()
# In[21]:
#Factor Plot of OCcat
sns.catplot('OCcat', data = CATdf, kind = 'count')
# In[22]:
#Count totals of OCcat
OCcat_totals = CATdf.groupby('OCcat')['OCcat'].count()
OCcat_totals
#2 = >100 (very positive day)
#1 = >0 (positive day)
#-1 = >-100 (negative day)
#-2 = <-100 (very negative day)
# In[23]:
#Another way to plot histogram of OCdifference
CATdf['OCdifference'].hist(bins = 100)
#Shows numbers of days of OCdifference
# In[25]:
#Distribution of age, with an overlay of a density plot
volume = CATdf['Volume'].dropna()
volume_dist = sns.distplot(volume)
volume_dist.set_title("Distribution of Trade Volume")
#Bell curve of numbers of days with Volume
# In[26]:
#Guassian curve manufactured for Volume
# histogram plot of a low res sample
from numpy.random import seed
from numpy.random import randn
from matplotlib import pyplot
from numpy import exp
from scipy.stats import boxcox
# seed the random number generator
seed(1)
#define data
data2 = CATdf.Volume
#power transform
data1 = boxcox(data2, 0)
pyplot.hist(data1)
pyplot.show()
# In[27]:
#Plot Trade HLDifference Over Time (volatility)
import datetime
X = pd.to_datetime(CATdf.Date)
y = CATdf.HLdifference
#plot
plt.plot(X,y)
# In[28]:
#Plot Trade OCdifference Over Time
import datetime
X = pd.to_datetime(CATdf.Date)
y = CATdf.OCdifference
#plot
plt.plot(X,y)
plt.gcf().autofmt_xdate()
plt.show()
# In[29]:
#Show Close over Time
import datetime
X = pd.to_datetime(CATdf.Date)
y = CATdf.Close
#plot
plt.plot(X,y)
plt.gcf().autofmt_xdate()
plt.show()
# In[32]:
#Linear Plot of Volume and HLcat on Market Up/Down
#illustrates low volatilty days more liekly to finish net positive
#also higher volume on low and high volatility days more likely to finish net positive
sns.lmplot('Volume', 'NetUpDown', data=CATdf, hue = 'HLcat')
# In[33]:
#Graph DJIA Close with HLdiffernce and Volume for insight
index = pd.read_csv('djia_df_cat.csv')
index.Date = pd.to_datetime(index.Date)
plt.figure(figsize=(10,8))
plt.plot(index.Date, index.Close,label = "DJIA closing price");
plt.plot(index.Date, index.HLdifference*10,label = "HLDifference"); #scale volume for readability
plt.plot(index.Date, index.Volume/100000, label = "Volume");
plt.legend();
plt.title("DJIA stocks");
# In[34]:
#BEGIN MODELING
#split data set train/test
train = dataCAT[dataCAT['Date'] < '2015-01-01']
test = dataCAT[dataCAT['Date'] > '2014-12-31']
# In[35]:
train.describe()
# In[36]:
test.describe()
# In[43]:
#Process of breaking down headlines into CountVector array below
example = train.iloc[0,17]
print(example)
# In[44]:
#Make all lowercase
example2 = example.lower()
print(example2)
# In[45]:
#Split words using CountVectorizer
example3 = CountVectorizer().build_tokenizer()(example2)
print(example3)
# In[46]:
#Remove Stop Words
example4 = [word for word in example3 if word not in stopwords.words('english')]
# In[47]:
print(example4)
# In[435]:
#Illustration of One-Hot Encoding used to create an array (example with stop words)
vectorEX = CountVectorizer()
EX = vectorEX.fit_transform(example3)
print(EX.toarray())
# In[48]:
#Illustration of One-Hot Encoding used to create an array and later help remove stop words (example without stop words)
vectorEX = CountVectorizer()
EX = vectorEX.fit_transform(example4)
print(EX.toarray())
# In[49]:
#Islotaed Words and Count the Number of times they appear
pd.DataFrame([[x,example4.count(x)] for x in set(example4)], columns = ['Word', 'Count'])
# In[437]:
#Calculate the values 0.25 quantile and 0.75 quantile of Stock Market Data
DJIAdf.quantile([0.25, 0.75])
# In[219]:
#Define Different Algorithms Below for later Modeling
#Random Forest
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=300, criterion='gini',
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None,
bootstrap=True, oob_score=True, n_jobs=1, random_state=1,
verbose=0, warm_start=False, class_weight=None)
# In[220]:
#KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=10, weights='uniform',
algorithm='auto', leaf_size=30, p=2,
metric='minkowski', metric_params=None, n_jobs=None)
# In[221]:
#Multi-layer Perceptron classifier
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False,
epsilon=1e-08, hidden_layer_sizes=(5,2), learning_rate='constant',
learning_rate_init=0.001, max_iter=300, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
solver='adam', tol=0.001, validation_fraction=0.1, verbose=False,
warm_start=False)
# In[222]:
#C-Support Vector Classification
import sklearn.svm as svm
sv = svm.LinearSVC(penalty='l2', loss='squared_hinge',
dual=True, tol=0.0001, C=1.0, multi_class='ovr',
fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=1, max_iter=1000)
# In[56]:
#Use for loop to iterate through each row of the dataset
#combine all headlines into a single string
trainheadlines = []
for row in range(0,len(train.index)):
trainheadlines.append(' '.join(str(x) for x in train.iloc[row,11:35]))
# In[58]:
#add that string to the list we need for CountVectorizer
onewordvector = CountVectorizer()
onewordtrain = onewordvector.fit_transform(trainheadlines)
print(onewordtrain.shape)
#show array of number of rows and total number of unique words in trainheadlines
# In[60]:
#Train a logistic Regression model
#name the model then fit the model based on X and Y values
#Sub LogisticRegression() with different defined algo for comapring results between algos
onewordmodel = LogisticRegression()
onewordmodel = onewordmodel.fit(onewordtrain, train["NetUpDown"]) #Also change y-value here for comparing other
#classification categories
# In[61]:
#repeat steps used to prep training data
#predict whether the DJIA increased or decreased for each day in test dataset
testheadlines = []
for row in range(0,len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row,11:35]))
# In[62]:
#add that string to the list we need for CountVectorizer
onewordtest = onewordvector.transform(testheadlines)
predictions = onewordmodel.predict(onewordtest)
# In[63]:
print(onewordtest.shape)
# In[64]:
#Look at predictions using crosstab
pd.crosstab(test["NetUpDown"], predictions, rownames=["Actual"], colnames=["Predicted"])
# In[65]:
#Show accuracy
#Be sure to label correct y-value being tested
acc1 = accuracy_score(test['NetUpDown'], predictions)
print('One Word Model Accuracy: ', acc1)
# In[68]:
#Identify the Top 10 Positive and Negative coefficients
#Bag of Words
#For LogisticRegression() only
onewordwords = onewordvector.get_feature_names()
onewordcoeffs = onewordmodel.coef_.tolist()[0]
coeffdf = pd.DataFrame({'Word' : onewordwords,
'Coefficient' : onewordcoeffs})
coeffdf = coeffdf.sort_values(['Coefficient', 'Word'], ascending = [0, 1])
#positive words
coeffdf.head(10)
# In[69]:
#Negative words
coeffdf.tail(10)
# In[438]:
#Two-Word Modeling, using words paired together
#n-gram model, n = length of sequence of words to be counted
#n = 2 model
twowordvector = CountVectorizer(ngram_range = (2,2))
twowordtrain = twowordvector.fit_transform(trainheadlines)
# In[439]:
#view data
print(twowordtrain.shape)
#Shows an new array with two-word combinations (now 355,342)
# In[73]:
#Name and fit Model Two Word Model
#Sub LogisticRegression() with different defined algo for comapring results between algos
twowordmodel = LogisticRegression()
twowordmodel = twowordmodel.fit(twowordtrain, train["NetUpDown"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
twowordtest = twowordvector.transform(testheadlines)
twowordpredictions = twowordmodel.predict(twowordtest)
# In[74]:
#Cross tab results
pd.crosstab(test["NetUpDown"], twowordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[75]:
#Show accuracy
acc2 = accuracy_score(test['NetUpDown'], twowordpredictions)
print('Two Word Model accuracy: ', acc2)
# In[76]:
#Word Pairing coefficients
twowordwords = twowordvector.get_feature_names()
twowordcoeffs = twowordmodel.coef_.tolist()[0]
twowordcoeffdf = pd.DataFrame({'Words' : twowordwords,
'Coefficient' : twowordcoeffs})
twowordcoeffdf = twowordcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])
#Positive Word Pairings
twowordcoeffdf.head(10)
# In[77]:
#Negative Word Pairings
twowordcoeffdf.tail(10)
# In[78]:
#Three Word Modeling
#n-gram model, n = length of sequence of words to be counted
#n = 3 model
threewordvector = CountVectorizer(ngram_range = (3,3))
threewordtrain = threewordvector.fit_transform(trainheadlines)
# In[80]:
#view data
print(threewordtrain.shape)
#589,589 unique three-word combinations
# In[81]:
#Name and fit Model Three Word Model
threewordmodel = LogisticRegression()
threewordmodel = threewordmodel.fit(threewordtrain, train["NetUpDown"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
threewordtest = threewordvector.transform(testheadlines)
threewordpredictions = threewordmodel.predict(threewordtest)
# In[82]:
#Cross tab results
pd.crosstab(test["NetUpDown"], threewordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[83]:
#Show accuracy
acc3 = accuracy_score(test['NetUpDown'], threewordpredictions)
print('Three Word Model accuracy: ', acc3)
# In[84]:
#Three Word coefficients
threewordwords = threewordvector.get_feature_names()
threewordcoeffs = threewordmodel.coef_.tolist()[0]
threewordcoeffdf = pd.DataFrame({'Words' : threewordwords,
'Coefficient' : threewordcoeffs})
threewordcoeffdf = threewordcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])
#Positive Words
threewordcoeffdf.head(10)
# In[85]:
#Negative words
threewordcoeffdf.tail(10)
# In[86]:
#Model for OCcat (Open/Close category: -2, -1, 1, 2)
#Showing Most accurate model
#Two-Word Modeling, using words paired together
#n-gram model, n = length of sequence of words to be counted
#n = 2 model
twowordvector = CountVectorizer(ngram_range = (2,2))
twowordtrain = twowordvector.fit_transform(trainheadlines)
# In[87]:
#view data
print(twowordtrain.shape)
# In[88]:
#Name and fit Model
#Using Support Vector on new y-variable OCcat
twowordmodel = sv
twowordmodel = twowordmodel.fit(twowordtrain, train["OCcat"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
twowordtest = twowordvector.transform(testheadlines)
twowordpredictions = twowordmodel.predict(twowordtest)
# In[89]:
#Cross tab results
pd.crosstab(test["OCcat"], twowordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[90]:
#Show accuracy for OCcat Prediction
acc2 = accuracy_score(test['OCcat'], twowordpredictions)
print('Two Word Model accuracy on OCcat: ', acc2)
# In[440]:
#Model for HLcat (High/Low category measuring volatility 1, 2, 3)
#Showing most accuracte HLcat model
onewordvector = CountVectorizer()
onewordtrain = onewordvector.fit_transform(trainheadlines)
print(onewordtrain.shape)
#shows total number of different words (31,122)
# In[441]:
#Train one word model on HLcat using Random Forest (Best Performing Algo)
onewordmodel = rf
onewordmodel = onewordmodel.fit(onewordtrain, train["HLcat"])
# In[442]:
#repeat steps used to prep training data
#predict whether the DJIA increased or decreased for each day in test dataset
testheadlines = []
for row in range(0,len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row,11:35]))
# In[443]:
#add that string to the list we need for CountVectorizer
onewordtest = onewordvector.transform(testheadlines)
predictions = onewordmodel.predict(onewordtest)
# In[444]:
#Look at predictions using crosstab
pd.crosstab(test["HLcat"], predictions, rownames=["Actual"], colnames=["Predicted"])
# In[445]:
#Show accuracy
#Be sure to label correct y-value being tested
acc1 = accuracy_score(test['HLcat'], predictions)
print('One Word Model Accuracy HLcat: ', acc1)
# In[446]:
#Show Two Word Model for HLcat using SVM
#Showing second most accurate HLcat model
twowordvector = CountVectorizer(ngram_range = (2,2))
twowordtrain = twowordvector.fit_transform(trainheadlines)
# In[450]:
#Two word Model SVM on new y-variable HLcat
twowordmodel = sv
twowordmodel = twowordmodel.fit(twowordtrain, train["HLcat"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
twowordtest = twowordvector.transform(testheadlines)
twowordpredictions = twowordmodel.predict(twowordtest)
# In[451]:
#Cross tab results
pd.crosstab(test["HLcat"], twowordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[452]:
#Show accuracy for HLcat Prediction
acc2 = accuracy_score(test['HLcat'], twowordpredictions)
print('Two Word Model accuracy on HLcat: ', acc2)
# In[ ]:
##MODELING WITHOUT STOP WORDS
# In[453]:
#Total number of stop words
print(len(stopwords.words('english')))
# In[454]:
#All the stop words in 'english'
print((stopwords.words('english')))
# In[101]:
#Remove Stop Words from trainheadlines
def stopremovedheadlines(trainheadlines1):
    """Lowercase, tokenize, and strip English stop words from each headline.

    trainheadlines1: list of headline strings (one combined string per day).
    Returns a list of the same length where each entry is the space-joined
    sequence of lowercased tokens that are not NLTK English stop words.
    """
    # Build these once: the original rebuilt the tokenizer per line and
    # re-fetched the stop-word list for every single word, which made the
    # function effectively quadratic. A set also gives O(1) membership tests.
    tokenizer = CountVectorizer(lowercase=True).build_tokenizer()
    stop_set = set(stopwords.words('english'))
    nostopwords = []
    # Bug fix: iterate over the *parameter*. The original tokenized the
    # global `trainheadlines`, silently ignoring the argument it was given.
    for line in trainheadlines1:
        tokens = [word.lower() for word in tokenizer(line)]
        nostopwords.append(' '.join(word for word in tokens if word not in stop_set))
    return nostopwords
# In[148]:
#Define trainheadlines with no stop words
trainheadlinesNOSTOP = stopremovedheadlines(trainheadlines)
# In[455]:
#Confirm correct length for train set 1611
print(len(trainheadlinesNOSTOP))
# In[456]:
#add that string to the list we need for CountVectorizer
onewordvector = CountVectorizer()
onewordtrain = onewordvector.fit_transform(trainheadlinesNOSTOP)
#confirm numbers of rows 1611
#140, the number of stop words removed from original 31,122
#30982
print(onewordtrain.shape)
# In[457]:
#repeat steps used to prep training data
#predict whether the DJIA increased or decreased for each day in test dataset
testheadlines = []
for row in range(0,len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row,11:35]))
# In[458]:
#Remove Stop Words from testheadlines
def stopremovedheadlines1(testheadlines):
    """Lowercase, tokenize, and strip English stop words from test headlines.

    testheadlines: list of headline strings (one combined string per day).
    Returns a list of the same length where each entry is the space-joined
    sequence of lowercased tokens that are not NLTK English stop words.
    """
    # Build these once: the original rebuilt the tokenizer per line and
    # re-fetched the stop-word list for every single word (quadratic).
    tokenizer = CountVectorizer(lowercase=True).build_tokenizer()
    stop_set = set(stopwords.words('english'))
    nostopwords = []
    for line in testheadlines:
        tokens = [word.lower() for word in tokenizer(line)]
        nostopwords.append(' '.join(word for word in tokens if word not in stop_set))
    return nostopwords
# In[158]:
#Define testheadlines with no stop words
testheadlinesNOSTOP = stopremovedheadlines1(testheadlines)
# In[159]:
print(len(testheadlinesNOSTOP))
# In[465]:
#One Word No Stop Words Model
#Sub LogisticRegression() with different defined algo for comapring results between algos
onewordmodel = LogisticRegression()
onewordmodel = onewordmodel.fit(onewordtrain, train["NetUpDown"]) #Also change y-value here for comparing other
#classification categories
# In[466]:
#add that string to the list we need for CountVectorizer
onewordtest = onewordvector.transform(testheadlinesNOSTOP)
predictions = onewordmodel.predict(onewordtest)
# In[467]:
print(onewordtest.shape)
#shows 30,982 unique words
# In[468]:
#Look at predictions using crosstab
pd.crosstab(test["NetUpDown"], predictions, rownames=["Actual"], colnames=["Predicted"])
# In[470]:
#Show accuracy
#Be sure to label correct y-value being tested
acc1 = accuracy_score(test['NetUpDown'], predictions)
print('One Word Model Accuracy: ', acc1)
# In[471]:
#Identify the Top 10 Positive and Negative coefficients
#Bag of Words
#For LogisticRegression() only
onewordwords = onewordvector.get_feature_names()
onewordcoeffs = onewordmodel.coef_.tolist()[0]
coeffdf = pd.DataFrame({'Word' : onewordwords,
'Coefficient' : onewordcoeffs})
coeffdf = coeffdf.sort_values(['Coefficient', 'Word'], ascending = [0, 1])
#positive word
coeffdf.head(10)
# In[472]:
#Negative word
coeffdf.tail(10)
# In[473]:
#Two-Word Modeling no Stop Words
#n-gram model, n = length of sequence of words to be counted
#n = 2 model
twowordvector = CountVectorizer(ngram_range = (2,2))
twowordtrain = twowordvector.fit_transform(trainheadlinesNOSTOP)
# In[474]:
#view data
print(twowordtrain.shape)
#354,664 unique two-word combinations
# In[475]:
#Name and fit Model
#Sub LogisticRegression() with different defined algo for comapring results between algos
twowordmodel = LogisticRegression()
twowordmodel = twowordmodel.fit(twowordtrain, train["NetUpDown"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
twowordtest = twowordvector.transform(testheadlinesNOSTOP)
twowordpredictions = twowordmodel.predict(twowordtest)
# In[476]:
#Cross tab results
pd.crosstab(test["NetUpDown"], twowordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[477]:
#Show accuracy
acc2 = accuracy_score(test['NetUpDown'], twowordpredictions)
print('Two Word Model accuracy: ', acc2)
# In[478]:
#Word Pairing coefficients
twowordwords = twowordvector.get_feature_names()
twowordcoeffs = twowordmodel.coef_.tolist()[0]
twowordcoeffdf = pd.DataFrame({'Words' : twowordwords,
'Coefficient' : twowordcoeffs})
twowordcoeffdf = twowordcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])
#Positive Word Pairings
twowordcoeffdf.head(10)
# In[479]:
#Negative Word Pairings
twowordcoeffdf.tail(10)
# In[381]:
#Three Word Modeling No Stop Words
#n-gram model, n = length of sequence of words to be counted
#n = 3 model
threewordvector = CountVectorizer(ngram_range = (3,3))
threewordtrain = threewordvector.fit_transform(trainheadlinesNOSTOP)
# In[382]:
#view data
print(threewordtrain.shape)
#441,541 unique variables representing three-word combinations
# In[480]:
#Name and fit Model
threewordmodel = LogisticRegression()
threewordmodel = threewordmodel.fit(threewordtrain, train["NetUpDown"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
threewordtest = threewordvector.transform(testheadlinesNOSTOP)
threewordpredictions = threewordmodel.predict(threewordtest)
# In[481]:
#Cross tab results
pd.crosstab(test["NetUpDown"], threewordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[482]:
#Show accuracy
acc3 = accuracy_score(test['NetUpDown'], threewordpredictions)
print('Three Word Model accuracy: ', acc3)
# In[483]:
#Word Pairing coefficients
threewordwords = threewordvector.get_feature_names()
threewordcoeffs = threewordmodel.coef_.tolist()[0]
threewordcoeffdf = pd.DataFrame({'Words' : threewordwords,
'Coefficient' : threewordcoeffs})
threewordcoeffdf = threewordcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])
#Positive Word Pairings
threewordcoeffdf.head(15)
# In[484]:
#Negative word pairings
threewordcoeffdf.tail(10)
# In[500]:
#One Word No Stop Words Model
#Showing Most accurate HLcat model
#Using Multi-Layer Perceptron Model
#Sub LogisticRegression() with different defined algo for comapring results between algos
onewordmodel = mlp
onewordmodel = onewordmodel.fit(onewordtrain, train["HLcat"]) #Also change y-value here for comparing other
#classification categories
# In[501]:
#add that string to the list we need for CountVectorizer
onewordtest = onewordvector.transform(testheadlinesNOSTOP)
predictions = onewordmodel.predict(onewordtest)
# In[502]:
print(onewordtest.shape)
#shows 30,982 unique words
# In[503]:
#Look at predictions using crosstab
pd.crosstab(test["HLcat"], predictions, rownames=["Actual"], colnames=["Predicted"])
# In[504]:
#Show accuracy
#Be sure to label correct y-value being tested
acc1 = accuracy_score(test['HLcat'], predictions)
print('One Word Model Accuracy: ', acc1)
# In[505]:
#Two Word Model no Stop Words
#Shows most accurate OCcat model
#Using Multi-Layer Perceptron Algo
#Sub LogisticRegression() with different defined algo for comapring results between algos
twowordmodel = mlp
twowordmodel = twowordmodel.fit(twowordtrain, train["OCcat"])
#transfrom test data
testheadlines = []
for row in range(0, len(test.index)):
testheadlines.append(' '.join(str(x) for x in test.iloc[row, 11:35]))
twowordtest = twowordvector.transform(testheadlinesNOSTOP)
twowordpredictions = twowordmodel.predict(twowordtest)
# In[506]:
#Cross tab results
pd.crosstab(test["OCcat"], twowordpredictions, rownames = ["Actual"], colnames=["Predicted"])
# In[507]:
#Show accuracy
acc2 = accuracy_score(test['OCcat'], twowordpredictions)
print('Two Word Model accuracy: ', acc2)
| rbarrow2727/stockheadlines | Visualization_and_Modeling_for_Stock_Market_News_Headline_Classification_v5.py | Visualization_and_Modeling_for_Stock_Market_News_Headline_Classification_v5.py | py | 27,699 | python | en | code | 0 | github-code | 36 |
6372282633 | from fastapi import Security, HTTPException, status
from fastapi.security.api_key import APIKeyHeader
from app.db.session import SessionLocal
from app.core.settings import API_KEY_NAME, API_KEY
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
def get_db():
    """Yield a database session and guarantee it is closed afterwards.

    FastAPI dependency: opens one ``SessionLocal`` session per request and
    closes it once the request handler has finished with it.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
def get_api_key(
    api_key_header: str = Security(api_key_header),
):
    """Validate the API key supplied in the request header.

    Returns the key when it matches the configured ``API_KEY``; otherwise
    raises 401.  Uses ``secrets.compare_digest`` so the comparison runs in
    constant time and does not leak key contents through timing differences,
    and guards against the ``None`` that ``auto_error=False`` can deliver.
    """
    import secrets  # local import keeps the module's dependency block untouched

    if api_key_header is not None and secrets.compare_digest(api_key_header, API_KEY):
        return api_key_header
    raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                        detail="Permission denied")
def _plain_detail_response(description):
    # Shared OpenAPI response shape: a JSON body with a single "detail" string.
    return {
        "description": description,
        "content": {
            "application/json": {
                "example": {"detail": "string"}
            }
        }
    }


# Extra response documentation attached to route declarations.
response_codes = {
    401: _plain_detail_response("Unauthorized"),
    404: _plain_detail_response("Not Found"),
}
| GrupoX-FIUBA/users-service | app/endpoints/base.py | base.py | py | 1,007 | python | en | code | 0 | github-code | 36 |
19294329942 | # server.py - Main server file
# Imports
import paho.mqtt.client as mqtt
import requests
import recommender
import json
import threading
import queue
import config
# Constants for MQTT
TEST_FEED = "charliemm/feeds/test-feed"
RECOMMENDATIONS_FEED = "charliemm/feeds/project.recommendations"
UPDATES_FEED = "charliemm/feeds/project.updates"
STATUS_FEED = "charliemm/feeds/project.status"
# Queue for incoming messages
message_queue = queue.Queue()
# Callback for when server connects to the MQTT broker
def on_connect(client, userdata, flags, rc):
    """Subscribe to broker status topics and every project feed on connect."""
    print("Connected with result code " + str(rc))
    # Broker internals first, then each application feed at QoS 2 (exactly once).
    client.subscribe("$SYS/#")
    for feed in (TEST_FEED, RECOMMENDATIONS_FEED, UPDATES_FEED, STATUS_FEED):
        client.subscribe(feed, qos=2)
# Test function for sending and receiving simple messages
def mqtt_test(msg):
    """Diagnostic handler for the test feed: just log the received payload."""
    print("Running mqtt_test()")
    print(str(msg))
# Handle messages containing a list of new visitors
def visitor_update(msg):
    """Handle an update message listing new visitors at a location.

    ``msg`` layout: ``[location, visitor_count, visitor_1, ..., visitor_n]``.
    Builds a comma-separated visitor list and POSTs it to the Sheets backend.
    """
    print("Running visitor_update()")
    print(str(msg))
    location = msg[0]
    num_vis = msg[1]
    if int(num_vis) == 0:
        print("No visitors to update")
        return
    # First num_vis-1 visitor ids followed by the final list element --
    # the same elements, in the same order, as the original accumulation loop.
    vis_list = ','.join(msg[2:int(num_vis) + 1] + [msg[-1]])
    # Create JSON string and POST to Sheets
    msg_info = {
        'type' : "0",
        'location' : location,
        'numvis' : num_vis,
        'visitors' : vis_list,
    }
    post_visitor_update_Sheets(msg_info)
    return
# POSTs visitor update to Sheets
def post_visitor_update_Sheets(data):
    """POST a visitor-update record to the Sheets endpoint.

    Query parameters are passed via ``params`` so requests URL-encodes them;
    the hand-built query string broke whenever a value contained spaces,
    commas or other reserved characters (visitor lists always contain commas).
    The JSON body is kept identical to the original.
    """
    params = {
        'type': data['type'],
        'location': data['location'],
        'numvis': data['numvis'],
        'visitors': data['visitors'],
    }
    r = requests.post(config.DB_URL, params=params, data=json.dumps(data))
    print(r.status_code)
# Handle messages with user ratings
def visitor_recommendation(msg):
    """Store a new rating and publish fresh recommendations for the user.

    ``msg`` layout: ``[location, user, rating, context]``.  Posts the rating
    to Sheets, fetches the user's full rating row back, asks the recommender
    for new suggestions, and publishes them on the recommendations feed.
    """
    print("Running visitor_recommendation()")
    # Update Sheets with new ratings
    print("Updating user ratings")
    user_info = {
        'type' : "1",
        'location' : msg[0],
        'user' : msg[1],
        'rating' : msg[2],
        'context' : msg[3],
    }
    post_rating_Sheets(user_info)
    # GET user's ratings from Sheets
    print("Fetching recommendations for user", user_info['user'], "at device", user_info['location'])
    user_rats = get_user_Sheets(user_info['user'])
    # Get recommendation from recommender system.
    # user_rats[0] is the user id, so only the numeric ratings are passed on.
    print("Generating recommendations")
    recs = recommender.get_recommendations(user_rats[1:], user_info['context'])
    # Outgoing payload: "<location>,<user>,<rec_1>,...,<rec_n>"
    out_msg = user_info['location'] + "," + user_info['user']
    for i in recs:
        out_msg += "," + str(i)
    # Publish message with recommendations to MQTT broker
    print("Sending recommendations")
    client.publish(RECOMMENDATIONS_FEED, payload=out_msg, qos=1, retain=False)
    return
# GETs user information from Sheets
def get_user_Sheets(user):
    """Fetch a user's rating row from Sheets.

    Returns ``[user_id, rating_1, ..., rating_n]`` where absent ratings come
    back as 0 and present ones are converted to int.
    """
    data = {
        'user' : user,
    }
    url_data = config.DB_URL + "?user=" + data['user']
    response = requests.get(url_data, params=json.dumps(data))
    fields = response.text.split(",")
    # First field is the user id; every later field is a per-location rating,
    # with the empty string meaning "no rating yet".
    ratings = [0 if field == '' else int(field) for field in fields[1:]]
    return fields[:1] + ratings
# POSTs user ratings to Sheets
def post_rating_Sheets(data):
    """POST a new user rating to the Sheets endpoint.

    Uses ``params`` so the values are URL-encoded by requests instead of
    being concatenated into the query string by hand (which broke on any
    reserved character), matching post_visitor_update_Sheets.
    """
    params = {
        'type': data['type'],
        'user': data['user'],
        'rating': data['rating'],
        'location': data['location'],
    }
    r = requests.post(config.DB_URL, params=params, data=json.dumps(data))
    print(r.status_code)
# Handle messages regarding location status
def status_update(msg):
    """Handle a location-status message; currently a stub that only logs it."""
    print("Running status_update()")
    print(str(msg))
    return
# Message handler
def message_handler(msg):
    """Route an incoming MQTT message to the handler registered for its topic."""
    print("Running message_handler()")
    fields = msg.payload.decode().split(",")
    dispatch = {
        TEST_FEED: mqtt_test,
        RECOMMENDATIONS_FEED: visitor_recommendation,
        UPDATES_FEED: visitor_update,
        STATUS_FEED: status_update,
    }
    handler = dispatch.get(msg.topic)
    if handler is not None:
        # fields[0] is the "S" server marker; the handlers only need the rest.
        handler(fields[1:])
    return
# Callback for when a message is received from the MQTT broker
# Function called depends on topic published to
def on_message(client, userdata, msg):
    """Queue server-bound messages (payloads whose first field is "S").

    The raw message object is queued; the main loop pops it and hands it to
    message_handler on a worker thread.
    """
    msg_in = msg.payload.decode().split(",")
    if (msg_in[0] == "S"):
        message_queue.put(msg)
# Connect to Adafruit IO MQTT broker
client = mqtt.Client(client_id="Server")
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(config.MQTT_USER, password=config.MQTT_PASS)
client.connect(config.MQTT_BROKER, port=1883)
# Start MQTT client, runs indefinetely
client.loop_start()
while True:
if not message_queue.empty():
msg = [message_queue.get()]
threading.Thread(target=message_handler, args=(msg), daemon=True).start() | maguic11/WhereNext | server/server.py | server.py | py | 5,092 | python | en | code | 0 | github-code | 36 |
22782805173 | #! /usr/bin/env python3
import boto3
import logging
from botocore.exceptions import ClientError
from regionslist import regions
session = boto3.Session(profile_name='temp')
def deployStack(region):
    """Create the 'desi-stack' CloudFormation stack in the given region.

    Uses the module-level boto3 session (profile 'temp').  NOTE(review):
    ``create_stack`` returns as soon as stack creation *starts*; the success
    message below does not mean the stack finished deploying -- confirm
    whether a waiter is wanted here.
    """
    temp_cfnclient = session.client('cloudformation', region_name=region)
    response = temp_cfnclient.create_stack(
        StackName='desi-stack',
        ResourceTypes=[
            'AWS::*'
        ],
        TemplateURL='https://desire-cfn-templates.s3.amazonaws.com/s3bucket.json'
    )
    if (response):
        print("Stack created successfuly!")
for region in regions:
try:
deployStack(region)
except ClientError as e:
logging.error(e)
| desiby/stelligent-u | 01-cloudformation/1.3.2/deploys3.py | deploys3.py | py | 695 | python | en | code | 0 | github-code | 36 |
26067827949 | import torch.utils.data as data
import numpy as np
import os
import torch
import time
import data_reader
from PIL import Image
def collate_fn(batch):
    """Collate a list of (inputs, gt, noise_std) tensor triples into a batch.

    Each element already carries a leading batch dimension, so the triples
    are concatenated along dim 0.  Concatenating the whole list in one call
    replaces the original pairwise ``torch.cat`` loop, which re-copied the
    growing tensor on every iteration (quadratic in the batch size).
    """
    inputs = torch.cat([sample[0] for sample in batch], 0)
    gt = torch.cat([sample[1] for sample in batch], 0)
    noise_std = torch.cat([sample[2] for sample in batch], 0)
    return inputs, gt, noise_std
class dataSet(data.Dataset):
    """Dataset yielding (inputs, ground truth, noise map) float tensors.

    Paths come from the file list in ``args.flist`` (one "input gt" pair per
    line).  When ``args.Random`` is set, ``args.GET_BATCH`` randomly
    cropped/flipped/transposed patches are produced per item; otherwise the
    full image is returned with a leading batch axis of 1.
    """
    def __init__(self,args):
        super(dataSet,self).__init__();
        self.args = args;
        self.flist = args.flist;
        self.pathlist = self.get_file_list();
        self.Random = args.Random;
        self.Evaluate = args.Evaluate;
        self.size = (args.size,args.size);
        # data_reader performs the raw decoding for the configured input/gt types.
        self.reader = data_reader.data_reader(args,input_type = args.input_type, gt_type = args.gt_type);
    def __getitem__(self,index):
        data_time_start = time.time();
        # Each list line is "<input_path> <gt_path>"; [:-1] strips the newline.
        paths = self.pathlist[index][:-1].split();
        input_path = paths[0];
        gt_path = paths[1];
        inputs,noise_std = self.reader.input_loader(input_path);
        gt = self.reader.gt_loader(gt_path);
        # HWC -> CHW, then prepend a batch axis.
        inputs_final = inputs.transpose(2,0,1);
        inputs_final = np.expand_dims(inputs_final,axis = 0);
        gt_final = gt.transpose(2,0,1);
        gt_final = np.expand_dims(gt_final,axis = 0);
        # Constant per-item noise level broadcast into a (GET_BATCH,1,H,W) map.
        noise_map = np.zeros((self.args.GET_BATCH,1,self.args.size,self.args.size));
        noise_map[:,:,:,:] = noise_std;
        if self.Random :
            # Training path: build GET_BATCH augmented patches for this item.
            inputs_final = np.zeros((self.args.GET_BATCH ,4,self.args.size,self.args.size));
            if self.args.gt_type == 'DNG_RAW':
                gt_final = np.zeros((self.args.GET_BATCH,4,self.args.size,self.args.size));
            else:
                # Demosaiced RGB ground truth is twice the patch resolution.
                gt_final = np.zeros((self.args.GET_BATCH,3,self.args.size*2,self.args.size * 2));
            for read_index in range(self.args.GET_BATCH):
                tmp_input,tmp_gt = self.reader.RandomCrop(self.size,inputs,gt);
                tmp_input,tmp_gt = self.reader.RandomFLipH(tmp_input,tmp_gt);
                tmp_input,tmp_gt = self.reader.RandomFlipV(tmp_input,tmp_gt);
                tmp_input,tmp_gt = self.reader.RandomTranspose(tmp_input,tmp_gt);
                inputs_final[read_index] =tmp_input.transpose(2,0,1).copy();
                gt_final[read_index] = tmp_gt.transpose(2,0,1).copy();
        # NOTE(review): in the DNG_RAW branch only the inputs are unpacked;
        # gt_final is left as-is -- confirm that is the intended behavior.
        if self.args.gt_type == 'DNG_RAW':
            inputs_final = self.reader.unpack_raw_single(inputs_final);
        else:
            inputs_final = self.reader.unpack_raw(inputs_final);
            gt_final = self.reader.unpack_raw_single(gt_final);
        inputs_final = torch.FloatTensor(inputs_final);
        gt_final = torch.FloatTensor(gt_final);
        data_time_end = time.time();
        noise_map = torch.FloatTensor(noise_map);
        return inputs_final,gt_final,noise_map;
    def __len__(self):
        return len(self.pathlist);
    def get_file_list(self):
        # Read the whole list file once; lines keep their trailing newline.
        datafile = open(self.flist);
        content = datafile.readlines();
        datafile.close();
        return content ;
| eedalong/ISP_Demosaic | demosaicnet_src/datasets.py | datasets.py | py | 3,126 | python | en | code | 1 | github-code | 36 |
22403507258 | from SudokuBoard import SudokuBoard, INDEXES_PAIRS, get_random_indexes
# CONSTANTS
NUMBER_OF_ROWS = 9
NUMBER_OF_COLUMNS = 9
class SudokuGame:
    """A sudoku game: a fully solved board plus the board the player fills in."""

    def __init__(self):
        # Solved reference board and the (initially empty) playing board.
        self.board = SudokuBoard()
        self.playing_board = SudokuBoard()
        # testing
        self.board.fill_board()

    def set_play_board(self, number_of_filled_cells=36):
        """Reveal *number_of_filled_cells* cells of the solved board on the playing board."""
        cells_indexes = get_random_indexes(number_of_filled_cells)
        for index_pair in INDEXES_PAIRS:
            if index_pair in cells_indexes:
                self.playing_board.board[index_pair[0]][index_pair[1]] = self.board.board[index_pair[0]][index_pair[1]]

    def is_game_over(self):
        """Return True when the playing board has no empty (0) cells left."""
        # all() with a generator replaces the original nested loops and still
        # short-circuits on the first empty cell.
        return all(
            self.playing_board.board[row][col] != 0
            for row in range(NUMBER_OF_ROWS)
            for col in range(NUMBER_OF_COLUMNS)
        )
| ofirmeir/Sela_Sudoku | SudokuGame.py | SudokuGame.py | py | 1,069 | python | en | code | 0 | github-code | 36 |
5061905616 | from skimage import io, transform
import torch
import os
import cv2
from torchvision import transforms, datasets, utils as vutils
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
class WormClassifier(nn.Module):
    """CNN binary classifier for 64x64 single-channel worm images.

    Four stages of (two 5x5 convs + 2x2 max-pool) reduce 64x64x1 to 4x4x48,
    followed by two fully connected layers.  ``forward`` returns a sigmoid
    probability, or the raw pre-sigmoid logits when ``features=True``.
    """
    def __init__(self, dim=64):
        super(WormClassifier, self).__init__()
        self.conv1_1 = nn.Conv2d(1, 12, 5, 1, padding=2)
        self.conv1_2 = nn.Conv2d(12, 12, 5, 1, padding=2)
        self.conv2_1 = nn.Conv2d(12, 24, 5, 1, padding=2)
        self.conv2_2 = nn.Conv2d(24, 24, 5, 1, padding=2)
        self.conv3_1 = nn.Conv2d(24, 36, 5, 1, padding=2)
        self.conv3_2 = nn.Conv2d(36, 36, 5, 1, padding=2)
        self.conv4_1 = nn.Conv2d(36, 48, 5, 1, padding=2)
        self.conv4_2 = nn.Conv2d(48, 48, 5, 1, padding=2)
        self.fc1 = nn.Linear(768, 256)  # 48 channels * 4 * 4 after four pools
        self.fc2 = nn.Linear(256, 1)

    def forward(self, x, features=False):
        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv4_1(x))
        x = F.relu(self.conv4_2(x))
        x = F.max_pool2d(x, 2)
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x_features = self.fc2(x)
        # BUG FIX: the original computed torch.sigmoid(self.fc2(x_features)),
        # applying fc2 twice; fc2 expects 256 input features but x_features
        # has 1, so every forward pass crashed with a shape mismatch.
        x = torch.sigmoid(x_features)
        if features:
            return x_features
        return x
class WormDataLoader(Dataset):
def __init__(self, path):
self.path = path
self.img_names = os.listdir(path)
self.remove_ds()
self.data_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Grayscale(num_output_channels=1),
transforms.Resize((64, 64)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomVerticalFlip(p=0.5),
transforms.ToTensor(),
# transforms.Normalize((0.5), (0.5))
])
def remove_ds(self):
if '.DS_Store' in self.img_names:
self.img_names.remove('.DS_Store')
def __len__(self):
return len(self.img_names)
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()
img_path = os.path.join(self.path, self.img_names[index])
image = io.imread(img_path)
image = cv2.normalize(image, image, 0, 255, cv2.NORM_MINMAX)
image = self.transform(image) | paolobif/wormifier | models.py | models.py | py | 2,595 | python | en | code | 0 | github-code | 36 |
5424410088 | import frappe
from frappe import _
from frappe.model.document import Document
class UpgradationAndStatusChange(Document):
def validate(self):
pass
#if self.transfer_date > frappe.utils.nowdate():
# frappe.throw("Future date not allowed")
def on_submit(self):
new_school = frappe.get_doc("School", self.school)
for data in self.school_transfer:
if data.property=="Level":
new_school.level = data.new
if data.property=="School Shift":
new_school.shift = data.new
if data.property=="School Gender":
new_school.gender = data.new
if data.property=="School Status":
new_school.status = data.new
if self.new_school_name:
new_school.school_name = self.new_school_name
new_school.save()
| hamza0342/semis | semis/semis/doctype/upgradation_and_status_change/upgradation_and_status_change.py | upgradation_and_status_change.py | py | 730 | python | en | code | 0 | github-code | 36 |
34323690653 | #scan port using nmap
import nmap
import sys
import argparse
def port_scanner(network):
    """Run an nmap host-discovery scan over *network* and print which hosts are up.

    *network* is any nmap target spec, e.g. '10.1.10.0/24'.
    """
    print("Nmap scanning on " + network + " ...\n")
    scan_nmap = nmap.PortScanner()
    # -sn: ping scan / host discovery only, no port scan.
    res = scan_nmap.scan(hosts=network, arguments='-sn')
    print(res["nmap"])
    print("there was " + res["nmap"]["scanstats"]["totalhosts"] + " hosts scanned and " + res["nmap"]["scanstats"]["uphosts"] + " were up")
    print()
    # res["scan"] maps each responding IP to its status details.
    for result in res["scan"]:
        print("The machine " + result + " is " + res["scan"][result]["status"]["state"])
def parseArgs():
    """Parse the command line and return the network target string."""
    parser = argparse.ArgumentParser(description="network scan for up hosts",
                                     epilog="python3 10.1.10.0/24")
    parser.add_argument("network", type=str, help="network mask to scan")
    return parser.parse_args().network
def main(network):
    # Entry point: run the host-discovery scan over the requested network.
    port_scanner(network)
if __name__ == "__main__":
main(parseArgs())
{'nmap': {'command_line': 'nmap -oX - -sn 192.168.200.129/24', 'scaninfo': {}, 'scanstats': {'timestr': 'Thu Jan 20 11:46:03 2022', 'elapsed': '2.66', 'uphosts': '2', 'downhosts': '254', 'totalhosts': '256'}}, 'scan': {'192.168.200.2': {'hostnames': [{'name': '', 'type': ''}], 'addresses': {'ipv4': '192.168.200.2'}, 'vendor': {}, 'status': {'state': 'up', 'reason': 'conn-refused'}}, '192.168.200.129': {'hostnames': [{'name': '', 'type': ''}], 'addresses': {'ipv4': '192.168.200.129'}, 'vendor': {}, 'status': {'state': 'up', 'reason': 'conn-refused'}}}}
| bonnettheo/python_hacking_tools | ip_scanner.py | ip_scanner.py | py | 1,482 | python | en | code | 0 | github-code | 36 |
29466847233 | #@author: Neil
#2018-09-26
import sys, pygame
from pygame.locals import *
from random import randrange
class Weight(pygame.sprite.Sprite):
    """A falling weight sprite that respawns at the top once it leaves the screen."""

    def __init__(self, speed):
        pygame.sprite.Sprite.__init__(self)
        self.speed = speed
        # Image and bounding rect used when the sprite group draws this sprite.
        self.image = weight_image
        self.rect = self.image.get_rect()
        self.reset()

    def reset(self):
        """
        Move the weight to a random position just above the top of the screen.
        """
        self.rect.centerx = randrange(screen_size[0])
        self.rect.top = -self.rect.height

    def update(self):
        """
        Advance the weight one frame; respawn it once it falls off-screen.
        """
        self.rect.move_ip(0, self.speed)
        if self.rect.top > screen_size[1]:
            self.reset()
#初始化
pygame.init()
screen_size = 800, 600
pygame.display.set_mode(screen_size, FULLSCREEN)
pygame.mouse.set_visible(0)
#加载铅锤图像
weight_image = pygame.image.load('weight.png')
weight_image = weight_image.convert() # 以便与显示匹配
speed = 5 # 速度
#创建一个Sprite对象编组,并在其中添加一个Weight实例
sprites = pygame.sprite.RenderUpdates()
sprites.add(Weight(speed))
#获取并填充屏幕表面
screen = pygame.display.get_surface()
bg = (255, 255, 255) # 白色
screen.fill(bg)
pygame.display.flip()
#用于清除Sprite对象
def clear_callback(surf, rect):
    # Erase a sprite's previous position by repainting the background color.
    surf.fill(bg, rect)
while True:
# 检查退出事件
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
sys.exit()
# 清除以前的位置
sprites.clear(screen, clear_callback)
# 更新所有的Sprite对象
sprites.update()
# 绘制所有的Sprite对象
updates = sprites.draw(screen)
# 更新必要的显示部分
pygame.display.update(updates) | Crazyalltnt/Beginning-Python-3-Projects | 10-Do-It-Yourself Arcade Game/weights.py | weights.py | py | 1,998 | python | zh | code | 5 | github-code | 36 |
70234738344 | from sshtunnel import SSHTunnelForwarder
from concurrent.futures import ProcessPoolExecutor
import pymysql
import pandas as pd
import datetime
import numpy as np
from visits import Visits
def read_from_db(query):
    """Run *query* against the remote MySQL database through an SSH tunnel.

    Opens the tunnel and connection, reads the result set into a DataFrame,
    and guarantees both are torn down even when the query fails -- the
    original leaked the tunnel and connection on any exception.
    """
    tunnel = SSHTunnelForwarder(
        ('192.168.2.85', 22),
        ssh_username='ubuntu',
        # NOTE(review): hard-coded credentials -- move to config/environment.
        ssh_password='password',
        remote_bind_address=('0.0.0.0', 3306))
    tunnel.start()
    try:
        connection = pymysql.connect(host='localhost', user='molengo',
                                     passwd='%molengo2019', db='molengo',
                                     port=tunnel.local_bind_port)
        try:
            df = pd.read_sql(query, connection)
        finally:
            connection.close()
    finally:
        tunnel.stop()
    return df
cams = read_from_db("SELECT v.id as video, c.attr \
FROM video_files as v JOIN cameras as c \
ON v.cam_id = c.id")
def analyze_df(nums):
    """Compute visit intervals for the given cluster ids.

    Loads the sighting rows for the requested clusters, joins the camera
    attribute for each video file, and splits each cluster's time-ordered
    sightings into visits whenever two consecutive 'entrance' sightings are
    at least one hour apart.
    """
    df = read_from_db(f"SELECT time_visit, cluster_id, file_id FROM final_clusters WHERE cluster_id IN ({ ','.join([str(i) for i in nums]) })")
    df = pd.merge(df, cams, left_on='file_id', right_on='video')
    visits = dict()
    for id_ in df.cluster_id.unique():
        clstr = df[df.cluster_id == id_].sort_values('time_visit')
        visits[id_] = []
        prev = clstr.iloc[0]
        for i in range(clstr.shape[0] - 1):
            curr = clstr.iloc[i, :]
            next_ = clstr.iloc[i+1, :]
            # A gap of >= 1h between two 'entrance' sightings closes a visit.
            if next_.time_visit - curr.time_visit >= datetime.timedelta(hours=1) and next_.attr == 'entrance' and curr.attr == 'entrance':
                visits[id_].append(
                    (prev.time_visit, curr.time_visit)
                )
            prev = next_
        # prev still equals the first row only when the cluster had a single
        # sighting row; record the whole span as one visit in that case.
        if prev.eq(clstr.iloc[0]).all():
            visits[id_].append(
                (clstr.iloc[0].time_visit, clstr.iloc[-1].time_visit)
            )
    # NOTE(review): zip(visits.keys(), *visits.values()) pairs the i-th key
    # with the i-th visit of EVERY cluster, not each key with its own visits;
    # this looks wrong -- confirm the intended pairing before relying on it.
    return [Visits(
        cluster_id=int(d[0]),
        start=d[1][0].strftime("%y-%m-%d %H-%M-%S"),
        end=d[1][1].strftime("%y-%m-%d %H-%M-%S")
    )
    for d in zip(
        visits.keys(),
        *visits.values()
    )]
def count_visits():
    """Fan out visit analysis over all cluster ids in batches of *per_proc*.

    BUG FIX: the original built full batches with ``per_proc`` (1000) but
    computed the remainder slice with a hard-coded 500, so part of the tail
    was processed twice and part was skipped.  Both slices now use the same
    boundary.
    """
    per_proc = 1000
    df = read_from_db("SELECT DISTINCT cluster_id FROM final_clusters").values
    # Largest multiple of per_proc that fits; the rest become 1-id batches.
    head = df.shape[0] // per_proc * per_proc
    clusters = np.reshape(df[:head], (-1, per_proc)).tolist() + df[head:].tolist()
    with ProcessPoolExecutor() as executor:
        for result in executor.map(analyze_df, clusters):
            print('calculated!!!')
            Visits.add_db(result)
    return True
if __name__ == '__main__':
res = count_visits()
print(res)
| AlexMuliar/counter | count_visits.py | count_visits.py | py | 2,624 | python | en | code | 0 | github-code | 36 |
6890156052 | import requests
import json
from libs.html_parser import html_to_nodes
def createPage(access_token, image_sources, title="Title"):
    """Create a page on https://telegra.ph/ containing the given images.

    Parameters
    ----------
    access_token : Telegraph API access token.
    image_sources : non-empty list of image URLs; one <img> tag per entry.
    title : page title; defaults to the historical "Title" so existing
        callers keep their behavior.

    Returns the URL of the created page; raises ValueError / RequestException
    on failure.
    """
    if not isinstance(image_sources, list) or len(image_sources) == 0:
        raise ValueError('No image sources provided')
    # Build the HTML skeleton of the page, one image per source.
    html_content = ''.join("<img src='{}' />".format(src) for src in image_sources)
    content_to_be_sent = json.dumps(html_to_nodes(html_content))
    page_object = {
        "access_token": access_token,
        "title": title,
        "content": content_to_be_sent
    }
    try:
        response = requests.post('https://api.telegra.ph/createPage', data=page_object)
        print(response)
        response.raise_for_status()
        result = json.loads(response.text)
    except requests.exceptions.RequestException as e:
        raise requests.exceptions.RequestException('Error uploading image: {}'.format(str(e)))
    except ValueError as e:
        raise ValueError('Error decoding server response: {}'.format(str(e)), e.doc, e.pos)
    except Exception as e:
        raise Exception('An unexpected error occurred: {}'.format(str(e)))
    if not ('error' in result):
        return result['result']['url']
    else:
        raise ValueError(result['error'])
| xareyli/telegraph-uploader | libs/telegraph/create_page.py | create_page.py | py | 1,501 | python | en | code | 0 | github-code | 36 |
11311733424 | import npyscreen
import curses
def simpletest(screen):
    """Show a single-field npyscreen form whose value is a UTF-8 byte string."""
    SA = npyscreen.Form()
    w = npyscreen.Textfield(SA, )
    # Exercise rendering of non-ASCII bytes (U+00C5, A with ring above).
    w.value = u'\u00c5 U+00C5 LATIN CAPITAL LETTER A WITH RING ABOVE\n'.encode('utf-8')
    w.edit()
    w.update()
if __name__ == "__main__":
curses.wrapper(simpletest)
| npcole/npyscreen | utf8-pycurses.py | utf8-pycurses.py | py | 277 | python | en | code | 436 | github-code | 36 |
8640560733 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 04:52:01 2019.
@author: jamiesom
"""
import pandas as pd
from electricitylci.globals import data_dir, output_dir
import numpy as np
from electricitylci.eia860_facilities import eia860_generator_info
import re
def generate_power_plant_construction(year):
    """
    Function uses an NETL study.

    That generated the life cycle inventory for power plant construction using
    an economic input output model. Two types of plants are considered: sub-
    critical pulverized coal and a natural gas combined cycle plant. The
    inventory provided by the study is for an entire plant. This inventory is
    divided by the net generation capacity of those plants to place the
    inventory on the basis of a MW and then divided by an assumed plant life of
    30 years, which is a conservative assumption considering the lifetime of
    these plants is typically much longer. These per year/per MW impacts are
    mapped to the fossil power generators in the U.S. where they are scaled by
    the net generating capacity of the plants (as provided by EIA data). These
    impacts are eventually divided by the generation for the year in MWh to
    provide the construction impacts on the basis of the functional unit.

    Parameters
    ----------
    year : int
        Year of EIA data to use to provide net generating capacity

    Returns
    -------
    dataframe
        This dataframe provides construction inventory for each power plant
        reporting to EIA.
    """
    gen_df = eia860_generator_info(year)
    gen_columns=[
        "plant_id",
        "generator_id",
        "technology",
        "prime_mover",
        "nameplate_capacity_mw",
        "energy_source_1"
    ]
    energy_sources=[
        "NG",'BIT', 'DFO', 'LIG', 'SUB', 'RC', 'KER', 'RFO', 'PC',
        'WC'
    ]
    compartment_mapping={
        'resource/in ground':"resource",
        'resource':"resource",
        'resource/in water':"resource",
        'resource/in air':"resource",
        'air/unspecified':"emission/air",
        'resource/land':"resource",
        'water/unspecified':"emission/water",
        'air/low population density':"emission/air",
        'soil/groundwater':"emission/water",
        'air/unspecified/2,4':"emission/air",
        'soil/unspecified':"emission/soil",
        'soil/industrial':"emission/soil",
        'soil/unspecified/2,4':"emission/soil",
        'water/unspecified/2,4':"emission/water",
        '/':"",
        'resource/groundwater':"resource",
        'resource/surface water':"resource",
        'water/surface water':"resource"
    }
    gas_prime = ["GT","IC","OT","CT","CS","CE","CA","ST"]
    coal_type = ["BIT","SUB","LIG","WC","RC"]
    gen_df = gen_df.loc[gen_df["energy_source_1"].isin(energy_sources), gen_columns]
    gen_df["plant_id"]=gen_df["plant_id"].astype(int)
    groupby_cols=["plant_id","technology","energy_source_1","prime_mover"]
    gen_df_group = gen_df.groupby(by=groupby_cols,as_index=False)["nameplate_capacity_mw"].sum()
    # Classify each prime mover / fuel combination as coal or NGCC construction.
    prime_energy_combo=gen_df_group.groupby(by=["prime_mover","energy_source_1"]).size().reset_index().rename(columns={0:'count'})
    prime_energy_combo["const_type"]="coal"
    gas_const_criteria=(prime_energy_combo["prime_mover"].isin(gas_prime))&(~prime_energy_combo["energy_source_1"].isin(coal_type))
    prime_energy_combo.loc[gas_const_criteria,"const_type"]="ngcc"
    gen_df_group=gen_df_group.merge(prime_energy_combo[['prime_mover', 'energy_source_1', 'const_type']],
                                    on=["prime_mover","energy_source_1"],
                                    how="left")
    inventory = pd.read_csv(f"{data_dir}/plant_construction_inventory.csv",low_memory=False)
    # Split "path/flow/unit" strings into separate columns; keyword n= avoids
    # the deprecated positional-argument form of Series.str.rsplit.
    inventory = pd.concat([inventory, inventory["Flow"].str.rsplit('/',n=1,expand=True)],axis=1).drop(columns=["Flow"]).rename(columns={0:"Flow",1:"Unit"})
    inventory = pd.concat([inventory, inventory["Flow"].str.rsplit('/',n=1,expand=True)],axis=1).drop(columns=["Flow"]).rename(columns={0:"Compartment_path",1:"FlowName"})
    inventory = pd.concat([inventory,inventory["Compartment_path"].str.split('/',n=1,expand=True)],axis=1).rename(columns={0:"Compartment",1:"delete"}).drop(columns="delete")
    # .copy() on both slices: the original assigned into views of `inventory`,
    # triggering chained-assignment warnings and potentially silent no-ops.
    scpc_inventory = inventory[['SCPC_550_MW', 'Unit', 'Compartment_path', 'FlowName','Compartment']].copy()
    scpc_inventory["const_type"]="coal"
    scpc_inventory["stage_code"]="coal_const"
    scpc_inventory.rename(columns={"SCPC_550_MW":"FlowAmount"},inplace=True)
    # Whole-plant inventory -> per MW per year (30-year life, 550 MW plant).
    scpc_inventory["FlowAmount"]=scpc_inventory["FlowAmount"]/30/550
    ngcc_inventory = inventory[['NGCC_630_MW', 'Unit', 'Compartment_path', 'FlowName','Compartment']].copy()
    ngcc_inventory["const_type"]="ngcc"
    ngcc_inventory["stage_code"]="ngcc_const"
    ngcc_inventory.rename(columns={"NGCC_630_MW":"FlowAmount"},inplace=True)
    # Whole-plant inventory -> per MW per year (30-year life, 630 MW plant).
    ngcc_inventory["FlowAmount"] = ngcc_inventory["FlowAmount"]/30/630
    inventory = pd.concat([scpc_inventory,ngcc_inventory])
    inventory["Compartment_path"]=inventory["Compartment_path"].map(compartment_mapping)
    # Flows in a "resource" compartment are inputs (the redundant
    # inventory["input"]=False pre-assignment was removed).
    input_list=["resource" in x for x in inventory["Compartment"]]
    inventory["input"]=input_list
    construction_df = gen_df_group.merge(inventory,on="const_type",how="left")
    # Scale the per-MW inventory by each plant's nameplate capacity.
    construction_df["FlowAmount"]=construction_df["FlowAmount"]*construction_df["nameplate_capacity_mw"]
    construction_df.rename(columns={"nameplate_capacity_mw":"quantity"},inplace=True)
    construction_df.drop(columns=["const_type","energy_source_1","prime_mover"],inplace=True)
    construction_df["fuel_type"]="Construction"
    construction_df["Unit"]=construction_df["Unit"].str.replace("mj","MJ")
    return construction_df
if __name__ == "__main__":
year=2016
df = generate_power_plant_construction(year)
| USEPA/ElectricityLCI | electricitylci/power_plant_construction.py | power_plant_construction.py | py | 5,934 | python | en | code | 23 | github-code | 36 |
31712806303 |
# price and discount is float
price=9.99
discount=0.2
result=price*(1-discount)
print(result)
# String
name="Krishna"
name1="Sai"
print(name)
print(name*2)
print(name+" "+name1)
# excercise of creating variables
var1="hola"
var2="hola"
print(var1+" "+var2)
num1=8
num2=2
print(num1*num2)
# using f-string
name="Mr.Bean"
greeting=f"Hi,{name}"
print(greeting)
# using format
name="Bob"
greeting="Hi,{}"
with_name=greeting.format(name)
with_name_second=greeting.format("Rolf")
print(with_name)
print(with_name_second)
| KrishnaSaiVadlamani/Demo-Application | FlaskWithPython/module2/basicCode.py | basicCode.py | py | 536 | python | en | code | 0 | github-code | 36 |
24184036928 | '''
Created on Jul 16, 2011
@author: Giulio
'''
import logging
from system.network.ServerProxy import ServerProxy
class DummyProxy(ServerProxy):
    '''
    Stand-in ServerProxy that turns a fixed set of remote-call names
    (shell/send/receive/close) into logged no-ops.
    '''
    # Attribute names that __getattr__ resolves to the no-op callable.
    recognizedFuncs = ['shell','send','receive','close']
    def __init__(self):
        ServerProxy.__init__(self)
        self._logger = logging.getLogger(type(self).__name__)
        # When True, each proxied call and its arguments are logged at DEBUG.
        self.logCalls = True
    def connected(self):
        # The dummy proxy always reports itself as connected.
        return True
    def _dummyCallable(self, *args):
        # No-op stand-in for any remote call; optionally logs the arguments.
        if self.logCalls:
            self._logger.debug('args %s' % str(args))
    def __getattr__(self, name):
        # NOTE(review): for names outside recognizedFuncs this method falls
        # through and implicitly returns None instead of raising
        # AttributeError -- unknown attribute lookups silently yield None;
        # confirm that is intended.
        if name in self.recognizedFuncs:
            if self.logCalls:
                self._logger.debug('Calling %s' % (name))
            return self._dummyCallable
if __name__ == '__main__':
dp = DummyProxy()
dp.shell(2,'kool')
| gbottari/ESSenCe | src/system/network/DummyProxy.py | DummyProxy.py | py | 914 | python | en | code | 0 | github-code | 36 |
74611240104 | #!/usr/bin/env python
import altair_saver
from map_maker_lib import create_topo_data, read_file, validate_data, \
create_plot, DATA_TYPES, DELIMITERS, COLOR_SCHEMES
import streamlit as st
BE_GEO_URL = 'https://gist.githubusercontent.com/jandot/ba7eff2e15a38c6f809ba5e8bd8b6977/raw/eb49ce8dd2604e558e10e15d9a3806f114744e80/belgium_municipalities_topojson.json'
BE_MUNICIPALITIES_FEATURE = 'BE_municipalities'
if __name__ == '__main__':
'''# Map Maker
'''
file_column1, file_column2 = st.beta_columns(2)
with file_column1:
data_file = st.file_uploader('Data file')
with file_column2:
encoding = st.selectbox('File encoding', ('utf-8', 'iso-8859-1'))
delimiter = DELIMITERS[st.selectbox('Data delimiter',
list(DELIMITERS.keys()))]
topo_municipalities = create_topo_data(BE_GEO_URL, BE_MUNICIPALITIES_FEATURE)
if data_file:
file_name = data_file.name
bytes_array = data_file.read()
data = read_file(file_name, bytes_array, encoding=encoding, delimiter=delimiter)
validate_data(data)
data_column1, data_column2, data_column3 = st.beta_columns(3)
with data_column1:
column_name = st.selectbox('Data column',
[column for column in data.columns
if column not in ('nis_code',)])
with data_column2:
data_type = DATA_TYPES[st.selectbox('Data type', list(DATA_TYPES.keys()))]
with data_column3:
color_scheme = st.selectbox('Color scheme', COLOR_SCHEMES)
tooltip_names = st.multiselect('Tooltip columns',
[column for column in data.columns
if column not in ('nis_code',)])
plot = create_plot(topo_data=topo_municipalities, data=data,
column_name=column_name, data_type=data_type,
tooltip_columns=tooltip_names, scheme=color_scheme)
st.write(plot)
| gjbex/Python-for-data-science | source-code/streamlit/map_maker/map_maker_app.py | map_maker_app.py | py | 2,085 | python | en | code | 12 | github-code | 36 |
19310364873 | import os
import functools
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path+'/input.txt') as f:
lines = [line.rstrip("\n") for line in f]
print(os.path.basename(dir_path))
def diff(str1, str2):
    """Characters unique to each string: those of str1 absent from str2,
    followed by those of str2 absent from str1 (original order kept)."""
    only_first = ''.join(ch for ch in str1 if ch not in str2)
    only_second = ''.join(ch for ch in str2 if ch not in str1)
    return only_first + only_second
def intersection(str1, str2):
    """Characters of str1 that also appear in str2 (duplicates and order kept)."""
    return ''.join(ch for ch in str1 if ch in str2)
def subtract(str1, str2):
    """Return the characters of str1 that do not occur in str2."""
    return "".join(ch for ch in str1 if ch not in str2)
# Segment patterns of each digit on a correctly wired 7-segment display.
digitToSegments = {
    "abcefg": "0",
    "cf": "1",
    "acdeg": "2",
    "acdfg": "3",
    "bcdf": "4",
    "abdfg": "5",
    "abdefg": "6",
    "acf": "7",
    "abcdefg": "8",
    "abcdfg": "9",
}
# NOTE(review): `sum`, `input` and `all` below shadow Python built-ins;
# left unchanged here to keep the code byte-identical.
sum = 0
for line in lines:
    # Each line: ten unique signal patterns, " | ", four output patterns.
    parts = line.split(" | ")
    inputs = parts[0].split()
    outputs = parts[1].split()
    # Bucket the ten patterns by segment count.
    inputsByLen = {2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
    for input in inputs:
        inputsByLen[len(input)].append(input)
    # Digits with a unique number of segments are identified immediately.
    one = inputsByLen[2][0]
    seven = inputsByLen[3][0]
    four = inputsByLen[4][0]
    eight = inputsByLen[7][0]
    twoThreeFive = inputsByLen[5]
    zeroSixNine = inputsByLen[6]
    all = "abcdefg"
    # store possibilities
    segmentMap = {
        "a": all,
        "b": all,
        "c": all,
        "d": all,
        "e": all,
        "f": all,
        "g": all,
    }
    # 'a' is the wire in seven but not in one.
    segmentMap["a"] = diff(one, seven)
    # segments that variate between 0, 6 and 9 (d, c, e)
    zeroSixNineVariants = ''.join(list(filter(
        lambda x: x not in zeroSixNine[0] or x not in zeroSixNine[1] or x not in zeroSixNine[2],
        all,
    )))
    # 'd' is in four but not one, and varies among 0/6/9.
    segmentMap["d"] = diff(one, four)
    segmentMap["d"] = intersection(
        segmentMap["d"], zeroSixNineVariants)
    # 'b' is the other (four minus one) wire.
    segmentMap["b"] = diff(diff(one, four), segmentMap["d"])
    # 'c' is the wire of one that varies among 0/6/9; 'f' is the other.
    segmentMap["c"] = intersection(zeroSixNineVariants, one)
    segmentMap["f"] = diff(one, segmentMap["c"])
    # 'e' is the remaining 0/6/9 variant.
    segmentMap["e"] = subtract(
        zeroSixNineVariants, segmentMap["c"]+segmentMap["d"])
    # 'g' is whatever wire is still unassigned.
    segmentMap["g"] = subtract(
        all, segmentMap["a"]+segmentMap["b"]+segmentMap["c"]+segmentMap["d"]+segmentMap["e"]+segmentMap["f"])
    print(segmentMap)
    # Invert the map: scrambled wire -> true segment (values are now
    # single characters after the deductions above).
    translator = {}
    for k in segmentMap:
        translator[segmentMap[k]] = k
    # Decode the four output digits and accumulate their numeric value.
    translatedOutput = ""
    for output in outputs:
        translation = ""
        for char in output:
            translation += translator[char]
        translation = "".join(sorted(translation))
        translatedOutput += digitToSegments[translation]
    print(translatedOutput)
    sum += int(translatedOutput)
print(sum)
| recrtl/aoc21 | day8-2/main.py | main.py | py | 2,736 | python | en | code | 1 | github-code | 36 |
14822140289 | #
# @lc app=leetcode.cn id=257 lang=python3
#
# [257] 二叉树的所有路径
#
# https://leetcode-cn.com/problems/binary-tree-paths/description/
#
# algorithms
# Easy (56.91%)
# Total Accepted: 5.9K
# Total Submissions: 10.4K
# Testcase Example: '[1,2,3,null,5]'
#
# 给定一个二叉树,返回所有从根节点到叶子节点的路径。
#
# 说明: 叶子节点是指没有子节点的节点。
#
# 示例:
#
# 输入:
#
# 1
# / \
# 2 3
# \
# 5
#
# 输出: ["1->2->5", "1->3"]
#
# 解释: 所有根节点到叶子节点的路径为: 1->2->5, 1->3
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def binaryTreePaths(self, root: TreeNode) -> List[str]:
        """Return every root-to-leaf path as 'v1->v2->...->leaf' strings.

        Iterative DFS: each stack entry pairs the path prefix built so far
        with the node it ends at.
        """
        if root is None:
            return []
        paths = []
        pending = [(str(root.val), root)]
        while pending:
            prefix, node = pending.pop()
            if not node:
                continue
            if not node.left and not node.right:
                paths.append(prefix)
            for child in (node.left, node.right):
                if child:
                    pending.append(('{}->{}'.format(prefix, child.val), child))
        return paths
| ZodiacSyndicate/leet-code-solutions | easy/257.二叉树的所有路径/257.二叉树的所有路径.py | 257.二叉树的所有路径.py | py | 1,342 | python | en | code | 45 | github-code | 36 |
# input: n = number of houses, c = number of routers to install
n, c = map(int, input().split())
arr = []
for _ in range(n):
    arr.append(int(input()))
# Approach (parametric search): binary-search the answer itself.
# Pick a candidate minimum gap, greedily count how many routers can be
# placed at least that far apart; if >= c fit, the gap can grow (raise the
# lower bound), otherwise it must shrink (lower the upper bound).
# The loop ends when the bounds cross.
arr.sort() # the greedy sweep requires coordinates in ascending order
min_gap = 1 # smallest candidate gap
max_gap = arr[-1] - arr[0] # largest candidate gap
result = 0 # best achievable minimum distance between adjacent routers
# binary search over the candidate gap
while min_gap <= max_gap:
    # Midpoint of the current bounds: the minimum spacing being tested.
    gap = (min_gap + max_gap) // 2
    current = arr[0] # place the first router at the first house
    cnt = 1 # one router installed at `current`
    # Greedily install routers keeping at least `gap` between them.
    for i in range(1, n):
        # The next house is far enough from the last placed router.
        if arr[i] >= current + gap:
            current = arr[i]
            cnt += 1
    # If at least c routers fit, remember this gap and try a bigger one.
    if cnt >= c:
        min_gap = gap + 1
        result = gap
    else: # too few routers fit: the gap is too large
        max_gap = gap - 1
# output
print(result)
# Number words for 1-19 and the tens up to ninety; larger values are
# composed from these pieces.
D = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six',
     7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten', 11: 'eleven',
     12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',
     16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen',
     20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty',
     70: 'seventy', 80: 'eighty', 90: 'ninety'}
# Scale table of (value, name) pairs; kept for compatibility although
# nothing in this file appears to read it.
L = [1000000, ' million', 1000, ' thousand', 1, '']
def i2_txt(tall):
    """Spell out an integer in [1, 99] in English (e.g. 21 -> 'twenty-one')."""
    if tall < 20:
        return D[tall]
    tier = (tall // 10) * 10
    ener = tall % 10  # ones digit
    if ener != 0:
        return f'{D[tier]}-{D[ener]}'
    return D[tier]
def i3_txt(tall):
    """Spell out an integer in [1, 999] in English."""
    if len(str(tall)) < 3:
        return i2_txt(tall)
    hundrer = tall // 100
    resten = tall - hundrer * 100  # remainder below one hundred
    if resten == 0:
        return f'{D[hundrer]} hundred'
    # BUG FIX: the original interpolated the raw integer `resten` instead of
    # its spelled-out form, yielding e.g. 'one hundred 23' for 123.
    return f'{D[hundrer]} hundred {i2_txt(resten)}'
def i9_txt(tall):
    """Spell out an integer up to the hundreds of millions in English.

    Delegates to i3_txt for values below 1000; otherwise joins the
    million / thousand / unit groups that are non-zero.
    """
    if len(str(tall)) < 4:
        return i3_txt(tall)
    millions, remainder = divmod(tall, 1000000)
    thousands, units = divmod(remainder, 1000)
    parts = []
    if millions:
        parts.append(f'{i3_txt(millions)} million')
    if thousands:
        parts.append(f'{i3_txt(thousands)} thousand')
    if units:
        parts.append(i3_txt(units))
    return ' '.join(parts)
def add_words(tekststreng):
    """Insert '- <spelled-out number> -' after every integer token.

    Tokens are whitespace-separated; anything int() accepts is treated as
    a number. Returns the rebuilt, single-space-separated string.

    Fixes over the original: the bare `except:` (which swallowed every
    exception) is narrowed to ValueError — the only error int(str) raises —
    and the redundant index-collecting first pass is removed.
    """
    pieces = []
    for token in tekststreng.split():
        pieces.append(token)
        try:
            value = int(token)
        except ValueError:
            continue  # not a number: keep the token as-is
        pieces.append(f'- {i9_txt(value)} -')
    return ' '.join(pieces)
print(add_words('C owes 91 pounds to D and 55 pounds to E')) | jorul/ITGK | ITGK øvinger/Eksamen 2016/4.py | 4.py | py | 2,908 | python | en | code | 0 | github-code | 36 |
17106811081 | import os
inputPath = os.path.join(os.path.dirname(__file__), "input")
with open(inputPath, "r") as inputFile:
initialLines = [line.strip() for line in inputFile.readlines() if line]
def getLineAtCursor(lines, cursor):
    """Parse the instruction at `cursor` into an (opcode, int argument) pair."""
    command, value = lines[cursor].split(" ")
    return command, int(value)
def advance(lines, cursor, accumulator):
    """Execute one instruction and return the updated (cursor, accumulator).

    Raises ValueError for an unknown opcode (the original raised a bare
    Exception with an unhelpful message).
    """
    command, value = getLineAtCursor(lines, cursor)
    if command == "acc":
        accumulator += value
        cursor += 1
    elif command == "jmp":
        cursor += value
    elif command == "nop":
        cursor += 1
    else:
        raise ValueError(f"unknown instruction: {command!r}")
    return cursor, accumulator
def stopAtLoop(lines, cursor, accumulator, alreadyVisited):
    """Run the program until an instruction repeats or execution falls off
    the end.

    Returns (accumulator, terminated): terminated is True when the cursor
    moved past the last instruction, False when a loop was detected.
    `alreadyVisited` is mutated in place.

    BUG FIX: the original compared `cursor` against the length of the
    global `initialLines` instead of the `lines` argument (it only worked
    because callers happened to pass same-length lists). Iteration also
    replaces recursion to avoid RecursionError on long programs.
    """
    while True:
        if cursor >= len(lines):
            return accumulator, True
        if cursor in alreadyVisited:
            return accumulator, False
        alreadyVisited.add(cursor)
        cursor, accumulator = advance(lines, cursor, accumulator)
def invertLine(line):
    """Swap the opcode of a 'nop'/'jmp' instruction, keeping its argument.

    Any other opcode is an error.
    """
    if line.startswith("nop"):
        return "jmp" + line[3:]
    if line.startswith("jmp"):
        return "nop" + line[3:]
    raise Exception("Wtf")
def resolveLoop(lines):
    """Try flipping each single nop/jmp instruction; return the accumulator
    of the first variant whose program terminates (None if none does)."""
    candidate = list(lines)
    for idx, instruction in enumerate(lines):
        if instruction.startswith("acc"):
            continue  # only nop/jmp can be the corrupted instruction
        candidate[idx] = invertLine(instruction)
        accumulator, terminated = stopAtLoop(candidate, 0, 0, set())
        if terminated:
            return accumulator
        candidate[idx] = instruction  # restore before trying the next flip
def solve1():
    # Part 1: (accumulator, terminated) right before any instruction repeats.
    return stopAtLoop(initialLines, 0, 0, set())
def solve2():
    # Part 2: accumulator after fixing the single corrupted nop/jmp.
    return resolveLoop(initialLines)
print(solve1())
print(solve2())
| mmmaxou/advent-of-code | 2020/day-8/answer.py | answer.py | py | 1,816 | python | en | code | 0 | github-code | 36 |
6758424060 |
import pygame
import random
pygame.init()
dis_width = 800 # screen width and height
dis_height = 600
win = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption("snake") # window title
x = 0 # snake start position
y = 0
speed = 10 # snake speed (pixels per frame)
x_fruct = round(random.randrange(0, dis_width - 20) / 10.0) * 10.0 # fruit spawn, snapped to a 10px grid
y_fruct = round(random.randrange(0, dis_height - 20) / 10.0) * 10.0
x_dvihenie = True # movement-direction flags (right/down/left/up)
y_dvihenie = False
y1_dvihenie = 0
x1_dvihenie = 0
width = 20 # snake size
height = 20
meny = True
second = 0
pole = pygame.image.load('pole.jpg') # field and menu sprites
meny_jpg = pygame.image.load('meny.jpg')
Game_over = pygame.image.load('Game_over.jpg')
run = True
while run:
    pygame.time.delay(30)
    if meny == True: # gameplay branch (after the menu)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        keys = pygame.key.get_pressed()
        # keep moving in the current direction
        if x_dvihenie == True:
            x += speed
        if y_dvihenie == True:
            y += speed
        if x1_dvihenie == True:
            x -= speed
        if y1_dvihenie == True:
            y -= speed
        # arrow keys switch the direction flags
        if keys[pygame.K_LEFT]:
            x1_dvihenie = True
            x_dvihenie = False
            y_dvihenie = False
            y1_dvihenie = False
            x -= speed
        if keys[pygame.K_RIGHT]:
            x_dvihenie = True
            y_dvihenie = False
            x1_dvihenie = False
            y1_dvihenie = False
            x += speed
        if keys[pygame.K_UP]:
            x1_dvihenie = False
            y1_dvihenie = True
            x_dvihenie = False
            y_dvihenie = False
            y -= speed
        if keys[pygame.K_DOWN]:
            x_dvihenie = False
            y_dvihenie = True
            x1_dvihenie = False
            y1_dvihenie = False
            y += speed
        # fruit eaten: respawn it at a new grid-aligned spot
        if x == x_fruct and y == y_fruct:
            x_fruct = round(random.randrange(0, dis_width - 20) / 10.0) * 10.0
            y_fruct = round(random.randrange(0, dis_height - 20) / 10.0) * 10.0
        if x >= dis_width or x < 0 or y >= dis_height or y < 0: # field borders: game over
            win.blit(Game_over,(0,0))
            if keys[pygame.K_e]:
                meny = False
        else:
            win.blit(pole, (0,0))
            pygame.draw.rect(win, (0, 0, 255), (x, y, height, width)) # draw snake and fruit
            pygame.draw.rect(win, (0,255, 0, ), (x_fruct, y_fruct, 20, 20))
        pygame.display.update()
    else: # menu branch
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        win.blit(meny_jpg, (0,0))
        pygame.display.update()
pygame.quit()
| artem6033/snake | snake.py | snake.py | py | 3,345 | python | ru | code | 0 | github-code | 36 |
37726664454 | import threading
import time
from traceback import print_tb
import numpy as np
import serial.tools.list_ports
import serial
import queue
import PySimpleGUI as sg
import matplotlib.figure as figure
import matplotlib.pyplot as plt
number_of_network_nodes = 3
monitor_serial = 'All' # 'Single' or 'All'
class Artist:
    """Live matplotlib dashboard for the serial monitor.

    One figure per network node, each with four sub-plots: illuminance +
    reference (top-left), duty cycle (top-right), integral error
    (bottom-left) and tracking error (bottom-right).
    """

    # Hard cap on the number of samples kept per series.
    MAX_SAMPLES = 5000

    def __init__(self, nodes) -> None:
        self.nodes = nodes
        self.rows = 2
        self.cols = 2
        plt.ion()
        self.subplots = [plt.subplots(self.rows, self.cols, num=i, sharex=True) for i in range(len(nodes))]
        self.figures: list[figure.Figure] = [subplot[0] for subplot in self.subplots]
        self.axes: list[tuple[tuple[plt.Axes]]] = [subplot[1] for subplot in self.subplots]  # self.rows by self.cols
        self.latest_time = [0] * len(nodes)
        # BUG FIX: the original used `[[]] * len(nodes)`, which repeats ONE
        # list object, so every node appended into the same shared buffer
        # until the first wrap() call replaced it. Build independent lists.
        # illuminance
        self.l = [[] for _ in nodes]
        self.l_t = [[] for _ in nodes]
        self.l_lines: list[plt.Line2D] = [ax[0][0].plot([], label='illuminance')[0] for ax in self.axes]
        # reference
        self.r = [[] for _ in nodes]
        self.r_t = [[] for _ in nodes]
        self.r_lines: list[plt.Line2D] = [ax[0][0].plot([], label='reference')[0] for ax in self.axes]
        # duty cycle
        self.DC = [[] for _ in nodes]
        self.DC_t = [[] for _ in nodes]
        self.DC_lines: list[plt.Line2D] = [ax[0][1].plot([], label='duty cycle')[0] for ax in self.axes]
        # integral error
        self.IE = [[] for _ in nodes]
        self.IE_t = [[] for _ in nodes]
        self.IE_lines: list[plt.Line2D] = [ax[1][0].plot([], label='integral error')[0] for ax in self.axes]
        # tracking error
        self.TE = [[] for _ in nodes]
        self.TE_t = [[] for _ in nodes]
        self.TE_lines: list[plt.Line2D] = [ax[1][1].plot([], label='tracking error')[0] for ax in self.axes]

    def setup_figures(self):
        """Give every figure its node title and label the bottom x-axes."""
        # (loop variable renamed from `figure`, which shadowed the imported
        # matplotlib.figure module)
        for idx, fig in enumerate(self.figures):
            fig.suptitle(f"Node '{self.nodes[idx]}'")
            self.axes[idx][1][0].set_xlabel('Time (s)')
            self.axes[idx][1][1].set_xlabel('Time (s)')

    def clear_figures(self):
        """Erase all plotted data and reset the per-node sample buffers."""
        for idx in range(len(self.nodes)):
            for row in range(self.rows):
                for col in range(self.cols):
                    ax = self.axes[idx][row][col]
                    for line in ax.get_lines():
                        line.set_data([], [])
        self.update_figures()
        # Fresh, independent buffers per node (see the __init__ bug note).
        self.l = [[] for _ in self.nodes]
        self.l_t = [[] for _ in self.nodes]
        self.r = [[] for _ in self.nodes]
        self.r_t = [[] for _ in self.nodes]
        self.DC = [[] for _ in self.nodes]
        self.DC_t = [[] for _ in self.nodes]
        self.IE = [[] for _ in self.nodes]
        self.IE_t = [[] for _ in self.nodes]
        self.TE = [[] for _ in self.nodes]
        self.TE_t = [[] for _ in self.nodes]

    def update_figures(self):
        """Rescale every axis around the latest sample and refresh legends."""
        for idx in range(len(self.nodes)):
            for row in range(self.rows):
                for col in range(self.cols):
                    ax = self.axes[idx][row][col]
                    ax.set_xlim((self.latest_time[idx] - 5, self.latest_time[idx] + 0.5))
                    ax.legend()
            node_axes = self.axes[idx]
            lux_ax = node_axes[0][0]
            dc_ax = node_axes[0][1]
            ie_ax = node_axes[1][0]
            te_ax = node_axes[1][1]
            dc_ax.set_ylim((-0.05, 1.05))
            # NOTE(review): assumes reference samples arrive alongside
            # illuminance samples — max() on an empty self.r would raise.
            if self.l[idx]:
                lux_ax.set_ylim((-2, max(max(max(self.l[idx]), max(self.r[idx])) * 1.1, 10)))
            else:
                lux_ax.set_ylim((-2, 10))
            if self.IE[idx]:
                ie_ax.set_ylim((min(-1, min(self.IE[idx]) - 0.3), max(1, max(self.IE[idx]) + 0.3)))
            else:
                ie_ax.set_ylim((-1, 1))
            if self.TE[idx]:
                te_ax.set_ylim((min(-1, min(self.TE[idx]) - 0.3), max(1, max(self.TE[idx]) + 0.3)))
            else:
                te_ax.set_ylim((-1, 1))

    @staticmethod
    def wrap(t: list, vals: list):
        """Trim a (time, value) series to MAX_SAMPLES and to a ~6.5 s window
        around the newest timestamp; returns the trimmed (t, vals) pair.

        BUG FIX: the original trimmed only `vals`, so the searchsorted
        indices (computed on the untrimmed `t`) no longer lined up with
        `vals` once a series exceeded MAX_SAMPLES. Trim both in lockstep.
        """
        t = t[-Artist.MAX_SAMPLES:]
        vals = vals[-Artist.MAX_SAMPLES:]
        tmin = t[-1] - 6.0
        tmax = t[-1] + 0.5
        t_arr = np.array(t)
        tmin_idx = np.searchsorted(t_arr, tmin)
        tmax_idx = np.searchsorted(t_arr, tmax)
        return t[tmin_idx:tmax_idx], vals[tmin_idx:tmax_idx]

    def _append_sample(self, times, values, lines, idx, value, time):
        """Shared body of the func_* callbacks: append, window, redraw."""
        times[idx].append(time)
        values[idx].append(value)
        times[idx], values[idx] = self.wrap(times[idx], values[idx])
        lines[idx].set_data(times[idx], values[idx])

    def func_TE(self, idx, tracking_error, time):
        self._append_sample(self.TE_t, self.TE, self.TE_lines, idx, tracking_error, time)

    def func_l(self, idx, illuminance, time):
        self._append_sample(self.l_t, self.l, self.l_lines, idx, illuminance, time)

    def func_r(self, idx, reference, time):
        self._append_sample(self.r_t, self.r, self.r_lines, idx, reference, time)

    def func_IE(self, idx, integral_error, time):
        self._append_sample(self.IE_t, self.IE, self.IE_lines, idx, integral_error, time)

    def func_DC(self, idx, duty_cycle, time):
        self._append_sample(self.DC_t, self.DC, self.DC_lines, idx, duty_cycle, time)
ports = serial.tools.list_ports.comports()
serial_ports = []
for port, desc, hwid in sorted(ports):
print(f"{port}: {desc} [{hwid}]")
for port, desc, hwid in sorted(ports):
if 'Serial' in desc:
print(f"Added {port}")
serial_ports.append(port)
if monitor_serial == 'Single':
ports_to_monitor = [serial_ports[0]]
else:
ports_to_monitor = serial_ports
names = [f"Node {i+1}" for i in range(number_of_network_nodes)]
artist = Artist(names)
artist.setup_figures()
commands = (
('s t ', 'tracking error', artist.func_TE),
('s l ', 'illuminance', artist.func_l),
('s r ', 'reference', artist.func_r),
('s i ', 'integral error', artist.func_IE),
('s d ', 'duty cycle', artist.func_DC),
)
QUEUE_SIZE = 1000
input_height = 5
output_height = 7
inputs_str = [""]*input_height
outputs_str = [[""]*output_height for _ in range(number_of_network_nodes)]
def serial_read(serial: serial.Serial, data_queue_out: queue.Queue, data_queue_in: queue.Queue):
    """Worker-thread loop: pump bytes read from the port into
    data_queue_out and write any pending bytes from data_queue_in back to
    the port.

    NOTE(review): the `serial` parameter shadows the imported `serial`
    module inside this function; the annotation still resolves because it
    is evaluated at definition time.
    """
    buf = b''
    while True:
        buf = serial.read_all()
        if buf:
            data_queue_out.put(buf)
        try:
            # Non-blocking fetch of outgoing data; Empty just means there
            # is nothing to send this iteration.
            data_in = data_queue_in.get(block=False)
            if data_in:
                print(f"Writing: {data_in}")
                serial.write(data_in)
                serial.flush()
                data_in = None
        except queue.Empty:
            pass
def update_str_lst(lst, new_str):
    """Scroll the buffer in place: drop the oldest entry, append new_str."""
    del lst[0]
    lst.append(new_str)
queues_out = [queue.Queue(QUEUE_SIZE) for _ in range(len(ports_to_monitor))]
queues_in = [queue.Queue(QUEUE_SIZE) for _ in range(len(ports_to_monitor))]
serial_objects = []
for port in ports_to_monitor:
serial_objects += [serial.Serial(port, timeout=None)]
time.sleep(0.2)
threads = [threading.Thread(target=serial_read, args=(serial_objects[i], queues_out[i], queues_in[i]), daemon=True) for i in range(len(serial_objects))]
[thread.start() for thread in threads]
data_read_buffers = [b'' for _ in range(len(ports_to_monitor))]
sg.theme('Python')
column = [[sg.Text(f'{port}:'), sg.Text(size=(80,output_height), key=port)] for port in ports_to_monitor]
column += [[sg.Text(f'[{ports_to_monitor[0]}] Input:'), sg.Text(size=(80, input_height), key='input_win')]]
TOGGLE_RL_KEYS = [f"{node}_toggle_rl" for node in range(number_of_network_nodes)]
toggle_rl = [sg.Button(f"Node {i+1}", key=TOGGLE_RL_KEYS[i]) for i in range(number_of_network_nodes)]
TOGGLE_TE_KEYS = [f"{node}_toggle_te" for node in range(number_of_network_nodes)]
toggle_te = [sg.Button(f"Node {i+1}", key=TOGGLE_TE_KEYS[i]) for i in range(number_of_network_nodes)]
TOGGLE_IE_KEYS = [f"{node}_toggle_ie" for node in range(number_of_network_nodes)]
toggle_ie = [sg.Button(f"Node {i+1}", key=TOGGLE_IE_KEYS[i]) for i in range(number_of_network_nodes)]
TOGGLE_DC_KEYS = [f"{node}_toggle_dc" for node in range(number_of_network_nodes)]
toggle_dc = [sg.Button(f"Node {i+1}", key=TOGGLE_DC_KEYS[i]) for i in range(number_of_network_nodes)]
layout = [[sg.Column(column, element_justification='l', vertical_alignment='t')],
[sg.Input(key='SerialInput', do_not_clear=True)],
[sg.Button('[Enter] Send', bind_return_key=True), sg.Button('Clear Figures')],
[sg.Text("Toggle Reference/Lux Measurements")] + toggle_rl,
[sg.Text("Toggle Tracking Error Measurements")] + toggle_te,
[sg.Text("Toggle Integral Error Measurements")] + toggle_ie,
[sg.Text("Toggle Duty-Cycle Measurements")] + toggle_dc,
[sg.Button('Exit')]
]
window = sg.Window('Serial monitor', layout)
_iter = 0
window.read(timeout=1)
# Main GUI/serial loop: drain the reader queues, parse telemetry lines of
# the form "s <t|l|r|i|d> <node> <values...> <timestamp_ms>", feed them to
# the Artist, and react to window events (send, clear, per-node toggles).
while True:
    for idx, buffer_queue in enumerate(queues_out):
        try:
            while not buffer_queue.empty():
                data_read_buffers[idx] += buffer_queue.get(block=False)
            data_string = data_read_buffers[idx].decode('latin-1')
            data_read_buffers[idx] = b''
            if _iter == 0:
                # first pass just flushes whatever was buffered before start
                break
            if data_string:
                for line in data_string.split('\n'):
                    if line:
                        update_str_lst(outputs_str[idx], line)
                        for command in commands:
                            if line.startswith(command[0]):
                                # args[0] = node index, args[1:-1] = values,
                                # args[-1] = timestamp in milliseconds
                                args = (line.split(command[0])[1]).split(' ')
                                flargs = [float(arg) for arg in args[1:]]
                                time_of_command = float(args[-1])/1000.0
                                if artist.latest_time[int(args[0])] == 0.0:
                                    artist.latest_time[int(args[0])] = time_of_command
                                if time_of_command < artist.latest_time[int(args[0])] - 1.0: # ignore old samples
                                    continue
                                if time_of_command > artist.latest_time[int(args[0])] + 6000.0: # ignore probably corrupted samples
                                    continue
                                artist.latest_time[int(args[0])] = max(artist.latest_time[int(args[0])], time_of_command)
                                # dispatch to the matching Artist.func_* callback
                                command[2](int(args[0]), *flargs[:-1], time_of_command)
                window[serial_ports[idx]].update('\n'.join(outputs_str[idx]))
        except queue.Empty:
            pass
    _iter += 1
    # Redraw is expensive: refresh the figures only every 100 iterations.
    if _iter % 100 == 0:
        artist.update_figures()
    event, values = window.read(timeout=1)
    if event == sg.TIMEOUT_KEY:
        continue
    if event == sg.WIN_CLOSED or event == 'Exit':
        break
    if event == '[Enter] Send':
        # Echo the command locally and queue it for the first serial port.
        update_str_lst(inputs_str, values['SerialInput'])
        window['input_win'].update('\n'.join(inputs_str))
        queues_in[0].put(f"{values['SerialInput']}\n".encode('latin-1'))
        window['SerialInput'].update('')
    if event == 'Clear Figures':
        artist.clear_figures()
    # Per-node toggle buttons: send the corresponding stream on/off command.
    for idx, toggle_info_key in enumerate(TOGGLE_RL_KEYS):
        if event == toggle_info_key:
            queues_in[0].put(f"s l {idx}\n".encode('latin-1'))
            queues_in[0].put(f"s r {idx}\n".encode('latin-1'))
    for idx, toggle_info_key in enumerate(TOGGLE_IE_KEYS):
        if event == toggle_info_key:
            queues_in[0].put(f"s i {idx}\n".encode('latin-1'))
    for idx, toggle_info_key in enumerate(TOGGLE_TE_KEYS):
        if event == toggle_info_key:
            queues_in[0].put(f"s t {idx}\n".encode('latin-1'))
    for idx, toggle_info_key in enumerate(TOGGLE_DC_KEYS):
        if event == toggle_info_key:
            queues_in[0].put(f"s d {idx}\n".encode('latin-1'))
window.close()
34111347494 | import argparse
'''
Given a file with both genders, creates 2 CSVs, one for male and one for female.
Uses 'gender' column; '0'=female, '1'=male.
'''
def split_genders(file, dest_file, gender_idx):
    """Split a CSV into <dest_file>_male.csv and <dest_file>_female.csv.

    `gender_idx` indexes the comma-split fields of each row; rows whose
    (stripped) gender field is '1'/'male' go to the male file, '0'/'female'
    to the female file. The header (first line) is copied to both outputs.

    Fixes over the original: the source file handle was never closed
    (now a `with` block) and the two near-identical output loops are
    deduplicated into one helper. Output bytes are unchanged — each
    written line is still followed by an extra '\\n' (rows already end in
    a newline), preserving the original blank-line format.
    """
    with open(file, 'r') as src:
        lines = src.readlines()
    _write_gender(lines, dest_file + '_male.csv', gender_idx, ('1', 'male'))
    _write_gender(lines, dest_file + '_female.csv', gender_idx, ('0', 'female'))
def _write_gender(lines, dest_path, gender_idx, accepted):
    """Write the header plus every row whose gender field is in `accepted`."""
    with open(dest_path, 'w') as out:
        for line_idx, line in enumerate(lines):
            if line_idx == 0:
                out.write(line)
                out.write('\n')
            elif line.split(',')[gender_idx].strip() in accepted:
                out.write(line)
                out.write('\n')
if __name__ == "__main__":
    # Usage: python3 split_genders.py --data=[ravdess,msp,masc]
    parser = argparse.ArgumentParser(description='Split data into male and female CSVs.')
    parser.add_argument('--data', required=True)
    data = parser.parse_args().data
    # In all three datasets the gender field is the second-to-last column,
    # hence gender_idx = -2.
    if data == 'ravdess':
        train_file = "final_data/ravdess/speech_train_data_numerized.csv"
        val_file = "final_data/ravdess/speech_val_data_numerized.csv"
        test_file = "final_data/ravdess/speech_test_data_numerized.csv"
        split_genders(train_file, "final_data/ravdess/speech_train_data_numerized", -2)
        split_genders(val_file, "final_data/ravdess/speech_val_data_numerized", -2)
        split_genders(test_file, "final_data/ravdess/speech_test_data_numerized", -2)
    elif data == 'msp':
        train_file = "final_data/msp/train_data_dup.csv"
        val_file = "final_data/msp/val_data_dup.csv"
        test_file = "final_data/msp/test_data_dup.csv"
        split_genders(train_file, "final_data/msp/train_data", -2)
        split_genders(val_file, "final_data/msp/val_data", -2)
        split_genders(test_file, "final_data/msp/test_data", -2)
    elif data == 'masc':
        train_file = "final_data/masc/train_masc_2.csv"
        val_file = "final_data/masc/val_masc_2.csv"
        test_file = "final_data/masc/test_masc_2.csv"
        split_genders(train_file, "final_data/masc/train", -2)
        split_genders(val_file, "final_data/masc/val", -2)
        split_genders(test_file, "final_data/masc/test", -2)
7784669889 | import requests
import json
def versiontuple(v):
    """Convert a dotted version string like '1.2.3' into a tuple of ints,
    suitable for correct (numeric, not lexicographic) comparison."""
    return tuple(int(part) for part in v.split("."))
if __name__ == '__main__':
    # Fetch the remote version manifest from the GitHub raw URL.
    url = 'https://raw.githubusercontent.com/matteocali/DEILabs/main/data/version.json'
    f = requests.get(url)
    # The .json() method automatically parses the response into JSON.
    git_version = f.json()["version"]
    # Read the locally installed version (note: `f` is reused/shadowed here).
    with open('data/version.json') as f:
        version = json.load(f)["version"]
    # Tuple comparison handles multi-digit components correctly.
    if versiontuple(git_version) > versiontuple(version):
        print(f"Update available ({version} -> {git_version}) at https://github.com/caligola25/DEILabs")
    else:
        print("")
| matteocali/DEILabs | data/version_checker.py | version_checker.py | py | 763 | python | en | code | 4 | github-code | 36 |
32625776262 | import re
class Solution:
    def myAtoi(self, string):
        """
        Parse a leading (optionally signed) integer from the string and
        clamp it to the signed 32-bit range; return 0 when no integer
        starts the (whitespace-stripped) input.

        :type string: str
        :rtype: int
        """
        stripped = string.strip()
        number = re.compile(r'[+-]?\d+')
        try:
            # match() only succeeds at the start; a missing match raises
            # AttributeError on .group(), which maps to the 0 result.
            value = int(number.match(stripped).group())
        except (ValueError, AttributeError):
            return 0
        return max(-2147483648, min(value, 2147483647))
5049733369 | import argparse
import json
import os
import time
# isort: off
import torch
import torch.multiprocessing as mp
import tensorrt as trt
# isort: on
from safetensors import safe_open
from transformers import AutoModelForCausalLM, GPTNeoXConfig
from weight import load_from_hf_gpt_neox
import tensorrt_llm
from tensorrt_llm.builder import Builder
from tensorrt_llm.logger import logger
from tensorrt_llm.mapping import Mapping
from tensorrt_llm.models import quantize_model
from tensorrt_llm.network import net_guard
from tensorrt_llm.plugin.plugin import ContextFMHAType
from tensorrt_llm.quantization import QuantMode
MODEL_NAME = "gptneox"
hf_gpt = None
class StateDict():
    # Lazy wrapper around a safetensors checkpoint: tensors are fetched
    # on demand via get() rather than loaded all at once.
    def __init__(self, quant_ckpt_dir):
        # safetensors handle opened with the torch framework on device 0.
        self.model_state_dict = safe_open(quant_ckpt_dir,
                                          framework="pt",
                                          device=0)
    def get(self, k):
        """Fetch tensor `k` from the checkpoint and move it to the CPU."""
        return self.model_state_dict.get_tensor(k).cpu()
class GPTQModel():
    # Minimal stand-in for a HF GPT-NeoX model backed by a GPTQ
    # safetensors checkpoint: exposes `.config` (a GPTNeoXConfig rebuilt
    # from the model directory's config.json) and `.state_dict()` for the
    # weight loader.
    def __init__(self, model_dir, quant_ckpt_dir):
        with open(model_dir + '/config.json', 'r') as f:
            model_config = json.load(f)
            # Copy each architecture field from the raw JSON into a
            # GPTNeoXConfig so downstream code can use attribute access.
            self.config = GPTNeoXConfig()
            self.config.vocab_size = model_config['vocab_size']
            self.config.hidden_size = model_config['hidden_size']
            self.config.num_hidden_layers = model_config['num_hidden_layers']
            self.config.num_attention_heads = model_config[
                'num_attention_heads']
            self.config.intermediate_size = model_config['intermediate_size']
            self.config.hidden_act = model_config['hidden_act']
            self.config.rotary_pct = model_config['rotary_pct']
            self.config.rotary_emb_base = model_config['rotary_emb_base']
            self.config.max_position_embeddings = model_config[
                'max_position_embeddings']
            self.config.initializer_range = model_config['initializer_range']
            self.config.layer_norm_eps = model_config['layer_norm_eps']
            self.config.use_cache = model_config['use_cache']
            self.config.bos_token_id = model_config['bos_token_id']
            self.config.eos_token_id = model_config['eos_token_id']
            self.config.tie_word_embeddings = model_config[
                'tie_word_embeddings']
            self.model_state_dict = StateDict(quant_ckpt_dir)
    def state_dict(self):
        # Mirrors nn.Module.state_dict() just enough for the weight loader.
        return self.model_state_dict
def get_engine_name(model, dtype, tp_size, rank):
    """Compose the canonical engine file name for one TP rank."""
    return f'{model}_{dtype}_tp{tp_size}_rank{rank}.engine'
def serialize_engine(engine, path):
    """Write a serialized TensorRT engine blob to `path`, logging the
    elapsed wall-clock time."""
    logger.info(f'Serializing engine to {path}...')
    tik = time.time()
    with open(path, 'wb') as f:
        f.write(engine)
    tok = time.time()
    # Format the elapsed seconds as HH:MM:SS for the log line.
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    logger.info(f'Engine serialized. Total time: {t}')
def parse_arguments():
    """Parse the build-script CLI flags.

    If --model_dir is given, also loads the HF (or, with
    --use_weight_only_groupwise_quant_matmul_plugin, the GPTQ-quantized)
    GPT-NeoX model into the module-level `hf_gpt` and overrides the
    architecture flags (n_embd, n_head, n_layer, n_positions, vocab_size,
    rotary_pct) from its config.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--world_size',
                        type=int,
                        default=1,
                        help='world size, only support tensor parallelism now')
    parser.add_argument(
        '--model_dir',
        type=str,
        default=None,
        help='The path to HF GPT-NeoX model / checkpoints to read weights from')
    parser.add_argument('--dtype',
                        type=str,
                        default='float16',
                        choices=['float16', 'float32'])
    parser.add_argument(
        '--timing_cache',
        type=str,
        default='model.cache',
        help=
        'The path of to read timing cache from, will be ignored if the file does not exist'
    )
    parser.add_argument('--log_level', type=str, default='info')
    parser.add_argument('--vocab_size', type=int, default=50432)
    parser.add_argument('--n_layer', type=int, default=44)
    parser.add_argument('--n_positions', type=int, default=2048)
    parser.add_argument('--n_embd', type=int, default=6144)
    parser.add_argument('--n_head', type=int, default=64)
    parser.add_argument('--hidden_act', type=str, default='gelu')
    parser.add_argument(
        '--rotary_pct',
        type=float,
        default=0.25,
        help="Percentage of hidden dimensions to allocate to rotary embeddings."
    )
    parser.add_argument('--max_batch_size', type=int, default=64)
    parser.add_argument('--max_input_len', type=int, default=1024)
    parser.add_argument('--max_output_len', type=int, default=1024)
    parser.add_argument('--max_beam_width', type=int, default=1)
    # Plugin flags use nargs='?' so the bare flag enables the plugin with
    # the float16 default while an explicit value selects the precision.
    parser.add_argument('--use_gpt_attention_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--use_gemm_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--use_weight_only_quant_matmul_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16'])
    parser.add_argument('--use_weight_only_groupwise_quant_matmul_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16'])
    parser.add_argument(
        '--groupwise_quant_safetensors_path',
        type=str,
        default=None,
        help=
        "The path to groupwise quantized GPT-NeoX model / checkpoints to read weights from."
    )
    parser.add_argument('--use_layernorm_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--parallel_build', default=False, action='store_true')
    parser.add_argument('--enable_context_fmha',
                        default=False,
                        action='store_true')
    parser.add_argument('--enable_context_fmha_fp32_acc',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--multi_block_mode',
        default=False,
        action='store_true',
        help=
        'Split long kv sequence into multiple blocks (applied to generation MHA kernels). \
                        It is beneifical when batchxnum_heads cannot fully utilize GPU.'
    )
    parser.add_argument('--gpus_per_node', type=int, default=8)
    parser.add_argument(
        '--output_dir',
        type=str,
        default='engine_outputs',
        help=
        'The path to save the serialized engine files, timing cache file and model configs'
    )
    parser.add_argument('--remove_input_padding',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--use_parallel_embedding',
        action="store_true",
        default=False,
        help=
        'By default embedding parallelism is disabled. By setting this flag, embedding parallelism is enabled'
    )
    parser.add_argument(
        '--embedding_sharding_dim',
        type=int,
        default=1, # Meta does TP on hidden dim
        choices=[0, 1],
        help=
        'By default the embedding lookup table is sharded along vocab dimension (--embedding_sharding_dim=0). '
        'To shard it along hidden dimension, set --embedding_sharding_dim=1'
        'Note: embedding sharing is only enabled when --embedding_sharding_dim=0'
    )
    parser.add_argument(
        '--strongly_typed',
        default=False,
        action="store_true",
        help=
        'This option is introduced with trt 9.1.0.1+ and will reduce the building time significantly for fp8.'
    )
    args = parser.parse_args()
    logger.set_level(args.log_level)
    # When a checkpoint is supplied, load it once here and take the model
    # architecture from its config instead of the CLI defaults above.
    if args.model_dir is not None:
        global hf_gpt
        if not args.use_weight_only_groupwise_quant_matmul_plugin:
            logger.info(f'Loading HF GPT-NeoX model from {args.model_dir}...')
            hf_gpt = AutoModelForCausalLM.from_pretrained(args.model_dir)
            args.n_embd = hf_gpt.config.hidden_size
            args.n_head = hf_gpt.config.num_attention_heads
            args.n_layer = hf_gpt.config.num_hidden_layers
            args.n_positions = hf_gpt.config.max_position_embeddings
            args.vocab_size = hf_gpt.config.vocab_size
            args.rotary_pct = hf_gpt.config.rotary_pct
        else:
            assert (
                args.groupwise_quant_safetensors_path is not None
            ), f'Please set the path to the groupwise quantized GPT-NeoX checkpoints with --groupwise_quant_safetensors_path'
            logger.info(
                f'Loading GPTQ quantized HF GPT-NeoX model from {args.groupwise_quant_safetensors_path}...'
            )
            hf_gpt = GPTQModel(args.model_dir,
                               args.groupwise_quant_safetensors_path)
            args.n_embd = hf_gpt.config.hidden_size
            args.n_head = hf_gpt.config.num_attention_heads
            args.n_layer = hf_gpt.config.num_hidden_layers
            args.n_positions = hf_gpt.config.max_position_embeddings
            args.vocab_size = hf_gpt.config.vocab_size
            args.rotary_pct = hf_gpt.config.rotary_pct
    return args
def build_rank_engine(builder: Builder,
                      builder_config: tensorrt_llm.builder.BuilderConfig,
                      engine_name, rank, args):
    '''
    @brief: Build the engine on the given rank.
    @param rank: The rank to build the engine.
    @param args: The cmd line arguments.
    @return: The built engine.
    '''
    # Only fp16/fp32 are handled here; any other --dtype silently falls back to fp32.
    kv_dtype = trt.float16 if args.dtype == 'float16' else trt.float32
    # GPT-NeoX applies rotary embedding to only rotary_pct of each head's dims.
    rotary_dim = int((args.n_embd // args.n_head) * args.rotary_pct)
    # Initialize Module
    tensorrt_llm_gpt = tensorrt_llm.models.GPTNeoXForCausalLM(
        num_layers=args.n_layer,
        num_heads=args.n_head,
        hidden_size=args.n_embd,
        vocab_size=args.vocab_size,
        hidden_act=args.hidden_act,
        max_position_embeddings=args.n_positions,
        rotary_dim=rotary_dim,
        dtype=kv_dtype,
        mapping=Mapping(world_size=args.world_size,
                        rank=rank,
                        tp_size=args.world_size),  # TP only
        apply_query_key_layer_scaling=builder_config.
        apply_query_key_layer_scaling,
        use_parallel_embedding=args.use_parallel_embedding,
        embedding_sharding_dim=args.embedding_sharding_dim)
    # Optionally mark linear layers for weight-only quantization; the group-wise
    # variant (GPTQ-style) uses a fixed group size of 128 with zero points.
    if args.use_weight_only_quant_matmul_plugin or args.use_weight_only_groupwise_quant_matmul_plugin:
        quant_mode = QuantMode.from_description(
            quantize_weights=True,
            quantize_activations=False,
            per_token=False,
            per_channel=False,
            per_group=args.use_weight_only_groupwise_quant_matmul_plugin,
            use_int4_weights=False)
        quantize_kwargs = {}
        if args.use_weight_only_groupwise_quant_matmul_plugin:
            quantize_kwargs = {
                "group_size": 128,
                "zero": True,
            }
        tensorrt_llm_gpt = quantize_model(tensorrt_llm_gpt, quant_mode,
                                          **quantize_kwargs)
    # Copy weights from the HF checkpoint (module-level global `hf_gpt`,
    # populated in parse_arguments) into the TRT-LLM model, sharded per rank.
    if args.model_dir is not None:
        assert hf_gpt is not None, f'Could not load weights from hf_gpt model as it is not loaded yet.'
        if args.world_size > 1:
            assert (
                args.n_embd % args.world_size == 0
            ), f'Embedding size/hidden size must be divisible by world size.'
            assert (
                args.n_head % args.world_size == 0
            ), f'Number of attention heads must be divisible by world size.'
        load_from_hf_gpt_neox(
            tensorrt_llm_gpt, hf_gpt, (args.dtype == 'float16'), rank,
            args.world_size, args.use_weight_only_groupwise_quant_matmul_plugin)
    # Module -> Network
    network = builder.create_network()
    network.trt_network.name = engine_name
    # Enable the requested TensorRT-LLM plugins; each flag carries the dtype
    # the corresponding plugin should run in.
    if args.use_gpt_attention_plugin:
        network.plugin_config.set_gpt_attention_plugin(
            dtype=args.use_gpt_attention_plugin)
    if args.use_gemm_plugin:
        network.plugin_config.set_gemm_plugin(dtype=args.use_gemm_plugin)
    if args.use_layernorm_plugin:
        network.plugin_config.set_layernorm_plugin(
            dtype=args.use_layernorm_plugin)
    # The two context-FMHA modes are mutually exclusive.
    assert not (args.enable_context_fmha and args.enable_context_fmha_fp32_acc)
    if args.enable_context_fmha:
        network.plugin_config.set_context_fmha(ContextFMHAType.enabled)
    if args.enable_context_fmha_fp32_acc:
        network.plugin_config.set_context_fmha(
            ContextFMHAType.enabled_with_fp32_acc)
    if args.multi_block_mode:
        network.plugin_config.enable_mmha_multi_block_mode()
    if args.use_weight_only_quant_matmul_plugin:
        network.plugin_config.set_weight_only_quant_matmul_plugin(
            dtype=args.use_weight_only_quant_matmul_plugin)
    if args.use_weight_only_groupwise_quant_matmul_plugin:
        network.plugin_config.set_weight_only_groupwise_quant_matmul_plugin(
            dtype=args.use_weight_only_groupwise_quant_matmul_plugin)
    if args.world_size > 1:
        network.plugin_config.set_nccl_plugin(args.dtype)
    if args.remove_input_padding:
        network.plugin_config.enable_remove_input_padding()
    # Trace the model once inside net_guard to populate the TRT network graph.
    with net_guard(network):
        # Prepare
        network.set_named_parameters(tensorrt_llm_gpt.named_parameters())
        # Forward
        inputs = tensorrt_llm_gpt.prepare_inputs(args.max_batch_size,
                                                 args.max_input_len,
                                                 args.max_output_len, True,
                                                 args.max_beam_width)
        tensorrt_llm_gpt(*inputs)
    tensorrt_llm.graph_rewriting.optimize(network)
    engine = None
    # Network -> Engine
    engine = builder.build_engine(network, builder_config)
    # Only rank 0 writes the shared engine config alongside the engines.
    if rank == 0:
        config_path = os.path.join(args.output_dir, 'config.json')
        builder.save_config(builder_config, config_path)
    return engine
def build(rank, args):
    """Build all tensor-parallel engines (serially) or this rank's engine.

    With --parallel_build each spawned process builds only its own rank's
    engine; otherwise a single process iterates over every rank.
    """
    torch.cuda.set_device(rank % args.gpus_per_node)
    tensorrt_llm.logger.set_level(args.log_level)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # when doing serializing build, all ranks share one engine
    apply_query_key_layer_scaling = False
    builder = Builder()
    cache = None
    for cur_rank in range(args.world_size):
        # skip other ranks if parallel_build is enabled
        if args.parallel_build and cur_rank != rank:
            continue
        builder_config = builder.create_builder_config(
            name=MODEL_NAME,
            precision=args.dtype,
            timing_cache=args.timing_cache if cache is None else cache,
            tensor_parallel=args.world_size,  # TP only
            parallel_build=args.parallel_build,
            num_layers=args.n_layer,
            num_heads=args.n_head,
            hidden_size=args.n_embd,
            vocab_size=args.vocab_size,
            hidden_act=args.hidden_act,
            max_position_embeddings=args.n_positions,
            apply_query_key_layer_scaling=apply_query_key_layer_scaling,
            max_batch_size=args.max_batch_size,
            max_beam_width=args.max_beam_width,
            max_input_len=args.max_input_len,
            int8=args.use_weight_only_quant_matmul_plugin
            or args.use_weight_only_groupwise_quant_matmul_plugin,
            max_output_len=args.max_output_len,
            strongly_typed=args.strongly_typed)
        engine_name = get_engine_name(MODEL_NAME, args.dtype, args.world_size,
                                      cur_rank)
        engine = build_rank_engine(builder, builder_config, engine_name,
                                   cur_rank, args)
        assert engine is not None, f'Failed to build engine for rank {cur_rank}'
        if cur_rank == 0:
            # Use in-memory timing cache for multiple builder passes.
            if not args.parallel_build:
                cache = builder_config.trt_builder_config.get_timing_cache()
        serialize_engine(engine, os.path.join(args.output_dir, engine_name))
    # Persist the timing cache once so later builds start warm.
    if rank == 0:
        ok = builder.save_timing_cache(
            builder_config, os.path.join(args.output_dir, "model.cache"))
        assert ok, "Failed to save timing cache."
if __name__ == '__main__':
    args = parse_arguments()
    tik = time.time()
    # Parallel build is only attempted when explicitly requested, more than one
    # rank is needed, and enough local GPUs exist (one process per rank).
    if args.parallel_build and args.world_size > 1 and \
            torch.cuda.device_count() >= args.world_size:
        logger.warning(
            f'Parallelly build TensorRT engines. Please make sure that all of the {args.world_size} GPUs are totally free.'
        )
        # mp.spawn supplies the rank as the first argument of build().
        mp.spawn(build, nprocs=args.world_size, args=(args, ))
    else:
        args.parallel_build = False
        logger.info('Serially build TensorRT engines.')
        build(0, args)
    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    logger.info(f'Total time of building all {args.world_size} engines: {t}')
| NVIDIA/TensorRT-LLM | examples/gptneox/build.py | build.py | py | 17,487 | python | en | code | 3,328 | github-code | 36 |
70864089065 | import multiprocessing
import os
class Logger:
    """Minimal console logger that tags every line with the current
    multiprocessing process name.

    Output can be globally silenced by zeroing the class-level switches.
    """

    # Truthy values enable the corresponding channel.
    info_enabled = 1
    debug_enabled = 1
    output_enabled = 1

    @staticmethod
    def output(prefix, s):
        """Print *s* with *prefix* and the current process name, if enabled."""
        if not Logger.output_enabled:
            return
        print(multiprocessing.current_process().name, prefix, s)

    @staticmethod
    def info(s):
        """Emit an INFO-level line (no-op when info logging is disabled)."""
        if not Logger.info_enabled:
            return
        Logger.output(" [INFO] ", str(s))

    @staticmethod
    def debug(s):
        """Emit a DEBUG-level line (no-op when debug logging is disabled)."""
        if not Logger.debug_enabled:
            return
        Logger.output(" [DEBUG] ", str(s))
11904566223 | from collections import defaultdict
class DigitSignals:
    """Deduce the scrambled seven-segment wiring of one display entry
    (Advent of Code 2021, day 8) and translate output patterns to digits.

    Every pattern is canonicalized by sorting its characters so patterns can
    be compared as plain strings regardless of segment order.
    """
    def __init__(self, signals):
        # Canonical (alphabetically sorted) form of each observed pattern.
        self.signals = ["".join(sorted(s)) for s in signals]
        self.signalsByLength = defaultdict(list)
        self.wires = ["?"] * 7
        # numberMap: digit -> canonical pattern; signalsToNumbers: inverse.
        self.numberMap = {}
        self.signalsToNumbers = {}
        self.solve()
    def solve(self):
        """Assign all ten digits to their patterns by successive elimination.

        The deduction order matters: each step relies on digits identified by
        the previous steps (1/4/7/8 -> 3 -> 9 -> 0 -> 6 -> 5 -> 2).
        """
        # Digits 1, 7, 4 and 8 use a unique number of segments (2, 3, 4, 7).
        for signal in self.signals:
            self.signalsByLength[len(signal)].append(signal)
            if len(signal) == 2:
                self.numberMap[1] = signal
            elif len(signal) == 3:
                self.numberMap[7] = signal
            elif len(signal) == 4:
                self.numberMap[4] = signal
            elif len(signal) == 7:
                self.numberMap[8] = signal
        # use 1 and 7 to find top wire
        top = list(filter(lambda c: c not in self.numberMap[1], self.numberMap[7]))[0]
        self.wires[0] = top
        # 3 has length 5 and has all the wires of 7
        for signal in self.signalsByLength[5]:
            isPossible = True
            for c in self.numberMap[7]:
                if c not in signal:
                    isPossible = False
            if isPossible:
                self.numberMap[3] = signal
                break
        # 9 is only 6 character number that has all of 4 characters
        for signal in self.signalsByLength[6]:
            signalSet = set(signal)
            isPossible = True
            for c in self.numberMap[4]:
                if c not in signalSet:
                    isPossible = False
            if isPossible:
                self.numberMap[9] = signal
                break
        # 0 has one character difference from 9 and has both of 1
        for signal in self.signalsByLength[6]:
            signalSet = set(signal)
            differences = 0
            for c in self.numberMap[9]:
                if c not in signalSet:
                    differences += 1
            if differences == 1:
                isPossible = True
                for c in self.numberMap[1]:
                    if c not in signalSet:
                        isPossible = False
                if isPossible:
                    self.numberMap[0] = signal
        # 6 is last remaining 6 char
        for signal in self.signalsByLength[6]:
            if signal not in self.numberMap.values():
                self.numberMap[6] = signal
        # 5 is 6 without 1 character
        sixSet = set(self.numberMap[6])
        for signal in self.signalsByLength[5]:
            if signal == self.numberMap[3]:
                continue
            isPossible = True
            for c in signal:
                if c not in sixSet:
                    isPossible = False
            if isPossible:
                self.numberMap[5] = signal
                break
        # 2 is the only 5-segment pattern still unassigned at this point.
        matched = set(self.numberMap.values())
        unmatched = list(filter(lambda s: s not in matched, self.signals))
        for signal in unmatched:
            if len(signal) == 5:
                self.numberMap[2] = signal
        # reverse number map
        for number, signal in self.numberMap.items():
            self.signalsToNumbers[signal] = str(number)
    def translate(self, signals):
        """Return the integer spelled by the given output patterns."""
        sortedSignals = ["".join(sorted(s)) for s in signals]
        return int("".join([self.signalsToNumbers[s] for s in sortedSignals]))
def solveDisplay(signals, outputs):
    """Decode one display entry: learn the scrambled wiring from *signals*,
    then read the four *outputs* patterns as a single integer."""
    return DigitSignals(signals).translate(outputs)
def solve(file):
    """Decode every display entry in *file*, print each value and the sum.

    Each input line has the form
    "<ten signal patterns> | <four output patterns>".
    """
    # Fix: the file handle was previously opened without ever being closed;
    # a context manager releases it deterministically.
    with open(file) as fh:
        lines = [line.strip().split(" | ") for line in fh]
    total = 0
    for signals, outputs in lines:
        num = solveDisplay(signals.split(), outputs.split())
        print(num)
        total += num
    print("total: " + str(total))
solve("inputs/08/full.txt")
18046091832 | # daf_test_start.py - DAF connection test based on ppo_aoa_random_start.py
import logging
import numpy as np
from stable_baselines3 import PPO
from backend.rl_base_classes.aoa_base_class import AoABaseClass
from backend.rl_environments.discrete_environment import DiscreteEnv
from backend.utils.analysis import plot_average_reward_curve, run_network
from backend.utils.daf_client import DafClient
from backend.utils.logger import start_log
start_log(logging.DEBUG)  # Change to ...INFO or ...DEBUG as needed
log = logging.getLogger('a4h')
# Nominal reset state and per-component spread used to randomize episode
# starts in reset(); the two non-zero trailing factors convert degrees to
# radians (assumed flight-path-angle entries — TODO confirm the state layout).
nominal_start = np.array([30000, 0, 0, 3000, -10*3.14159/180, 0])
variation = np.array([5000, 0, 0, 500, 10*3.14159/180, 0])
# NOTE(review): rand_gen appears unused — reset() samples via np.random.normal.
rand_gen = np.random.default_rng()
class DafTestStart(AoABaseClass):
    """Angle-of-attack RL agent wired to a DAF simulation, used here to
    exercise the DAF client/server connection."""
    def __init__(self, initial_state=nominal_start):
        log.info('__init__()')
        AoABaseClass.__init__(self, initial_state)
        # Discrete action space: 21 angle-of-attack choices in [-20, 20] deg.
        self._fpa_tol = 5 * np.pi / 180
        self.n_actions = 21
        self._aoa_options = np.linspace(-20, 20, self.n_actions)/180*np.pi
        self.dt = 1
        self._max_time = 100
        # Call counters for reset() and _inner_step().
        self.resetc = 0
        self.instep = 0
        self.daf = DafClient()
    def reset(self, initial_state=None):
        """Re-initialize the agent and request a fresh DAF simulation run."""
        self.resetc += 1
        log.info('reset(%d)' % self.resetc)
        if initial_state is None:
            initial_state = np.random.normal(nominal_start, variation)
        # NOTE(review): __init__ re-creates the DafClient and zeroes resetc,
        # so the `resetc > 1` branch below can never execute and the run
        # number logged further down is always 0 — confirm this is intended.
        self.__init__(initial_state=initial_state)
        if self.resetc > 1:
            # NOTE(review): `client` is a module-level global defined only on
            # the DAF-only (non-integrated) path at the bottom of this file;
            # if this branch ever became reachable it would raise NameError —
            # presumably this should be self.daf.receive().
            result_msg = client.receive()
            log.info('Sim result: '+result_msg)
        log.info('Requesting sim run #%d...' % self.resetc)
        runner_params = self.daf.run_sim('daf_sim.TestRunner', self._max_time)
        log.info('Received params %s...' % runner_params)
    def reward(self):
        # Delegate to the base class's third reward formulation.
        return self._reward3()
    def _inner_step(self, action):
        """Advance the dynamics one dt with the chosen AoA and ping DAF."""
        self.instep += 1
        log.info('_inner_step(%d)' % self.instep)
        # Control vector: selected angle of attack, zero second channel.
        u = np.array([self._aoa_options[action], 0.])
        self.constant_step(self.dt, u)
        self.sim_step(self.dt)
        self.daf.send_action(0)  # Action 0: Continue sim
# Manual switch: True trains a PPO agent through the DAF-backed environment;
# False exercises the raw DAF request/response protocol only.
integrated_test = False
if integrated_test: # SB3 and DAF
    env = DiscreteEnv(DafTestStart())
    log.info('Created env')
    model = PPO('MlpPolicy', env, verbose=1)
    model.learn(total_timesteps=10_000) # was 250_000
    model.save('daf_test_start')
    log.info('Created model')
    log.info('Pre run')
    plot_average_reward_curve(env.saved_agents, 100)
    run_network(nominal_start, env, model)
    log.info('Post run')
    # TODO Better way than reaching into env.agent.daf?
    msg = env.agent.daf.receive()
    log.info('Sim result: '+msg)
    log.info('Requesting DAF exit...')
    env.agent.daf.exit_daf()
else: # DAF only
    log.info('Creating DAF connection...')
    client = DafClient()
    num_runs = 3
    sim_secs = 10
    for run in range(num_runs):
        log.info('Requesting sim run #%d...' % (run+1,))
        params = client.run_sim('daf_sim.TestRunner', sim_secs)
        log.info('Received params %s...' % params)
        # Step the sim second by second, sending a continue/terminate action.
        for sec in range(sim_secs):
            state = client.get_state()
            # Simple example of actions: terminate or continue
            if run == 1 and sec == 5:
                client.send_action(1) # Action 1: Terminate sim early
                break
            else:
                client.send_action(0) # Action 0: Continue
        msg = client.receive()
        log.info('Sim result: '+msg)
    log.info('Requesting DAF exit...')
    client.exit_daf()
| hmdmia/HighSpeedRL | rl_runners/daf_test_start.py | daf_test_start.py | py | 3,524 | python | en | code | 0 | github-code | 36 |
5050338619 | import tempfile
import unittest
from collections import OrderedDict
from itertools import product
import numpy as np
import parameterized
# isort: off
import torch
import tensorrt as trt
# isort: on
from parameterized import parameterized
from transformers import BertConfig, BertForQuestionAnswering, BertModel
import tensorrt_llm
import tensorrt_llm.runtime
from tensorrt_llm import Builder
from tensorrt_llm._utils import trt_dtype_to_torch
from tensorrt_llm.network import net_guard
from tensorrt_llm.plugin.plugin import ContextFMHAType
from tensorrt_llm.runtime import TensorInfo
def extract_layer_idx(name):
    """Return the first purely-numeric dot-separated component of *name*
    (e.g. "encoder.layer.5.bias" -> "5"), or None if there is none."""
    return next((part for part in name.split('.') if part.isdigit()), None)
def split(v, tp_size, idx, dim=0):
    """Return the *idx*-th of *tp_size* equal shards of array *v* along *dim*.

    Only 1-D and 2-D arrays are supported; other ranks yield None. With
    tp_size == 1 the input is returned untouched.
    """
    if tp_size == 1:
        return v
    if v.ndim == 1:
        return np.ascontiguousarray(np.split(v, tp_size)[idx])
    if v.ndim == 2:
        return np.ascontiguousarray(np.split(v, tp_size, axis=dim)[idx])
    return None
def load_from_hf_bert(tensorrt_llm_bert,
                      hf_bert,
                      hf_bert_config,
                      rank=0,
                      tensor_parallel=1,
                      fp16=False):
    """Copy HuggingFace BERT weights into a tensorrt_llm BertModel.

    Weights are sharded per tensor-parallel rank via split(); per-layer Q/K/V
    projections are first collected and then fused into one packed QKV tensor.
    """
    # Per-layer [query, key, value] slots, filled while scanning state_dict.
    qkv_weight = [[None, None, None]
                  for _ in range(hf_bert_config.num_hidden_layers)]
    qkv_bias = [[None, None, None]
                for _ in range(hf_bert_config.num_hidden_layers)]
    torch_dtype = torch.float16 if fp16 else torch.float32
    for k, v in hf_bert.state_dict().items():
        v = v.to(torch_dtype).cpu().numpy()
        # Embedding tables and embedding LayerNorm are not sharded.
        if 'embeddings.word_embeddings.weight' in k:
            tensorrt_llm_bert.embedding.vocab_embedding.weight.value = v
        elif 'embeddings.position_embeddings.weight' in k:
            tensorrt_llm_bert.embedding.position_embedding.weight.value = v
        elif 'embeddings.token_type_embeddings.weight' in k:
            tensorrt_llm_bert.embedding.token_embedding.weight.value = v
        elif 'embeddings.LayerNorm.weight' in k:
            tensorrt_llm_bert.embedding.embedding_ln.weight.value = v
        elif 'embeddings.LayerNorm.bias' in k:
            tensorrt_llm_bert.embedding.embedding_ln.bias.value = v
        else:
            # Per-layer tensors: the layer index is embedded in the key name.
            layer_idx = extract_layer_idx(k)
            if layer_idx is None:
                continue
            idx = int(layer_idx)
            if 'attention.output.dense.weight' in k:
                # Row-parallel: shard the attention output projection on dim 1.
                tensorrt_llm_bert.layers[
                    idx].attention.dense.weight.value = split(v,
                                                              tensor_parallel,
                                                              rank,
                                                              dim=1)
            elif 'attention.output.dense.bias' in k:
                tensorrt_llm_bert.layers[idx].attention.dense.bias.value = v
            elif 'attention.output.LayerNorm.weight' in k:
                tensorrt_llm_bert.layers[idx].input_layernorm.weight.value = v
            elif 'attention.output.LayerNorm.bias' in k:
                tensorrt_llm_bert.layers[idx].input_layernorm.bias.value = v
            elif 'intermediate.dense.weight' in k:
                tensorrt_llm_bert.layers[idx].mlp.fc.weight.value = split(
                    v, tensor_parallel, rank)
            elif 'intermediate.dense.bias' in k:
                tensorrt_llm_bert.layers[idx].mlp.fc.bias.value = split(
                    v, tensor_parallel, rank)
            elif 'output.dense.weight' in k:
                tensorrt_llm_bert.layers[idx].mlp.proj.weight.value = split(
                    v, tensor_parallel, rank, dim=1)
            elif 'output.dense.bias' in k:
                tensorrt_llm_bert.layers[idx].mlp.proj.bias.value = v
            elif 'output.LayerNorm.weight' in k:
                tensorrt_llm_bert.layers[idx].post_layernorm.weight.value = v
            elif 'output.LayerNorm.bias' in k:
                tensorrt_llm_bert.layers[idx].post_layernorm.bias.value = v
            elif 'attention.self.query.weight' in k:
                qkv_weight[idx][0] = v
            elif 'attention.self.query.bias' in k:
                qkv_bias[idx][0] = v
            elif 'attention.self.key.weight' in k:
                qkv_weight[idx][1] = v
            elif 'attention.self.key.bias' in k:
                qkv_bias[idx][1] = v
            elif 'attention.self.value.weight' in k:
                qkv_weight[idx][2] = v
            elif 'attention.self.value.bias' in k:
                qkv_bias[idx][2] = v
    # Fuse Q/K/V into one packed tensor per layer, then shard across ranks.
    for i in range(hf_bert_config.num_hidden_layers):
        tensorrt_llm_bert.layers[i].attention.qkv.weight.value = split(
            np.concatenate(qkv_weight[i]), tensor_parallel, rank)
        tensorrt_llm_bert.layers[i].attention.qkv.bias.value = split(
            np.concatenate(qkv_bias[i]), tensor_parallel, rank)
def load_from_hf_qa_bert(tensorrt_llm_qa_bert,
                         hf_qa_bert,
                         hf_bert_config,
                         rank=0,
                         tensor_parallel=1,
                         fp16=False):
    """Load a HF BertForQuestionAnswering into the TRT-LLM QA model.

    Reuses load_from_hf_bert for the encoder, then copies the small
    span-prediction head (qa_outputs) verbatim — it is not sharded.
    """
    load_from_hf_bert(tensorrt_llm_qa_bert.bert, hf_qa_bert, hf_bert_config,
                      rank, tensor_parallel, fp16)
    states = hf_qa_bert.state_dict()
    torch_dtype = torch.float16 if fp16 else torch.float32
    tensorrt_llm_qa_bert.qa_outputs.weight.value = states[
        'qa_outputs.weight'].to(torch_dtype).cpu().numpy()
    tensorrt_llm_qa_bert.qa_outputs.bias.value = states['qa_outputs.bias'].to(
        torch_dtype).cpu().numpy()
class TestBert(unittest.TestCase):
    """End-to-end parity test: build a TRT-LLM BERT engine from HF weights
    and compare its outputs against the HuggingFace reference on GPU."""
    # Evaluated at class-definition time by @parameterized.expand below.
    def load_test_cases():
        models = [BertModel.__name__, BertForQuestionAnswering.__name__]
        test_cases = []
        # fp32 baseline without plugins, then fp16 with plugins and each
        # context-FMHA mode.
        test_cases += product(models, [False], [False], [False],
                              [ContextFMHAType.disabled], ['float32'])
        test_cases += product(models, [False], [True], [True], [
            ContextFMHAType.disabled, ContextFMHAType.enabled,
            ContextFMHAType.enabled_with_fp32_acc
        ], ['float16'])
        return test_cases
    # Builds readable parameterized test names from the case arguments.
    def custom_name_func(testcase_func, param_num, param):
        return "%s_%s" % (
            testcase_func.__name__,
            parameterized.to_safe_name("_".join(str(x) for x in param.args)),
        )
    @parameterized.expand(load_test_cases, name_func=custom_name_func)
    def test_bert(self, model, use_refit, use_plugin, fast_building,
                  context_fmha_type, dtype):
        """Build the engine for one configuration and check numerical parity."""
        tensorrt_llm.logger.set_level('error')
        fp16 = (dtype == 'float16')
        world_size = 1
        rank = 0
        # Fixed small BERT-base-like configuration for the test.
        batch_size = 8
        input_len = 128
        vocab_size = 51200
        num_layers = 12
        num_heads = 12
        hidden_act = 'gelu'
        max_position_embeddings = 512
        hidden_size = 768
        # min/opt/max ranges for the dynamic-shape optimization profile.
        bs_range = [1, (batch_size + 1) // 2, batch_size]
        inlen_range = [1, (input_len + 1) // 2, input_len]
        torch_dtype = torch.float16 if fp16 else torch.float32
        trt_dtype = trt.float16 if fp16 else trt.float32
        timing_cache = 'model.cache'
        torch.manual_seed(0)
        builder = Builder()
        with tempfile.TemporaryDirectory() as tmpdirname:
            builder_config = builder.create_builder_config(
                name=model,
                precision='float16' if fp16 else 'float32',
                timing_cache=timing_cache,
                tensor_parallel=world_size,  # TP only
                use_refit=use_refit)
            network = builder.create_network()
            if use_plugin:
                network.plugin_config.set_bert_attention_plugin(dtype)
            if fast_building:
                network.plugin_config.set_gemm_plugin(dtype)
            network.plugin_config.set_context_fmha(context_fmha_type)
            with net_guard(network):
                # Prepare inputs
                # TODO: could class be better than dict for profiles?
                input_ids = tensorrt_llm.Tensor(name='input_ids',
                                                dtype=trt.int32,
                                                shape=[-1, -1],
                                                dim_range=OrderedDict([
                                                    ('batch_size', [bs_range]),
                                                    ('input_len', [inlen_range])
                                                ]))
                input_lengths = tensorrt_llm.Tensor(name='input_lengths',
                                                    dtype=trt.int32,
                                                    shape=[-1],
                                                    dim_range=OrderedDict([
                                                        ('batch_size',
                                                         [bs_range])
                                                    ]))
                # Initialize model
                bert_config = BertConfig(
                    vocab_size=vocab_size,
                    hidden_size=hidden_size,
                    num_hidden_layers=num_layers,
                    num_attention_heads=num_heads,
                    intermediate_size=4 * hidden_size,
                    hidden_act=hidden_act,
                    max_position_embeddings=max_position_embeddings,
                    torch_dtype=torch_dtype,
                )
                output_name = "hidden_states"
                if model == BertModel.__name__:
                    hf_bert = BertModel(
                        bert_config,
                        add_pooling_layer=False).cuda().to(torch_dtype).eval()
                    tensorrt_llm_bert = tensorrt_llm.models.BertModel(
                        num_layers=num_layers,
                        num_heads=num_heads,
                        hidden_size=hidden_size,
                        vocab_size=vocab_size,
                        hidden_act=hidden_act,
                        max_position_embeddings=max_position_embeddings,
                        type_vocab_size=bert_config.type_vocab_size,
                        mapping=tensorrt_llm.Mapping(
                            world_size=world_size,
                            rank=rank,
                            tp_size=world_size),  # TP only
                        dtype=trt_dtype)
                    load_from_hf_bert(tensorrt_llm_bert,
                                      hf_bert,
                                      bert_config,
                                      rank=rank,
                                      tensor_parallel=world_size,
                                      fp16=fp16)
                elif model == BertForQuestionAnswering.__name__:
                    hf_bert = BertForQuestionAnswering(bert_config).cuda().to(
                        torch_dtype).eval()
                    output_name = "logits"
                    tensorrt_llm_bert = tensorrt_llm.models.BertForQuestionAnswering(
                        num_layers=num_layers,
                        num_heads=num_heads,
                        hidden_size=hidden_size,
                        vocab_size=vocab_size,
                        hidden_act=hidden_act,
                        max_position_embeddings=max_position_embeddings,
                        type_vocab_size=bert_config.type_vocab_size,
                        num_labels=
                        2,  # just make it a const here, seems to me not worth as a config
                        mapping=tensorrt_llm.Mapping(
                            world_size=world_size,
                            rank=rank,
                            tp_size=world_size),  # TP only
                        dtype=trt_dtype)
                    load_from_hf_qa_bert(tensorrt_llm_bert,
                                         hf_bert,
                                         bert_config,
                                         rank=rank,
                                         tensor_parallel=world_size,
                                         fp16=fp16)
                else:
                    assert False, f"Unknown model {model}"
                # Prepare
                network.set_named_parameters(
                    tensorrt_llm_bert.named_parameters())
                # Forward
                output = tensorrt_llm_bert(input_ids=input_ids,
                                           input_lengths=input_lengths)
                # Mark outputs
                output_dtype = trt.float16 if fp16 else trt.float32
                output.mark_output(output_name, output_dtype)
            # Build engine
            engine_buffer = builder.build_engine(network, builder_config)
            session = tensorrt_llm.runtime.Session.from_serialized_engine(
                engine_buffer)
            stream = torch.cuda.current_stream().cuda_stream
            # Inference
            # The dtype of input_ids should be queried from the engine,
            # for testing purpose, int32 is fine for now.
            input_ids = torch.randint(100, (batch_size, input_len)).int().cuda()
            input_lengths = input_len * torch.ones(
                (batch_size, ), dtype=torch.int32, device='cuda')
            # Query concrete output shapes, then pre-allocate output buffers.
            output_info = session.infer_shapes([
                TensorInfo('input_ids', trt.DataType.INT32,
                           (batch_size, input_len)),
                TensorInfo('input_lengths', trt.DataType.INT32, (batch_size, ))
            ])
            session._print_engine_info()
            outputs = {
                t.name: torch.empty(tuple(t.shape),
                                    dtype=trt_dtype_to_torch(t.dtype),
                                    device='cuda')
                for t in output_info
            }
            assert output_name in outputs, f'{output_name} not found in outputs'
            session.run(inputs={
                'input_ids': input_ids,
                'input_lengths': input_lengths
            },
                        outputs=outputs,
                        stream=stream)
            torch.cuda.synchronize()
            res = outputs[output_name]
            # Reference forward pass through the HuggingFace model.
            with torch.no_grad():
                hf_outputs = hf_bert.forward(input_ids)
            torch.cuda.synchronize()
            if model == BertModel.__name__:
                ref = hf_outputs.last_hidden_state
                np.testing.assert_allclose(ref.cpu().numpy(),
                                           res.cpu().numpy(),
                                           atol=1e-2,
                                           rtol=1e-2)
            elif model == BertForQuestionAnswering.__name__:
                # TRT output packs start/end logits in the last dim; unpack
                # before comparing against the HF reference.
                res_start_logits, res_end_logits = torch.split(res, 1, -1)
                res_start_logits = res_start_logits.squeeze()
                res_end_logits = res_end_logits.squeeze()
                ref_start_logits = hf_outputs.start_logits
                ref_end_logits = hf_outputs.end_logits
                np.testing.assert_allclose(ref_start_logits.cpu().numpy(),
                                           res_start_logits.cpu().numpy(),
                                           atol=1.5e-2)
                np.testing.assert_allclose(ref_end_logits.cpu().numpy(),
                                           res_end_logits.cpu().numpy(),
                                           atol=1.5e-2)
# Allow running this test module directly (outside a pytest/CI harness).
if __name__ == '__main__':
    unittest.main()
| NVIDIA/TensorRT-LLM | tests/model/test_bert.py | test_bert.py | py | 15,445 | python | en | code | 3,328 | github-code | 36 |
13100989511 | from time import time
import six
import json
from chameleon import PageTemplate
# Chameleon page template for the "bigtable" benchmark: renders
# options['table'] (a list of row dicts) as an XHTML table. The %s
# placeholder is filled with the platform text type name (str/unicode via
# six) and used inside the template to stringify each cell value.
BIGTABLE_ZPT = """\
<table xmlns="http://www.w3.org/1999/xhtml"
xmlns:tal="http://xml.zope.org/namespaces/tal">
<tr tal:repeat="row python: options['table']">
<td tal:repeat="c python: row.values()">
<span tal:define="d python: c + 1"
tal:attributes="class python: 'column-' + %s(d)"
tal:content="python: d" />
</td>
</tr>
</table>""" % six.text_type.__name__
def main(event):
    """Serverless benchmark entry point: render a big HTML table with
    Chameleon and report the render latency plus wall-clock timestamps.

    *event* must carry 'num_of_rows', 'num_of_cols' and 'metadata'.
    """
    latencies = {}
    timestamps = {"starting_time": time()}
    num_of_rows = event['num_of_rows']
    num_of_cols = event['num_of_cols']
    metadata = event['metadata']
    start = time()
    tmpl = PageTemplate(BIGTABLE_ZPT)
    # One shared row dict repeated num_of_rows times (mirrors the benchmark's
    # original table construction).
    row = {str(col): col for col in range(num_of_cols)}
    options = {'table': [row] * num_of_rows}
    # The rendered markup itself is discarded; only the timing matters.
    tmpl.render(options=options)
    latencies["function_execution"] = time() - start
    timestamps["finishing_time"] = time()
    return {"latencies": latencies, "timestamps": timestamps, "metadata": metadata}
3106445086 | # coding: utf-8
import unittest
from unittest.mock import MagicMock, patch
from car_rewrite_model.model import CarRewriteBaseKeywords, CarRewriteSynonymsReplace
class TestDemo(unittest.TestCase):
    """Smoke tests for the car-comment rewrite models; the expensive model
    inference calls are stubbed out with MagicMock so only the surrounding
    pipeline logic is exercised."""
    def test_car_rewrite_base_keywords(self):
        # Stub the TF inference so no real model is loaded or run.
        CarRewriteBaseKeywords.get_tf_results = MagicMock(return_value=['改', '写', '结', '果'])
        # Resource files live on OSS; the paths are test fixtures.
        total_comments_filepath = 'oss://modelzoo/dev/ludezheng/total_comments.txt'
        vocab_filepath = 'oss://modelzoo/dev/ludezheng/vocab_jieba.txt'
        keywords_filepath = 'oss://modelzoo/dev/ludezheng/keywords_table.txt'
        car_info_filepath = 'oss://modelzoo/dev/ludezheng/car_info.txt'
        stop_words_filepath = 'oss://modelzoo/dev/ludezheng/stop_words.txt'
        m = CarRewriteBaseKeywords(total_comments_filepath=total_comments_filepath, vocab_filepath=vocab_filepath,
                                   keywords_filepath=keywords_filepath, car_info_filepath=car_info_filepath,
                                   stop_words_filepath=stop_words_filepath)
        test_data = [
            {'id': 1, 'content': '驾驶性能超好,还真别说,会开车的,开着瑞虎7操控感觉肯定也不错,外观造型漂亮,设计不会老土,最后就是虽然加速动力欠缺,但是确实很省油', 'domain': '最满意'}]
        ret = m.predict(test_data)
        # NOTE(review): no assertion on `ret` — this only checks predict()
        # runs without raising.
        # self.assertEqual(ret, 'hello')
    def test_car_rewrite_synonyms_replace(self):
        # Local model paths (the OSS equivalents are kept as comments below).
        pos_model = '/data/share/liuchang/car_articles/pos.model'
        phrases_before_colon='/data/share/liuchang/car_rewirte_compare/remove_words'
        # pos_model = 'oss://modelzoo/dev/pos_model_for_car_rewrite/pos.model'
        # phrases_before_colon='oss://modelzoo/dev/car_rewrites/phrases_before_colon'
        model = CarRewriteSynonymsReplace(pos_model_path=pos_model,all_mask=True,
                                          phrases_before_colon=phrases_before_colon)
        # Fake synonym-recommendation output: two masked words with five
        # dummy candidates each, repeated once per input record.
        l = [{'maskword': '性能',
              'candidates': [{'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1}]},
             {'maskword': '外观',
              'candidates': [{'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1},
                             {'word': '<unk>', 'confidence': 1}]}]
        data_size = 3
        return_list = [l] * data_size
        model.synonyms_recom_model.predict = MagicMock(return_value=return_list)
        test_data = [
            {'id': 1,
             'content': '驾驶性能超好,还真别说,会开车的,开着瑞虎7操控感觉肯定也不错,'
                        '高速路行驶:不费劲。',
             'domain': '最满意'}] * data_size
        # input = {'data': test_data, 'topk': 5}
        result = model.predict(test_data)
        assert len(result) == 3
        print(result)
| flyliu2017/car_rewrite_model | tests/test_demo.py | test_demo.py | py | 3,205 | python | en | code | 0 | github-code | 36 |
72152970985 |
import numpy as np
import torch
from torch import nn
# --- Module-level Bayer CFA masks -------------------------------------------
# Each idx_* array is a crop_size x crop_size binary mask selecting one CFA
# site of a 2x2 unit cell laid out as [[G1, R], [B, G2]] (cfa_pattern scales
# the cell), tiled over the whole crop.
# NOTE(review): BayerLoss.__init__ accepts its own crop_size/cfa_pattern but
# these masks are built once from the constants below — confirm they match.
crop_size = 256
cfa_pattern = 1
# R occupies the top-right site of the 2x2 cell.
idx_R = np.tile(
    np.concatenate((np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.ones((cfa_pattern, cfa_pattern))), axis=1),
                    np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1)), axis=0),
    (crop_size // 2 // cfa_pattern, crop_size // 2 // cfa_pattern))
# G1 occupies the top-left site.
idx_G1 = np.tile(
    np.concatenate((np.concatenate((np.ones((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1),
                    np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1)), axis=0),
    (crop_size // 2 // cfa_pattern, crop_size // 2 // cfa_pattern))
# G2 occupies the bottom-right site.
idx_G2 = np.tile(
    np.concatenate((np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1),
                    np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.ones((cfa_pattern, cfa_pattern))), axis=1)), axis=0),
    (crop_size // 2 // cfa_pattern, crop_size // 2 // cfa_pattern))
# B occupies the bottom-left site.
idx_B = np.tile(
    np.concatenate((np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1),
                    np.concatenate((np.ones((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1)), axis=0),
    (crop_size // 2 // cfa_pattern, crop_size // 2 // cfa_pattern))
# G = G1 + G2: both green sites of the cell.
idx_G = np.tile(
    np.concatenate((np.concatenate((np.ones((cfa_pattern, cfa_pattern)), np.zeros((cfa_pattern, cfa_pattern))), axis=1),
                    np.concatenate((np.zeros((cfa_pattern, cfa_pattern)), np.ones((cfa_pattern, cfa_pattern))), axis=1)), axis=0),
    (crop_size // 2 // cfa_pattern, crop_size // 2 // cfa_pattern))
# 3-channel (R, G, B) and 4-channel (G1, R, B, G2) mask stacks.
idx_RGB = np.concatenate((idx_R[np.newaxis, ...],
                          idx_G[np.newaxis, ...],
                          idx_B[np.newaxis, ...]), axis=0)
idx_G1RBG2 = np.concatenate((idx_G1[np.newaxis, ...],
                             idx_R [np.newaxis, ...],
                             idx_B [np.newaxis, ...],
                             idx_G2[np.newaxis, ...]), axis=0)
# Convert to torch tensors with a leading singleton (broadcast) dimension.
idx_R = torch.unsqueeze(torch.Tensor(idx_R), dim=0)
idx_G1 = torch.unsqueeze(torch.Tensor(idx_G1), dim=0)
idx_G2 = torch.unsqueeze(torch.Tensor(idx_G2), dim=0)
idx_G = torch.unsqueeze(torch.Tensor(idx_G), dim=0)
idx_B = torch.unsqueeze(torch.Tensor(idx_B), dim=0)
idx_RGB = torch.unsqueeze(torch.Tensor(idx_RGB), dim=0)
idx_G1RBG2 = torch.unsqueeze(torch.Tensor(idx_G1RBG2), dim=0)
class BayerLoss(nn.Module):
    """Loss between the Bayer-mosaicked (RAW) versions of two RGB images.

    Both inputs are masked with the module-level ``idx_RGB`` CFA pattern and
    collapsed to a single plane before the configured distance is applied.

    NOTE(review): ``crop_size``/``cfa_pattern`` are stored on the instance but
    the masks are the module-level globals built from the top-of-file
    constants — confirm they agree with the data being fed in.
    """
    def __init__(self, norm='l2', crop_size=128, cfa_pattern=1):
        super(BayerLoss, self).__init__()
        self.crop_size = crop_size
        self.cfa_pattern = cfa_pattern
        norms = ['l1', 'l2']
        # Fix: the message used 'str + list' concatenation, which raised
        # TypeError instead of a readable AssertionError on bad input.
        assert norm in norms, 'norm should be ' + str(norms)
        if norm == 'l1':
            self.norm = nn.L1Loss()
        elif norm == 'l2':
            self.norm = nn.MSELoss()
        else:
            # Unreachable after the assert, but fix: the original built the
            # ValueError without raising it.
            raise ValueError('norm not in ', norms)
    def get_patternized_1ch_raw_image(self, image):
        """Mask *image* with the CFA pattern and sum its channels into one plane."""
        patternized = self.get_patternized_3ch_image(image)
        # NOTE(review): sums over dim=1 then unsqueezes at dim=0 — for NCHW
        # input this yields shape (1, N, H, W); confirm the intended layout.
        patternized = torch.unsqueeze(torch.sum(patternized, dim=1), dim=0)
        return patternized
    def get_patternized_3ch_image(self, image):
        """Zero out, per channel, the pixels not sampled by the Bayer CFA.

        (Leftover debug ``print`` calls were removed from this method.)
        """
        RGB = idx_RGB.type(torch.float32)
        patternized = torch.mul(image, RGB)
        return patternized
    def forward(self, inputs, targets):
        """Return the configured norm between the two patternized raw images."""
        inputs_raw = self.get_patternized_1ch_raw_image(inputs)
        targets_raw = self.get_patternized_1ch_raw_image(targets)
        return self.norm(targets_raw, inputs_raw)
if __name__ == '__main__':
    # Scratch/debug snippets kept from development; nothing runs on import
    # or when executing this module directly.
    pass
    # # print(idx_R.shape)
    # # print(idx_RGB.shape)
    # input = np.arange(48).reshape((1,3,4,4))
    # input = torch.Tensor(input)
    # # print(input)
    # bayerLoss = BayerLoss('l2')
    # # i3 = bl.get_patternized_3ch_image(input)
    # # print(i3, i3.shape)
    # # i1 = bl.get_patternized_1ch_raw_image(input)
    # # print(i1, i1.shape)
    # img1 = np.arange(48).reshape((1,3,4,4))
    # img1 = torch.Tensor(img1)
    # img1 = bayerLoss.get_patternized_1ch_raw_image(img1)
    # img2 = np.arange(48).reshape((1,3,4,4))-47
    # img2 = torch.Tensor(img2)
    # img2 = bayerLoss.get_patternized_1ch_raw_image(img2)
    # print(img1, img1.shape)
    # print(img2, img2.shape)
    # l = bayerLoss(img1, img2)
    # print(l)
    # print(abs(torch.mean((img1-img2)**2)))
| samsungexpert/snu | myloss.py | myloss.py | py | 4,782 | python | en | code | 1 | github-code | 36 |
7961895091 | import RPi.GPIO as GPIO
import time
import datetime
import math
from ISStreamer.Streamer import Streamer
# Hamster wheel fitness tracker: each laser-beam break counts one wheel
# rotation; speed and cumulative distance are streamed to Initial State.
streamer = Streamer(bucket_name="Hamster Fitness Tracker", access_key="PUT YOUR CLIENT KEY HERE")
streamer.log("ZooZoo Says","")
# Setup Pins
pinNumLaserBreak = 18
pinNumLED = 4
GPIO.setmode(GPIO.BCM) # numbering scheme that corresponds to breakout board and pin layout
GPIO.setup(pinNumLaserBreak,GPIO.IN)
GPIO.setup(pinNumLED,GPIO.OUT)
# Setup Constants
diameter = 13 # inches
circumference = diameter * math.pi * 0.0000157828283 # miles (1 inch = 1/63360 mile ~ 1.57828e-5)
distanceTotal = 0
timeNoActivity = 5 # seconds
speed = 0
lastTime = datetime.datetime.now()
# NOTE(review): `input` shadows the builtin; harmless here but worth renaming.
while True:
    input = GPIO.input(pinNumLaserBreak)
    # A falsy reading is treated as a broken beam, i.e. one full rotation.
    if not input:
        if speed == 0:
            streamer.log("ZooZoo Says", "It's time to get pumped")
        # Calculate stuff
        thisTime = datetime.datetime.now()
        timeDiff = (thisTime-lastTime).total_seconds()
        speed = circumference/(timeDiff/3600) # miles per hour
        # Log stuff
        streamer.log("Full Rotation", "1")
        if speed < 5: # Filter out glitches (rocking on the sensor)
            distanceTotal += circumference
            streamer.log("Speed(mph)", speed)
            streamer.log("Total Distance(miles)", distanceTotal)
        GPIO.output(pinNumLED,GPIO.HIGH) # Turn LED on for visual cue that everything is working
        lastTime = thisTime
        # Wait for sensor break to clear
        input = GPIO.input(pinNumLaserBreak)
        while not input:
            input = GPIO.input(pinNumLaserBreak)
            time.sleep(.05)
    else:
        if speed > 0:
            thisTime = datetime.datetime.now()
            timeDiff = (thisTime-lastTime).total_seconds()
            # Reset the speed to 0 if no activity (for log visualization)
            if timeDiff > timeNoActivity:
                speed = 0
                # Log stuff
                streamer.log("ZooZoo Says", "I need a rest")
                streamer.log("Speed(mph)", speed)
                streamer.flush()
        GPIO.output(pinNumLED,GPIO.LOW) # Turn LED off for visual cue that everything is working
| initialstate/blog | hamster_fitness.py | hamster_fitness.py | py | 1,974 | python | en | code | 2 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.