import unittest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
import time
from BaseWordpressData import BaseWordPressData
class LoginWordPressData(BaseWordPressData):
def __init__(self,type, dataDict,browser):
BaseWordPressData.__init__(self, type,browser)
self.dataDict = dataDict
def login(self):
username = self.dataDict['username']
password = self.dataDict['password']
url = self.dataDict['url']
app = self.dataDict['app']
login_page = "https://" + url + "/" + app + "/wp-login.php"
self.browser.get( login_page )
# Fetch username, password input boxes and submit button
        # This time I'm not testing if the elements were found.
        # See the previous examples to see how to do that.
usernameTB = self.browser.find_element_by_id( "user_login" )
passwordTB = self.browser.find_element_by_id( "user_pass" )
submit = self.browser.find_element_by_id( "wp-submit" )
# Input text in username and password inputboxes
usernameTB.send_keys( username)
passwordTB.send_keys( password )
# Click on the submit button
submit.click()
# Create wait obj with a 5 sec timeout, and default 0.5 poll frequency
wait = WebDriverWait( self.browser, 5 )
time.sleep(5)
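# Usage sketch (hypothetical data; assumes BaseWordPressData accepts these
# constructor arguments and that a local WordPress install is reachable):
#
#     data = {'username': 'admin', 'password': 'secret',
#             'url': 'example.com', 'app': 'blog'}
#     tester = LoginWordPressData('login', data, webdriver.Firefox())
#     tester.login()
#
# The `wait` object created above is currently unused; it could replace the
# fixed sleep, e.g. by waiting for the admin bar WordPress shows after a
# successful login:
#
#     from selenium.webdriver.support import expected_conditions as EC
#     from selenium.webdriver.common.by import By
#     wait.until(EC.presence_of_element_located((By.ID, "wpadminbar")))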
from src.datasets import DATASETS
from src.dataloaders import DATALOADERS
from src.models import MODELS
from src.trainers import TRAINERS
import argparse
parser = argparse.ArgumentParser(description='SASRec')
################
# Test
################
parser.add_argument('--load_pretrained_weights', type=str, default=None)
################
# Dataset
################
parser.add_argument('--dataset_code', type=str, default='item', choices=DATASETS.keys())
parser.add_argument('--split', type=str, default='leave_one_out', help='How to split the datasets')
parser.add_argument('--dataset_split_seed', type=int, default=98765)
parser.add_argument('--data_path', type=str, default='data/ml-1m')
################
# Dataloader
################
parser.add_argument('--dataloader_code', type=str, default='sasrec', choices=DATALOADERS.keys())
parser.add_argument('--dataloader_random_seed', type=float, default=0.0)
parser.add_argument('--train_batch_size', type=int, default=64)
parser.add_argument('--val_batch_size', type=int, default=64)
parser.add_argument('--test_batch_size', type=int, default=64)
################
# NegativeSampler
################
parser.add_argument('--train_negative_sampler_code', type=str, default='random', choices=['popular', 'random'],
help='Method to sample negative items for training. Not used in bert')
parser.add_argument('--train_negative_sample_size', type=int, default=100)
parser.add_argument('--train_negative_sampling_seed', type=int, default=0)
parser.add_argument('--test_negative_sampler_code', type=str, default='random', choices=['popular', 'random'],
help='Method to sample negative items for evaluation')
parser.add_argument('--test_negative_sample_size', type=int, default=100)
parser.add_argument('--test_negative_sampling_seed', type=int, default=98765)
################
# Trainer
################
parser.add_argument('--trainer_code', type=str, default='sasrec_sample', choices=TRAINERS.keys())
parser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda'])
parser.add_argument('--num_gpu', type=int, default=1)
parser.add_argument('--device_idx', type=str, default='0')
parser.add_argument('--emb_device_idx', type=str, default=None,
help="None: as the same as device_idx; cpu: move all to the cpu mem; \
{'cpu':(0,16), 'cuda:0':(16,64)}: Embed[:16] on cpu and Embed[16:64] on cuda:0")
# optimizer #
parser.add_argument('--optimizer', type=str, default='Adam', choices=['SGD','Adam'])
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--weight_decay', type=float, default=0, help='l2 regularization')
parser.add_argument('--momentum', type=float, default=None, help='SGD momentum')
parser.add_argument('--adam_epsilon', type=float, default=1e-6, help='Adam Epsilon')
# training #
parser.add_argument('--verbose', type=int, default=10)
# training on large gpu #
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs for training')
# training on small gpu #
parser.add_argument('--global_epochs', type=int, default=1000, help='Number of epochs for global training')
parser.add_argument('--local_epochs', type=int, default=10, help='Number of epochs for local training')
parser.add_argument('--subset_size', type=int, default=1000, help='Maximal Items Size')
# evaluation #
parser.add_argument('--metric_ks', nargs='+', type=int, default=[5, 10, 20], help='ks for Metric@k')
parser.add_argument('--best_metric', type=str, default='NDCG@10', help='Metric for determining the best model')
################
# Model
################
parser.add_argument('--model_code', type=str, default='sasrec', choices=MODELS.keys())
parser.add_argument('--model_init_seed', type=int, default=0)
# Transformer Blocks #
parser.add_argument('--trm_max_len', type=int, default=200, help='Length of sequence for bert')
parser.add_argument('--trm_hidden_dim', type=int, default=50, help='Size of hidden vectors (d_model)')
parser.add_argument('--trm_num_blocks', type=int, default=2, help='Number of transformer layers')
parser.add_argument('--trm_num_heads', type=int, default=1, help='Number of heads for multi-attention')
parser.add_argument('--trm_dropout', type=float, default=0.2, help='Dropout probability to use throughout the model')
parser.add_argument('--trm_att_dropout', type=float, default=0.2, help='Dropout probability to use throughout the attention scores')
################
# Experiment
################
parser.add_argument('--experiment_dir', type=str, default='experiments')
parser.add_argument('--experiment_description', type=str, default='test')
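# Example invocation (hypothetical entry-point script name):
#   python main.py --dataset_code item --dataloader_code sasrec \
#       --trainer_code sasrec_sample --model_code sasrec --device cuda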
from django.shortcuts import render
def staff_home(request):
return render(request, 'staff_template/staff_home.html')
import message_tools as mt
import time
class ProgressTaskIterator:
def __init__(self, job_id, task, status_handler, file_sys_addr):
        '''
        Wait until the blocks corresponding to this task have been inserted
        into the 'block' table of the status database. Concretely, wait until
        status_phase is different from 'GETWORKERS' and 'SLICES'; that means
        all of the task's blocks are already in place, so we load them and
        initialize self.blocks_id.
        THIS PROGRESSTASK MUST BE STARTED RIGHT AFTER THE JOB MESSAGE IS
        SENT, IN A SEPARATE THREAD.
        :param job_id:
        :param task:
        '''
self.job_id = job_id
self.task = task
self.status_handler = status_handler
self.file_sys_addr = file_sys_addr
        # todo: store the block ids here
filters = [('job_id', job_id)]
jobs = self.status_handler.get_status_rows(file_sys_addr, 'job', filters)
while True:
if len(jobs) > 0:
                # if we have already moved past this task (e.g. from map to reduce)
                # if jobs[0]['job_state'] != self.task:
                #     break
if jobs[0]['job_state'] > self.task:
break
                # once past the GETTINGWORKERS and SLICES phases, all of the
                # current task's blocks have already been inserted
elif jobs[0]['status_phase'] != mt.task_phases[0] \
and jobs[0]['status_phase'] != mt.task_phases[1]\
and jobs[0]['job_state'] == self.task:
break
jobs = self.status_handler.get_status_rows(file_sys_addr, 'job', filters)
time.sleep(0.5)
# print("Progress: current job status: ",jobs[0])
        # At this point every block of the task whose progress we want to
        # show is available
        print("Progress: starting task: ", self.task)
filters = [('phase', self.task)]
blocks_rows = self.status_handler.get_status_rows(file_sys_addr, 'block', filters)
self.blocks_id = [block_row['block_id'] for block_row in blocks_rows]
self.len_blocks = len(self.blocks_id)
self.done_blocks = []
self.yielded_blocks = 0
def __len__(self):
return self.len_blocks
def __iter__(self):
'''
:return:
'''
while self.yielded_blocks < self.len_blocks:
for block_id in self.done_blocks:
# print("Progress: vamos a hacerle yield a: ",block_id)
yield block_id
self.yielded_blocks += 1
self.done_blocks = []
            # keep the blocks that belong to my task (self.task) and are DONE
filters = [('phase', self.task),('state', mt.task_phases[-1])]
blocks = self.status_handler.get_status_rows(self.file_sys_addr, 'block', filters)
# print("Progress: Estos son los bloques que me mandaron: ",blocks)
done_blocks = [block['block_id'] for block in blocks if block['block_id'] in self.blocks_id]
blocks_id = [block_id for block_id in self.blocks_id if block_id not in done_blocks]
self.blocks_id = blocks_id
self.done_blocks = done_blocks
time.sleep(0.5)
# print("Progress: Ended task: ",self.task)
from math import *
print(ceil(4.2))
print(sum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]))
print(fsum([0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]))
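# sum() accumulates binary floating-point rounding error, so the second line
# typically prints a value slightly below 0.9 (e.g. 0.8999999999999999 with
# IEEE-754 doubles), while math.fsum() tracks exact partial sums and prints 0.9.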
import numpy as np
import tensorflow as tf
from .bbox import *
BACKBONE = "resnet101"
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
RPN_NMS_THRESHOLD = 0.7
POOL_SIZE = 7
MASK_POOL_SIZE = 14
TRAIN_BN = False  # Defaulting to False since batch size is often small
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
TRAIN_ROIS_PER_IMAGE = 200
ROI_POSITIVE_RATIO = 0.33
MASK_SHAPE = [28, 28]
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
DETECTION_MAX_INSTANCES = 100
DETECTION_MIN_CONFIDENCE = 0.7
DETECTION_NMS_THRESHOLD = 0.3
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
RPN_ANCHOR_STRIDE = 1
TOP_DOWN_PYRAMID_SIZE = 256
class BBoxesLayer(object):
def __init__(self, img_shp=None, img_num=None):
self.img_shp = img_shp
self.img_num = img_num
self.box_siz_min = 5
self.box_prb_min = 0.5
self.box_nms_pre = None
self.box_nms_pst = 100 #200
self.box_nms_max = 0.3 #0.2
self.box_msk_min = 0.5
self.box_msk_siz = [28, 28]
def generate_boxs(self, rois=None, roi_prbs_pst=None, roi_prds_pst=None, roi_imxs=None):
        # pick the best-class prediction for each roi
box_clss = tf.argmax(roi_prbs_pst, axis=1)
box_clss = tf.cast(box_clss, tf.int32)
box_prbs = tf.reduce_max(roi_prbs_pst, axis=1)
        # keep a box index to avoid many gather ops (prds, msks), saving memory and time
box_idxs = tf.range(tf.shape(rois)[0])
        # drop background boxes
idxs = tf.where(box_clss>0)
box_clss = tf.gather_nd(box_clss, idxs)
box_prbs = tf.gather_nd(box_prbs, idxs)
box_idxs = tf.gather_nd(box_idxs, idxs)
        # drop low-scoring boxes
if self.box_prb_min is not None:
idxs = tf.where(box_prbs>=self.box_prb_min)
box_clss = tf.gather_nd(box_clss, idxs)
box_prbs = tf.gather_nd(box_prbs, idxs)
box_idxs = tf.gather_nd(box_idxs, idxs)
        # perform the remaining gathers through box_idxs
rois = tf.gather(rois, box_idxs)
box_imxs = tf.gather(roi_imxs, box_idxs)
        box_idxs = tf.stack([box_idxs, box_clss], axis=-1)  # needed when the bbox predictions are per-class
        roi_prds_pst = tf.gather_nd(roi_prds_pst, box_idxs)
        # recover the boxes for the subsequent filtering
boxs = bbox_transform_inv(rois, roi_prds_pst)
boxs = bbox_clip(boxs, [0.0, 0.0, self.img_shp[0]-1.0, self.img_shp[1]-1.0])
        # drop boxes that are too small
idxs = bbox_filter(boxs, self.box_siz_min)
boxs = tf.gather_nd(boxs, idxs)
box_clss = tf.gather_nd(box_clss, idxs)
box_prbs = tf.gather_nd(box_prbs, idxs)
box_imxs = tf.gather_nd(box_imxs, idxs)
        # per-image, per-class NMS
        # keep a box index to avoid many concat ops (boxs, clss, prbs, imxs), saving memory and time
box_idxs = tf.zeros(shape=[0], dtype=tf.int32)
def cond0(i, boxs, box_clss, box_prbs, box_imxs, box_idxs):
c = tf.less(i, self.img_num)
return c
def body0(i, boxs, box_clss, box_prbs, box_imxs, box_idxs):
box_idxs_img = tf.where(tf.equal(box_imxs, i))
            boxs_img = tf.gather_nd(boxs, box_idxs_img)  # aligned with box_idxs_img
box_clss_img = tf.gather_nd(box_clss, box_idxs_img)
box_prbs_img = tf.gather_nd(box_prbs, box_idxs_img)
            # further trim the rois if there are too many
if self.box_nms_pre is not None:
box_nms_pre = tf.minimum(self.box_nms_pre, tf.shape(boxs_img)[0])
box_prbs_img, idxs = tf.nn.top_k(box_prbs_img, k=box_nms_pre, sorted=True)
boxs_img = tf.gather(boxs_img, idxs)
box_clss_img = tf.gather(box_clss_img, idxs)
box_idxs_img = tf.gather(box_idxs_img, idxs)
#####################################
box_idxs_kep = tf.zeros(shape=[0], dtype=tf.int32)
box_clss_unq, idxs = tf.unique(box_clss_img)
def cond1(j, boxs_img, box_clss_img, box_prbs_img, box_clss_unq, box_idxs_kep):
box_cls_num = tf.shape(box_clss_unq)[0]
c = tf.less(j, box_cls_num)
return c
def body1(j, boxs_img, box_clss_img, box_prbs_img, box_clss_unq, box_idxs_kep):
                # select the rois of the corresponding class
box_cls = box_clss_unq[j]
box_idxs_cls = tf.where(tf.equal(box_clss_img, box_cls))
boxs_cls = tf.gather_nd(boxs_img, box_idxs_cls)
box_prbs_cls = tf.gather_nd(box_prbs_img, box_idxs_cls)
                # run non-maximum suppression
idxs = tf.image.non_max_suppression(boxs_cls, box_prbs_cls, self.box_nms_pst, self.box_nms_max)
box_idxs_cls = tf.gather(box_idxs_cls, idxs)
                # save the result
box_idxs_kep = tf.concat([box_idxs_kep, box_idxs_cls], axis=0)
return [j+1, boxs_img, box_clss_img, box_prbs_img, box_clss_unq, box_idxs_kep]
j = tf.constant(0)
[j, boxs_img, box_clss_img, box_prbs_img, box_clss_unq, box_idxs_kep] = \
tf.while_loop(cond1, body1, loop_vars=[j, boxs_img, box_clss_img, box_prbs_img, box_clss_unq, box_idxs_kep], \
shape_invariants=[j.get_shape(), boxs_img.get_shape(), box_clss_img.get_shape(), \
box_prbs_img.get_shape(), box_clss_unq.get_shape(), tf.TensorShape([None])], \
parallel_iterations=10, back_prop=False, swap_memory=True)
box_prbs_img = tf.gather(box_prbs_img, box_idxs_kep)
box_idxs_img = tf.gather(box_idxs_img, box_idxs_kep)
box_num_img = tf.minimum(self.box_nms_pst, tf.shape(box_idxs_img)[0])
box_prbs_img, idxs = tf.nn.top_k(box_prbs_img, k=box_num_img, sorted=True)
box_idxs_img = tf.gather(box_idxs_img, idxs)
            # save the result
box_idxs = tf.concat([box_idxs, box_idxs_img], axis=0)
return [i+1, boxs, box_clss, box_prbs, box_imxs, box_idxs]
i = tf.constant(0)
[i, boxs, box_clss, box_prbs, box_imxs, box_idxs] = \
            tf.while_loop(cond0, body0, loop_vars=[i, boxs, box_clss, box_prbs, box_imxs, box_idxs], \
shape_invariants=[i.get_shape(), boxs.get_shape(), box_clss.get_shape(), \
box_prbs.get_shape(), box_imxs.get_shape(), tf.TensorShape([None])], \
parallel_iterations=10, back_prop=False, swap_memory=True)
boxs = tf.gather_nd(boxs, box_idxs)
box_clss = tf.gather_nd(box_clss, box_idxs)
box_prbs = tf.gather_nd(box_prbs, box_idxs)
box_imxs = tf.gather_nd(box_imxs, box_idxs)
return boxs, box_clss, box_prbs, box_imxs
def generate_msks(self, boxs=None, box_clss=None, box_msks_pst=None):
return
import logging
import os
import pytorch_lightning as pl
from meddlr.config.config import CfgNode
from meddlr.engine.trainer import convert_cfg_time_to_iter as _convert_cfg_time_to_iter
from meddlr.engine.trainer import format_as_iter
from meddlr.utils import env
from meddlr.utils.env import supports_wandb
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.profiler import SimpleProfiler
from pytorch_lightning.utilities.distributed import rank_zero_only
from skm_tea.callbacks import PLPeriodicCheckpointer
from skm_tea.utils.pl_utils import LoggerCollection, TensorBoardLogger, WandbLogger
__all__ = ["PLDefaultTrainer"]
def convert_cfg_time_to_iter(cfg: CfgNode, iters_per_epoch: int):
"""Convert all config time-related parameters to iterations.
Note:
When adding to this list, be careful not to convert config parameters
multiple times.
"""
time_scale = cfg.TIME_SCALE
cfg = _convert_cfg_time_to_iter(cfg.clone(), iters_per_epoch, ignore_missing=True).defrost()
cfg.SOLVER.EARLY_STOPPING.PATIENCE = format_as_iter(
cfg.SOLVER.EARLY_STOPPING.PATIENCE, iters_per_epoch, time_scale
)
cfg.TIME_SCALE = "iter"
cfg.freeze()
return cfg
class PLDefaultTrainer(pl.Trainer):
def __init__(
self,
cfg,
iters_per_epoch: int,
log_gpu_memory=None,
replace_sampler_ddp=False,
num_gpus=0,
resume=False,
eval_only=False,
**kwargs,
):
logger = logging.getLogger("skm_tea")
self.eval_only = eval_only
if "limit_train_batches" in kwargs:
iters_per_epoch = kwargs["limit_train_batches"]
cfg = convert_cfg_time_to_iter(cfg, iters_per_epoch)
self.cfg = cfg
callbacks = self.build_callbacks() # includes user-specified callbacks
kwargs["callbacks"] = callbacks
if resume:
assert not kwargs.get(
"resume_from_checkpoint", None
), "Cannot specify resume=True and resume_from_checkpoint"
resume_from_checkpoint = self.configure_resume(callbacks)
logger.info(f"Resuming from checkpoint {resume_from_checkpoint}")
kwargs["resume_from_checkpoint"] = resume_from_checkpoint
early_stopping_callback = self.build_early_stopping(iters_per_epoch)
if early_stopping_callback:
callbacks.append(early_stopping_callback)
# Hacky way to get around the definition of "step" as optimizer.step in pt-lightning.
# Without this the training time would be scaled by a factor of SOLVER.GRAD_ACCUM_ITERS.
max_steps = cfg.SOLVER.MAX_ITER // cfg.SOLVER.GRAD_ACCUM_ITERS
# Default arguments based on Trainer. Any keyword args provided will overwrite these.
args = dict(
logger=self.build_logger() if not self.eval_only else False,
default_root_dir=cfg.OUTPUT_DIR,
max_steps=max_steps,
# TODO Issue #4406: https://github.com/PyTorchLightning/pytorch-lightning/issues/4406
val_check_interval=min(
cfg.TEST.EVAL_PERIOD, kwargs.get("limit_train_batches", float("inf"))
),
accumulate_grad_batches=cfg.SOLVER.GRAD_ACCUM_ITERS,
log_gpu_memory=log_gpu_memory,
checkpoint_callback=False,
sync_batchnorm=False,
profiler=SimpleProfiler(dirpath=cfg.OUTPUT_DIR, filename="profile.txt"),
log_every_n_steps=5,
replace_sampler_ddp=replace_sampler_ddp,
deterministic=env.is_repro(),
)
if num_gpus > 0:
args.update({"gpus": num_gpus, "auto_select_gpus": True})
args.update(kwargs)
super().__init__(**args)
def build_early_stopping(self, iters_per_epoch):
monitor = self.cfg.SOLVER.EARLY_STOPPING.MONITOR
patience = self.cfg.SOLVER.EARLY_STOPPING.PATIENCE
min_delta = self.cfg.SOLVER.EARLY_STOPPING.MIN_DELTA
if patience == 0:
return False
patience = patience / iters_per_epoch
assert (
self.cfg.TIME_SCALE == "iter" and patience > 0 and int(patience) == patience
), f"Got time scale '{self.cfg.TIME_SCALE}' and patience '{patience}'"
return EarlyStopping(monitor=monitor, min_delta=min_delta, patience=patience, verbose=True)
@rank_zero_only
def build_logger(self):
cfg = self.cfg
version = ""
loggers = [
CSVLogger(cfg.OUTPUT_DIR, name="", version=version),
TensorBoardLogger(cfg.OUTPUT_DIR, name="", version=version, log_graph=False),
]
if supports_wandb():
import wandb
loggers.append(WandbLogger(experiment=wandb.run))
return LoggerCollection(loggers)
def build_callbacks(self, **kwargs):
"""Append default callbacks to list of user-defined callbacks."""
cfg = self.cfg
callbacks = list(kwargs.get("callbacks", []))
if "checkpoint_callback" not in kwargs and not any(
isinstance(x, PLPeriodicCheckpointer) for x in callbacks
):
callbacks.append(
PLPeriodicCheckpointer(
frequency=cfg.SOLVER.CHECKPOINT_PERIOD,
filepath=os.path.join(cfg.OUTPUT_DIR, "{global_step:07d}-{epoch:03d}"),
save_after_val=True,
)
)
return callbacks
def configure_resume(self, callbacks):
"""Configure setup for resume.
Currently finds the latest epoch and resumes from there.
"""
# cfg = self.cfg
checkpointer = [x for x in callbacks if isinstance(x, PLPeriodicCheckpointer)]
if len(checkpointer) == 0:
raise ValueError("Resuming training only works with PLPeriodicCheckpointer")
elif len(checkpointer) > 1 and any(
cp.dirpath != checkpointer[0].dirpath for cp in checkpointer
):
raise ValueError("Found more than one checkpointer with different save directories")
return checkpointer[0].get_latest()
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_canvas.utilities import fdao_canvas_override
from uw_canvas.quizzes import Quizzes
from uw_canvas.models import Quiz
@fdao_canvas_override
class CanvasTestQuizzes(TestCase):
def test_quizzes_by_course_id(self):
canvas = Quizzes()
submissions = canvas.get_quizzes("862539")
sub = submissions[0]
        self.assertEqual(sub.quiz_id, 762037, "Has correct quiz id")
        self.assertEqual(sub.published, True, "Is published")
        self.assertEqual(sub.due_at.day, 1, "due at datetime")
def test_quizzes_by_sis_id(self):
canvas = Quizzes()
submissions = canvas.get_quizzes_by_sis_id("2013-autumn-PHYS-248-A")
        self.assertEqual(len(submissions), 1, "Submission Count")
def test_quiz_without_due_date(self):
quiz = Quiz(data={
"id": "1",
"title": "title",
"html_url": "http://...",
"published": False,
"points_possible": 0,
})
        self.assertEqual(quiz.title, "title")
        self.assertEqual(quiz.due_at, None)
from django.apps import AppConfig
class RecordConfig(AppConfig):
name = 'record'
from autogluon.tabular.models.fastainn.tabular_nn_fastai import NNFastAiTabularModel
def test_tabular_nn_fastai_binary(fit_helper):
fit_args = dict(
hyperparameters={NNFastAiTabularModel: {}},
)
dataset_name = 'adult'
fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
def test_tabular_nn_fastai_multiclass(fit_helper):
fit_args = dict(
hyperparameters={NNFastAiTabularModel: {}},
)
dataset_name = 'covertype'
fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
def test_tabular_nn_fastai_regression(fit_helper):
fit_args = dict(
hyperparameters={NNFastAiTabularModel: {}},
)
dataset_name = 'ames'
fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
import os
from xml.etree import ElementTree as ET
from datetime import datetime
from collections import namedtuple
__author__ = "Manuel Escriche < mev@tid.es>"
class Settings:
def __init__(self):
self._dashboard = dict()
self._review = dict()
self.home = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
self.configHome = os.path.join(self.home, 'config')
self.storeHome = os.path.join(self.home, 'store')
self._logoshome = os.path.join(self.home, 'logos')
self.outHome = os.path.join(self.home, 'reports')
xmlfile = os.path.join(self.configHome, 'settings.xml')
# print(xmlfile)
tree = ET.parse(xmlfile)
root = tree.getroot()
self.logoAzul = os.path.join(self._logoshome, root.find('logo1').text)
self.logoAzulOsc = os.path.join(self._logoshome, root.find('logo2').text)
self.logofiware = os.path.join(self._logoshome, root.find('logo3').text)
self._today = datetime.now().strftime("%Y%m%d")
self._dashboard['deliverable'] = root.find('dashboard').find('deliverable').text
self.domain = root.find('domain').text
self._servers = dict()
record = namedtuple('record', 'domain, username, password')
for _server in root.findall('server'):
name = _server.get('name')
domain = _server.find('domain').text
username = _server.find('username').text
password = _server.find('password').text
self._servers[name] = record(domain, username, password)
# print(len(self.__chapters))
@property
def server(self):
return self._servers
@property
def chapters(self):
return 'Apps', 'Cloud', 'Data', 'IoT', 'I2ND', 'Security', 'WebUI', 'Ops', 'Academy', 'Catalogue', 'Lab'
@property
def management(self):
return 'Coordination', 'TechnicalCoordination'
@property
def deliverable(self):
return self._dashboard['deliverable']
settings = Settings()
if __name__ == "__main__":
pass
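# A sketch of the settings.xml layout this class assumes (element names taken
# from the parsing code above; all values are hypothetical):
#
#     <settings>
#         <logo1>logo_azul.png</logo1>
#         <logo2>logo_azul_oscuro.png</logo2>
#         <logo3>logo_fiware.png</logo3>
#         <dashboard><deliverable>D.11.1</deliverable></dashboard>
#         <domain>example.org</domain>
#         <server name="backlog">
#             <domain>tracker.example.org</domain>
#             <username>user</username>
#             <password>secret</password>
#         </server>
#     </settings>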
"""General utility functions for solvers."""
import warnings
from ..core import (expand_mul, expand_multinomial, nan, oo,
preorder_traversal, zoo)
from ..core.sympify import sympify
from ..simplify.simplify import posify, simplify
__all__ = 'checksol',
def checksol(f, sol, **flags):
r"""Checks whether sol is a solution of equations f.
Examples
========
>>> checksol(x**4 - 1, {x: 1})
True
>>> checksol(x**4 - 1, {x: 0})
False
>>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
True
Returns
=======
bool or None
Return True, if solution satisfy all equations
in ``f``. Return False, if a solution doesn't
satisfy any equation. Else (i.e. one or more checks
are inconclusive), return None.
Parameters
==========
f : Expr or iterable of Expr's
Equations to substitute solutions in.
sol : dict of Expr's
Mapping of symbols to values.
\*\*flags : dict
A dictionary of following parameters:
minimal : bool, optional
Do a very fast, minimal testing. Default is False.
warn : bool, optional
Show a warning if it could not conclude. Default is False.
simplify : bool, optional
Simplify solution before substituting into function and
simplify the function before trying specific simplifications.
Default is True.
force : bool, optional
Make positive all symbols without assumptions regarding
sign. Default is False.
"""
minimal = flags.get('minimal', False)
if not isinstance(sol, dict):
raise ValueError(f'Expecting dictionary but got {sol}')
if sol and not f.has(*list(sol)):
# if f(y) == 0, x=3 does not set f(y) to zero...nor does it not
if f.is_Number:
return f.is_zero
else:
return
illegal = {nan, zoo, oo, -oo}
if any(sympify(v).atoms() & illegal for k, v in sol.items()):
return False
was = f
attempt = -1
while 1:
attempt += 1
if attempt == 0:
val = f.subs(sol)
if val.atoms() & illegal:
return False
elif attempt == 1:
assert val.free_symbols
if not val.is_constant(*list(sol), simplify=not minimal):
return False
# there are free symbols -- simple expansion might work
_, val = val.as_content_primitive()
val = expand_mul(expand_multinomial(val))
elif attempt == 2:
if minimal:
return
if flags.get('simplify', True):
for k in sol:
sol[k] = simplify(sol[k])
# start over without the failed expanded form, possibly
# with a simplified solution
val = simplify(f.subs(sol))
if flags.get('force', True):
val, reps = posify(val)
# expansion may work now, so try again and check
exval = expand_mul(expand_multinomial(val))
if exval.is_number or not exval.free_symbols:
# we can decide now
val = exval
else:
# if there are no radicals and no functions then this can't be
# zero anymore -- can it?
pot = preorder_traversal(expand_mul(val))
seen = set()
saw_pow_func = False
for p in pot:
if p in seen:
continue
seen.add(p)
if p.is_Pow and not p.exp.is_Integer:
saw_pow_func = True
elif p.is_Function:
saw_pow_func = True
if saw_pow_func:
break
if saw_pow_func is False:
return False
if flags.get('force', True):
# don't do a zero check with the positive assumptions in place
val = val.subs(reps)
val # XXX "peephole" optimization, http://bugs.python.org/issue2506
break
if val == was:
continue
elif val.is_Rational:
return val == 0
elif val.is_nonzero:
return False
if not val.free_symbols:
return bool(abs(val.evalf(18, strict=False).evalf(12, chop=True)) < 1e-9)
was = val
if flags.get('warn', False):
warnings.warn(f'\n\tWarning: could not verify solution {sol}.')
with open('file.csv', 'r') as f:
a = [i for i in f]
print(a)
with open('file.csv', 'r') as f:
    # i[1:-2] strips the first character and the last two from each raw line
    # (e.g. surrounding quotes plus the trailing newline)
    a = [i[1:-2] for i in f]
print(a)
import os
from django.template.response import TemplateResponse
from _helpers.common import make_list_of_lists
from app_user.models import ClosingPeriod
from datetime import datetime
from django.utils import timezone
from django.core.paginator import Paginator
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from num2words import num2words
from xhtml2pdf import pisa
from _helpers.models import areas_ar_en, find_in, get_currency, get_payment_type, modified_num2words
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from django.http.request import HttpHeaders, HttpRequest
from django.http.response import HttpResponse, HttpResponseRedirect
from django.urls.base import reverse
from django.urls.conf import path
from django.urls.resolvers import URLPattern
from django.utils.html import format_html
from payment.models import ClientPaymentTransaction, PaymentTransactionType
from django.contrib import admin
from .models import Client
from django.utils.translation import gettext, ugettext as _, ugettext_lazy
import xlsxwriter
from _helpers.admin import Amount, ClientHelper, CommonMethods, ConsumerTransaction,ConsumerTransactionDownloder, admin_client_download_transaction_pdf, admin_supplier_download_transaction_pdf, make_xls_data, make_xls_headers, str_to_date
from zipfile import ZipFile
class ClientAdmin(admin.ModelAdmin):
class Media:
js = (
'client_transactions.js',
'client_account_statment.js'
)
css = {
"all":('client_admin.css',)
}
model = Client
search_fields = ['name', 'phone', 'email']
extra = 1
list_display_links = [
'name'
]
list_display = [
'id',
'name',
'phone',
'is_active',
'taxes',
# 'make_transaction',
'parsed_get_debit',
'parsed_get_credit',
'parsed_get_net',
'account_statment_from',
'account_statment_to',
'account_statment_btn',
]
add_fieldsets = (
(ugettext_lazy('Main info'), {
'classes': ("wide",),
"fields": (
_('id'),
_('name'),
_('email'),
_('phone'),
_('gender'),
_('img'),
),
}),
(ugettext_lazy("Location"), {
'classes': ('collapse', 'wide'),
'fields': (
_('country'),
_('area'),
_('city'),
_('address'),
)
})
,(ugettext_lazy('Cash'), {
'classes': ('collapse', 'wide'),
"fields" : (
_('debit'),
_('credit'),
_('get_cash'),
_('cash')
)
}),(ugettext_lazy('Taxes'), {
"classes": ('collapse', 'wide'),
'fields' :
(
_('taxes'),
_('taxes_rate'),
)
}),
(ugettext_lazy('Status'), {'classes': ('collapse',),"fields": ('is_active',)})
)
fieldsets = (
(ugettext_lazy('Main info'), {
'classes': ("wide",),
"fields": (
_('id'),
_('name'),
_('email'),
_('phone'),
_('gender'),
_('img'),
),
}),
(ugettext_lazy("Location"), {
'classes': ('collapse', 'wide'),
'fields': (
_('country'),
_('area'),
_('city'),
_('address'),
)
})
,(ugettext_lazy('Cash'), {
'classes': ('collapse', 'wide'),
"fields" : (
_('debit'),
_('credit'),
_('get_cash'),
_('period_close'),
)
}),(ugettext_lazy('Taxes'), {
"classes": ('collapse', 'wide'),
'fields' :
(
_('taxes'),
_('taxes_rate'),
)
}),
(ugettext_lazy('Status'), {'classes': ('collapse',),"fields": ('is_active',)})
)
search_fields = ['name', 'phone', 'email','area__name', 'city']
list_filter = ('is_active', 'taxes')
list_per_page = 20
actions = [
_('activate'),
_('deactivate'),
_('export_as_xls'),
_('export_invoices_for'),
]
change_form_template = 'admin/client/client/custom_change_form.html'
change_list_template = 'admin/client/client/custom_change_list.html'
    def activate(self, request, queryset):
        client_no = queryset.update(is_active=True)
        client_string = 'client has been' if client_no == 1 else 'clients have been'
        self.message_user(request, f'{client_no} {client_string} activated successfully')
    activate.short_description = ugettext_lazy('Activate selected clients')
    def deactivate(self, request, queryset):
        client_no = queryset.update(is_active=False)
        client_string = 'client has been' if client_no == 1 else 'clients have been'
        self.message_user(request, f'{client_no} {client_string} deactivated successfully')
    deactivate.short_description = ugettext_lazy('Deactivate selected clients')
def export_invoices_for(self, request, queryset):
response = HttpResponse(content_type='application/vnd.ms-excel')
xls_sheet = xlsxwriter.Workbook(response)
headers_format = xls_sheet.add_format()
headers_format.set_font_shadow()
headers_format.set_bg_color('gray')
headers_format.set_border(1)
headers_format.set_font_color('white')
headers_format.set_bold()
headers_format.set_align('center')
headers_format.set_locked(True)
headers_format.set_size(15)
data_format = xls_sheet.add_format()
data_format.set_align('center')
data_format.set_bg_color('#8d8894')
data_format.set_font_color('#e7e7e7')
data_format.set_font_size(14)
data_format.set_bold()
data_format.set_border()
data_format.set_border_color('black')
clients = queryset.all()
for supplier in clients:
qs_values = supplier.transactions.order_by('-id').all()
work_sheet = xls_sheet.add_worksheet()
make_xls_headers(work_sheet, [
_('issued_at'),
_('type_tranasction'),
_('amount'),
_('description'),
_('id'),
_('client'),
], headers_format)
data_fields_names = [
'client',
'id',
'description',
'amount',
'type_tranasction',
'issued_at',
]
make_xls_data(work_sheet, qs_values, data_fields_names, data_format)
xls_sheet.close()
return response
export_invoices_for.short_description = ugettext_lazy('Export invoices for')
def get_search_results(self, request: HttpRequest, queryset: QuerySet, search_term: str) -> Tuple[QuerySet, bool]:
area = find_in(areas_ar_en(), search_term)
if area:
search_term = area[search_term]
return super().get_search_results(request, queryset, search_term)
def get_readonly_fields(self, request: HttpRequest, obj: Optional["Client"]) -> Union[List[str], Tuple]:
readonly_fields = [
'id',
'make_transaction',
'debit',
'credit',
'period_close',
'get_cash',
'get_net',
]
if obj:
return readonly_fields+ ['cash']
return readonly_fields
def add_view(self, *args, **kwargs):
self.fieldsets = self.add_fieldsets
return super().add_view(*args, **kwargs)
def changeform_view(self, request: HttpRequest, object_id: Optional[str], form_url: str, extra_context: Optional[Dict[str, bool]]) -> Any:
transact_to_value = request.COOKIES.get('to_value')
transact_from_value = request.COOKIES.get('from_value')
today_full = datetime.today().date()
extra_context = {
"filter_label": _('Filter Transactions'),
'from' :format_html('<label>{}</label>: <input type=date value={} id=transactions_from>', _('from'), today_full),
'to' : format_html(' <label>{}</label>: <input type=date value={} id=transactions_to>', _('to'), today_full),
'trs': [
_('id of transaction'),
_('ISSUED AT'),
_('TRANSACTION TYPE'),
_('Description'),
_('debit'),
_('credit'),
_('balance'),
_('VIEW'),
_('CSV'),
_('PDF'),
],
"page": _('Page'),
'of': _('of'),
'next': _('next'),
'previous':_('previous'),
'last': _('last page'),
'first': _('first')
}
client = self.get_object(request=request,object_id=object_id)
if not client:
return super().changeform_view(request, object_id=object_id, form_url=form_url, extra_context=extra_context)
transactions = client.transactions.all()
if transact_to_value:
to_date=str_to_date(transact_to_value)
from_date=str_to_date(transact_from_value)
transactions = transactions.filter(issued_at__gte=from_date).filter(issued_at__lte=to_date).all()
extra_context.update(ConsumerTransaction.prepare_tarnsactions_table(request, transactions, 'admin:payment_clientpaymenttransaction_change', consumer= 'client'))
return super().changeform_view(request, object_id=object_id, form_url=form_url, extra_context=extra_context)
def get_urls(self) -> List[URLPattern]:
urls = super().get_urls()
urls += [
path('<int:client_id>/make_a_transaction', self.process_make_transaction, ),
path('<int:client_id>/change/<int:id>/download/csv', self.download_transaction_csv ,),
path('<int:client_id>/change/<int:id>/download/pdf', self.download_transaction_pdf , name='client_transaction_download_pdf'),
path('<int:client_id>/change/period_close', self.period_close_controller , name='client_period_close'),
path('account-statment/<str:date_from>/<str:date_to>/<str:client_id>', self.account_statment_handler , name='client_account_statment'),
]
return urls
def account_statment_handler(self, request, date_from, date_to, client_id):
client = self.get_object(request, client_id)
return CommonMethods.account_statment_pdf(
date_from=date_from,
date_to=date_to,
consumer_obj=client,
request=request
)
def period_close_controller(self, request, client_id):
client = self.get_object(request, client_id)
transactions = client.transactions
return CommonMethods.make_peroid_close(transactions, client)
def changelist_view(self, request: HttpRequest, extra_context: Optional[Dict[str, str]]=None) -> TemplateResponse:
extra_context = {
'to': _('to'),
'from': _('from'),
'export': _('export'),
"account_stament_label": _('account statment')
}
response = super().changelist_view(request, extra_context=extra_context)
if request.COOKIES.get('client_id'):
response.delete_cookie('client_id')
return response
def process_make_transaction(self, request, **kwargs):
url = reverse("admin:payment_clientpaymenttransaction_add")
response = HttpResponseRedirect(url)
response.set_cookie('client_id', kwargs.get('client_id'))
return response
def get_queryset(self, request: HttpRequest) -> QuerySet:
client_id = request.COOKIES.get('client_id')
if not client_id:
return super().get_queryset(request)
client_list_path = reverse('admin:client_client_changelist')
queryset = super().get_queryset(request)
if not request.path == client_list_path:
queryset = queryset.filter(id=client_id)
return queryset
def download_transaction_pdf(self, request, **kwargs):
transaction = ClientPaymentTransaction.objects.get(id=kwargs['id'])
return admin_client_download_transaction_pdf(transaction, request)
def download_transaction_csv(self, *args_, **kwargs):
url = reverse('admin:client_transaction_download_csv', args=[kwargs['id']])
return HttpResponseRedirect(url)
def has_delete_permission(self, request, obj=None):
return False
    def activate_clients(self, request, queryset):
        clients_no = queryset.update(is_active=True)
        client_string = 'client has been' if clients_no == 1 else 'clients have been'
        self.message_user(request, f'{clients_no} {client_string} activated successfully')
    def deactivate_clients(self, request, queryset):
        clients_no = queryset.update(is_active=False)
        client_string = 'client has been' if clients_no == 1 else 'clients have been'
        self.message_user(request, f'{clients_no} {client_string} deactivated successfully')
def get_action(self, action: Union[Callable, str]) -> Tuple[Callable, str, str]:
return super().get_action(action)
def save_model(self, request: Any, client: "Client", form: Any, change: Any) -> None:
super().save_model(request, client, form, change)
if (not client.cash == 0) and (change == False):
if not ClientPaymentTransaction.objects.filter(client_id=client.pk).exists():
if client.cash < 0:
try:
type_tranasction = PaymentTransactionType.objects.get(name=_('Opening account'))
except PaymentTransactionType.DoesNotExist:
type_tranasction = PaymentTransactionType.objects.create(
name=_('Opening account'),
transaction_for=2,
)
ClientPaymentTransaction.objects.create(
amount=abs(client.cash),
client=client,
type_tranasction=type_tranasction,
payment_type=2,
issued_by = request.user
)
return
try:
type_tranasction = PaymentTransactionType.objects.get(name=_('Opening account'))
except PaymentTransactionType.DoesNotExist:
type_tranasction = PaymentTransactionType.objects.create(
name=_('Opening account'),
transaction_for=1,
)
ClientPaymentTransaction.objects.create(
amount=client.cash,
client=client,
type_tranasction=type_tranasction,
payment_type=1,
issued_by = request.user
)
def export_as_xls(self, request, queryset):
response = HttpResponse(content_type='application/vnd.ms-excel')
xls = xlsxwriter.Workbook(response)
work_sheet = xls.add_worksheet()
headers_format = xls.add_format()
headers_format.set_font_shadow()
headers_format.set_bg_color('gray')
headers_format.set_border(1)
headers_format.set_font_color('white')
headers_format.set_bold()
headers_format.set_align('center')
headers_format.set_locked(True)
headers_format.set_size(15)
data_format = xls.add_format()
data_format.set_align('center')
data_format.set_bg_color('#8d8894')
data_format.set_font_color('#e7e7e7')
data_format.set_font_size(14)
data_format.set_bold()
data_format.set_border()
data_format.set_border_color('black')
sheet_headers = [
_('city'),
_('address'),
_('phone'),
_('email'),
_('id'),
_('name'),
]
qs_values = queryset.order_by('-id').all()
make_xls_headers(work_sheet, sheet_headers, headers_format)
data_fields_names = [
'name',
'id',
'email',
'phone',
'address',
'city',
]
make_xls_data(work_sheet, qs_values, data_fields_names, data_format)
xls.close()
return response
export_as_xls.short_description = ugettext_lazy("Export Selected as xls")
admin.site.register(Client, ClientAdmin)
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
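# Approach: root the tree at node 0 and run a BFS that records each node's
# parent and strips the visited reverse edges; a reverse sweep then computes
# levels[u], the height of u's subtree. For every node of degree >= 5, the
# longest downward path starting at it (into its own subtree, or up through
# an ancestor and down a sibling subtree) is added to its degree, and the
# best such total is reported together with the node.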
from collections import defaultdict, deque
n = int(input())
paths = defaultdict(set)
for _ in range(n - 1):
u, v = map(lambda x: int(x) - 1, input().strip().split())
paths[u].add(v)
paths[v].add(u)
parents = [-1] * n
stack = deque([0])
vertexes = deque([])
lens = [len(paths[i]) for i in range(n)]
while stack:
u = stack.popleft()
vertexes.append(u)
for v in paths[u]:
stack.append(v)
paths[v].remove(u)
parents[v] = u
levels = [0] * n
while vertexes:
u = vertexes.pop()
lvl = 0
for v in paths[u]:
lvl = max(lvl, levels[v] + 1)
levels[u] = lvl
result = -1
c = None
for i in range(n):
if lens[i] >= 5:
u = i
lvl = levels[u]
d = 1
while parents[u] != -1:
d += 1
for v in paths[parents[u]]:
if v != u:
lvl = max(lvl, levels[v] + d)
u = parents[u]
if lvl + lens[i] > result:
result = lvl + lens[i]
c = i
if result != -1:
print(result, c + 1)
else:
print(-1)
import itertools
def chunk(iterable, n):
it = iter(iterable)
cls = list
chunk = cls(itertools.islice(it, n))
while chunk:
yield chunk
chunk = cls(itertools.islice(it, n))
def map_chunk(iterable, n, f_chunk):
"""
Map over iterable in chunks of size n, applying f_chunk to
each chunk, and then flattening the result back into the original
shape of iterable
"""
cls = iterable.__class__
it_result = itertools.chain.from_iterable(
f_chunk(c) for c in chunk(iterable, n)
)
return cls(it_result)
def paginator(page_getter, page_limit, n):
start_pos = 0
num_results = 0
while True:
page = page_getter(start_pos)
yield page
page_size = len(page)
num_results += page_size
start_pos += page_size
if num_results >= n or page_size < page_limit:
return
def paginate(page_getter, page_limit, n):
return list(itertools.chain.from_iterable(paginator(page_getter, page_limit, n)))
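# A minimal, self-contained demonstration of how these helpers compose
# (the in-memory page_getter below is hypothetical):
if __name__ == "__main__":
    data = list(range(10))

    def fake_page_getter(start_pos, _limit=4):
        # Serve at most _limit items starting at start_pos.
        return data[start_pos:start_pos + _limit]

    print(list(chunk(data, 3)))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    print(map_chunk(data, 3, lambda c: [x * 2 for x in c]))
    print(paginate(fake_page_getter, page_limit=4, n=10))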
#
# This script is Copyrighted (c) 2012 by William Gibb and
# licensed under the OSI Educational Community License version 1.0
#
# Please see the LICENSE.txt file for a full reading of the appropriate
# license.
#
import sys
import os
import csv
import optparse
import string
import logging
import re
import datetime
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
__version__ = 1
USAGE='''
This script is designed to convert normalized waveforms into VCD files.
It is not designed to implement the extended VCD format.
The input for this script is intended to come from normalize_waveforms.py
with the --zero_timescale option enabled.
'''
def main(options):
    '''
    Convert a normalized-waveform CSV into a VCD file.
    '''
    # constant RE to identify the time column
    time_re = r'^Time\s\([\w]{1,2}\)$'
compiled_time = re.compile(time_re)
'''
Generate a list of printable ASCII characters, in order to comply with
    IEEE Standard 1364-1995 Section 18. These can be used to identify all
signals in a VCD.
'''
IEEE_PRINTABLE = []
for i in range(33,127):
IEEE_PRINTABLE.append(chr(i))
if not os.path.isfile(options.input):
logger.error('input file "%s" does not exist' %options.input)
sys.exit(-1)
else:
input_fn = options.input
if os.path.isdir(options.output):
logger.error('output file "%s" is a directory. must be a filename.' % options.output)
sys.exit(-1)
if os.path.exists(options.output):
logger.warning('output file "%s" already exists. THIS WILL OVERWRITE THE EXISTING FILE' % options.output)
output_fn = options.output
if not options.verbose:
logging.info('Disable verbose logging')
logger.setLevel(logging.INFO)
logger.debug('Welcome to the VCD waveform writer tool')
logger.debug('You have specified the file "%s" to transform into a vcd file.' % input_fn)
logger.debug('You have specified the file "%s" to write data to.' % output_fn)
if options.verbose:
if not ask_ok("Is this what you intend do process?"):
sys.exit(-1)
#now to open up csv file for reading and writing
ifile = open(input_fn, 'r')
reader = csv.reader(ifile)
ofile = open(output_fn, 'wb')
# build header and VCD symbol information
# this will work for up to 96 columns
symbols = list(IEEE_PRINTABLE)
timeIndex = None
header= reader.next()
hzip = zip(header,range(len(header)))
header_dict = {}
for i in hzip:
value,indx = i
m = compiled_time.match(value)
if m:
timeIndex = indx
else:
symbol = symbols.pop()
header_dict[indx] = {'name':value, 'symbol':symbol}
    if timeIndex is None:
logger.error('Did not identify time index')
logger.error('Time column must match the regex: %s' % time_re)
sys.exit(-1)
# write VCD header information
write_vcd_header(ofile,header_dict)
# get the first row of data, and use that row to initialize the variables
# at the first time #0
row = reader.next()
t = row[timeIndex]
del row[timeIndex]
t,_ = t.split('.')
if int(t) != 0:
logger.error('First row of data does not contain "0" as the timestamp.')
sys.exit(-1)
ofile.write('$comment\n Note: Executing $dumpvars on all variables at time 0\n$end\n')
ofile.write('$dumpvars\n')
for index in header_dict:
symbol = header_dict[index]['symbol']
value = row[index]
ofile.write('%s%s\n' % (str(value),symbol))
ofile.write('$end\n')
old_row = list(row)
change_data = []
for row in reader:
changes = {}
t = row[timeIndex]
del row[timeIndex]
t,_ = t.split('.')
if row != old_row:
for i in range(len(row)):
if row[i] != old_row[i]:
value = row[i]
symbol = header_dict[i]['symbol']
changes[symbol] = value
changes['time'] = t
old_row = list(row)
change_data.append(changes)
# write change data
for change in change_data:
write_changes(change,ofile)
ofile.write('#%s\n' %(t))
ifile.close()
ofile.close()
sys.exit(0)
def write_changes(changes, ofile):
'''
write out the value/time data to ofile in VCD format
'''
time = changes['time']
changes.pop('time')
ofile.write('#%s\n' % (time))
for symbol in changes:
ofile.write('%s%s\n' % (str(changes[symbol]),symbol))
def write_vcd_header(ofile,header_dict):
'''
write out the VCD header
this assumes a fixed timescale of 1 microsecond
'''
version_string = 'normalize_to_vcd %d' % (__version__)
date_format = '%Y-%m-%dT%H:%M:%SZ'
t = datetime.datetime.utcnow()
date_string = t.strftime(date_format)
ofile.write('$date %s $end\n' %(date_string))
ofile.write('$version %s $end\n' %(version_string))
ofile.write('$timescale 1 us $end\n')
# scope
ofile.write('$scope module logic_analyzer $end\n')
for i in header_dict:
symbol = header_dict[i]['symbol']
name = header_dict[i]['name']
ofile.write('$var wire 1 %s %s $end\n' %(symbol,name))
ofile.write('$upscope $end\n')
ofile.write('$enddefinitions $end\n')
return True
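# For reference, the header emitted for a two-signal CSV with columns named
# "clk" and "data" would look roughly like this (symbols are popped from the
# end of the printable-ASCII list, so '~' and '}' are assigned first):
#
#   $date 2012-06-01T12:00:00Z $end
#   $version normalize_to_vcd 1 $end
#   $timescale 1 us $end
#   $scope module logic_analyzer $end
#   $var wire 1 ~ clk $end
#   $var wire 1 } data $end
#   $upscope $end
#   $enddefinitions $end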
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
while True:
ok = raw_input(prompt)
if ok in ('y', 'ye', 'yes'):
return True
if ok in ('n', 'no', 'nop', 'nope'):
return False
retries = retries - 1
if retries < 0:
raise IOError('refusenik user')
print complaint
def vcd_opts():
'''
add optparse options here
'''
options = []
options.append(optparse.make_option('-i','--input', dest='input', default=False,
help='input file'))
options.append(optparse.make_option('-o','--output', dest='output', default=False,
help='output file'))
options.append(optparse.make_option('--verbose', action='store_true',default=False,
help='Enable verbose output'))
return options
if __name__ == "__main__":
'''
add optparse parsing here
add option checking here
'''
parser = optparse.OptionParser(option_list=vcd_opts(),epilog=USAGE)
(options,args) = parser.parse_args()
if not (options.input and options.output):
logger.error('Must specify input and output files')
logger.error('%s' % USAGE)
sys.exit(-1)
    main(options)
import glob
import os
import sys
import pdb
import os.path as osp
sys.path.append(os.getcwd())
import math
import pickle as pk
import argparse
import time
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import joblib
from khrylib.utils import *
from meva.utils.config import Config
from meva.lib.model import *
from meva.utils.transform_utils import *
from meva.utils.image_utils import *
from meva.lib.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J14, SMPL_MEAN_PARAMS
from meva.utils.video_config import MEVA_DATA_DIR
from meva.utils.eval_utils import (
compute_accel,
compute_error_accel,
compute_error_verts,
batch_compute_similarity_transform_torch,
smpl_to_joints,
compute_metric_on_seqs
)
from copycat.smpllib.smpl_mujoco import SMPL_M_Renderer
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_index", type=int, default=0)
parser.add_argument("--cfg", default=None)
parser.add_argument("--image_size", action="store_true", default=400)
parser.add_argument("--render", action="store_true", default=False)
parser.add_argument("--iter", type=int, default=-2)
args = parser.parse_args()
dtype = torch.float32
torch.set_default_dtype(dtype)
cfg_name = args.cfg
cfg = Config(args.cfg)
gpu_index = args.gpu_index
device = torch.device('cuda', index=gpu_index)
image_size = args.image_size
has_smpl_root = cfg.data_specs['has_smpl_root']
model, _, run_batch = get_models(cfg, iter = args.iter)
model.to(device)
model.eval()
smpl = SMPL(
SMPL_MODEL_DIR,
batch_size=50,
create_transl=False,
dtype = dtype
).to(device)
J_regressor = torch.from_numpy(np.load(osp.join(MEVA_DATA_DIR, 'J_regressor_h36m.npy'))).float()
output_base = "/hdd/zen/data/ActmixGenenerator/output/3dpw"
output_path = osp.join(output_base, cfg_name)
if not osp.isdir(output_path): os.makedirs(output_path)
dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_train_res.pkl")
# dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_val_res.pkl")
# dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_test_res.pkl")
image_size = 400
total = cfg.data_specs['t_total']
if args.render:
# renderer = SMPL_Renderer(device = device, image_size = 400, camera_mode="look_at")
renderer = SMPL_M_Renderer(render_size = (image_size, image_size))
eval_recs =[]
# eval_vibe =[]
idx = 0
for k, v in tqdm(dataset_3dpw.items()):
curr_name = v
mocap_thetas = v['target_traj']
vibe_thetas = v['traj']
vis_feats = v['feat']
mocap_betas = v['target_beta']
vibe_betas = v['traj_beta']
with torch.no_grad():
vibe_pose = torch.tensor(vibe_thetas).squeeze().to(device)
mocap_pose = torch.tensor(mocap_thetas).squeeze().to(device)
vis_feats = torch.tensor(vis_feats).squeeze().to(device)
vibe_betas = torch.tensor(vibe_betas).squeeze().to(device)
mocap_betas = torch.tensor(mocap_betas).squeeze().to(device)
mocap_pose_6d = convert_aa_to_orth6d(mocap_pose).reshape(-1, 144)
mocap_pose_6d = mocap_pose_6d[None, :].permute(1, 0, 2)
vibe_pose_6d = convert_aa_to_orth6d(vibe_pose).reshape(-1, 144)
vibe_pose_6d = vibe_pose_6d[None, :].permute(1, 0, 2)
vis_feats = vis_feats[None, :].permute(1, 0, 2)
mocap_pose_6d_chunks = torch.split(mocap_pose_6d, total, dim=0)
vibe_pose_6d_chunks = torch.split(vibe_pose_6d, total, dim=0)
vis_feats_chunks = torch.split(vis_feats, total, dim=0)
X_r_acc = []
for i in range(len(mocap_pose_6d_chunks)):
mocap_pose_chunk = mocap_pose_6d_chunks[i]
vibe_pose_chunk = vibe_pose_6d_chunks[i]
vis_feats_chunk = vis_feats_chunks[i]
label_rl = torch.tensor([[1,0]]).to(device).float()
X_r, mu, logvar = model(mocap_pose_chunk)
X_r_acc.append(X_r[:mocap_pose_chunk.shape[0]])
X_r = torch.cat(X_r_acc)
X_r = X_r.permute(1,0,2)
ref_pose_curr_rl = convert_orth_6d_to_aa(X_r.squeeze())
######## Rendering...... ########
if args.render:
mocap_pose = vertizalize_smpl_root(mocap_pose).cpu().numpy()
ref_pose_curr_rl = vertizalize_smpl_root(ref_pose_curr_rl).cpu().numpy()
tgt_images = renderer.render_smpl(mocap_pose)
ref_images = renderer.render_smpl(ref_pose_curr_rl)
grid_size = [1,2]
videos = [tgt_images, ref_images]
descriptions = ["Mocap", "VAE"]
output_name = "{}/output_vae{:02d}.mp4".format(output_path, idx)
assemble_videos(videos, grid_size, descriptions, output_name)
print(output_name)
idx += 1
else:
eval_acc = compute_metric_on_seqs(ref_pose_curr_rl, mocap_betas, mocap_pose, mocap_betas, smpl, J_regressor=J_regressor)
eval_recs.append(eval_acc)
print(np.mean(eval_recs, axis = 0))
import yaml
import json
def main():
yaml_file = 'my_yaml.yml'
json_file = 'my_json.json'
    net_dict = {'ip_addr': '192.168.1.214', 'manufacturer': 'Cisco', 'model': '2504'}
net_list = ['test_strings','1','2','3', net_dict, 'python', 'neteng']
with open(yaml_file, "w") as f:
f.write(yaml.dump(net_list, default_flow_style=False))
with open(json_file, "w") as f:
json.dump(net_list, f)
main()
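# With default_flow_style=False the YAML file is written in block style,
# roughly:
#
#   - test_strings
#   - '1'
#   - '2'
#   - '3'
#   - ip_addr: 192.168.1.214
#     manufacturer: Cisco
#     model: '2504'
#   - python
#   - neteng
#
# while the JSON file holds the same list serialized on a single line.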
from perceptual.metric_copy import Metric
import cv2
import os
import glob
import heapq
import torch
import time
from torch.utils.data import Dataset, DataLoader
data_dir = "/Users/yuxiao/Desktop/data/Corbis128BigExperiment_gray/"
data = glob.glob(data_dir + "*.tiff")
class ImgData(Dataset):
def __init__(self, k, data):
self.data = data
self.img1 = cv2.imread(data[k], cv2.IMREAD_GRAYSCALE)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img2_path = self.data[idx]
img2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
score = m.STSIM2(self.img1, img2)
sample = score
return sample
def del_path(s):
(_, temp) = os.path.split(s)
return temp
def takesecond(elem):
return elem[1]
m = Metric()
res = []
knum = 10
for k in range(len(data)):
tmp = []
score = []
img1name = del_path(data[k])
tmp.append(img1name)
dataset = ImgData(k, data)
#print(len(dataset))
dataloader = DataLoader(dataset,
batch_size = 16,
shuffle = False,
num_workers = 16,
pin_memory = True)
score_list = []
for idx, batch_data in enumerate(dataloader):
score_list.extend(batch_data.numpy().tolist())
max_num_index_list = list(map(score_list.index, heapq.nlargest(knum, score_list)))
for ind in max_num_index_list:
tmp.append(del_path(data[ind]))
score.append(score_list[ind])
tmp.extend(score)
res.append(tmp)
if k%256 == 0:
print("%d images done"%(k+1))
#-----------------------------------------
outputfile = "./stsim_2_result.txt"
with open(outputfile, 'a') as f:
for i in range(len(res)):
line = ''
for name in res[i]:
line = line + str(name) + ','
line = line[:-1] + '\n'
f.write(line)
f.close()
"""
File: testbag.py
Author: Yeyuning
A tester program for bag implementations.
"""
from Bag.arraybag import ArrayBag
from Bag.linkedbag import LinkedBag
# def test(bagType):
# lyst = [2013, 61, 1973]
# print('The list of items added is:', lyst)
# b1 = bagType(lyst)
# print("Expect 3:", len(b1))
# print("Expect the bag's string:", b1)
# print("Expect True:", 2013 in b1)
# print("Expect False:", 2012 in b1)
# print("Expect the items on separate lines:")
# for item in b1:
# print(item)
# b1.clear()
# print('Expect {}:', b1)
# b1.add(25)
# b1.remove(25)
# print("Expect {}:", b1)
# b1 = bagType(lyst)
# b2 = bagType(b1)
# print("Expect True:", b1 == b2)
# print("Expect False:", b1 is b2)
# print("Expect two of each item:", b1 + b2)
# for item in lyst:
# b1.remove(item)
# print("Expect {}:", b1)
# print("Expect crash with KeyError:")
# b2.remove(99)
# if __name__ == '__main__':
# test(ArrayBag)
# test(LinkedBag)
from django.apps import AppConfig
class PlotsearchConfig(AppConfig):
name = "plotsearch"
def ready(self):
import plotsearch.signals # noqa: F401
# pylint: disable=W0221,W0223
import os
from codebase.web import APIRequestHandler
class HealthHandler(APIRequestHandler):
def get(self):
self.write("ok")
class SpecHandler(APIRequestHandler):
"""
提供 SwaggerUI YAML 文档
"""
def get(self):
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
abspath = os.path.join(path, "schema.yml")
self.set_header("Content-Type", "text/plain")
        with open(abspath, "rb") as f:
            self.write(f.read())
import os.path
import random
def gerar_id():
return "AZ" + str(random.randrange(1, 1000))
def designar_arq():
return gerar_id() + ".txt"
def formulario(fich):
    fich.write("Id: " + gerar_id() + "\n")
    fich.write("Name: " + input("Name: ").capitalize() + "\n")
    fich.write("Profile: Teacher\n")
    fich.write(input("Username: ") + "\n")
    fich.write(input("Password: ") + "\n")
    fich.close()
path = "professor.txt"
def fill_up_list():
users = []
with open(path) as arquivo:
for user in arquivo:
users.append(user.strip())
return users
def registar_prof(registar):
if registar:
arq = open(path, "w")
formulario(arq)
else:
        if not fill_up_list():
fich = open(path, "w")
formulario(fich)
else:
fich = open(path, "a")
formulario(fich)
def login(username, password):
    users = fill_up_list()
    return username in users and password in users
def registar_est():
    codigo_est = int(input("Student code: "))
    est = open(str(codigo_est) + ".txt", "w")
    est.write("Code: " + str(codigo_est) + "\n")
    est.write("Student name: " + input("Student name: ") + "\n")
    est.write("Course: " + input("Course: ") + "\n")
    est.close()
def pesquisar_est(codigo_est):
est = []
if os.path.exists(str(codigo_est) + ".txt"):
with open(str(codigo_est) + ".txt") as arquivo:
for estudante in arquivo:
est.append(estudante.strip())
return est
def actualizar_est(codigo_est):
    found = "Code: " + str(codigo_est) in pesquisar_est(codigo_est)
    if found:
        print(pesquisar_est(codigo_est)[0])
        print()
print()
def main():
    if not os.path.exists(path):
        registar_prof(registar=True)
    elif login(input("Username: "), input("Password: ")):
        print("SUCCESS")
    else:
        print("Incorrect username or password")
main()
| 2,189 | 801 |
v = float(input('qual e o valor do seu produto? R$:'))
d = v / 100 * 5
print('o valor do produto de R$:{} agora com 5% de desconto ficou R$:{:.2f}'.format(v, (v-d)))
| 166 | 73 |
from ...language.base import parse
from ...utils.ast_to_code import ast_to_code
from ..compiled import GraphQLCompiledDocument
from .schema import schema
def test_compileddocument_from_module_dict():
# type: () -> None
document_string = "{ hello }"
document_ast = parse(document_string)
document = GraphQLCompiledDocument.from_module_dict(
schema,
{
"document_string": document_string,
"document_ast": document_ast,
"execute": lambda *_: True,
},
)
assert document.operations_map == {None: "query"}
assert document.document_string == document_string
assert document.document_ast == document_ast
assert document.schema == schema
assert document.execute()
def test_compileddocument_from_code():
# type: () -> None
document_string = "{ hello }"
document_ast = parse(document_string)
code = '''
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from graphql.language import ast
from graphql.language.parser import Loc
from graphql.language.source import Source
schema = None
document_string = """{document_string}"""
source = Source(document_string)
def loc(start, end):
return Loc(start, end, source)
document_ast = {document_ast}
def execute(*_):
return True
'''.format(
document_string=document_string, document_ast=ast_to_code(document_ast)
)
document = GraphQLCompiledDocument.from_code(schema, code)
assert document.operations_map == {None: "query"}
assert document.document_string == document_string
assert document.document_ast == document_ast
assert document.schema == schema
assert document.execute()
| 1,692 | 482 |
# -*- coding: utf-8 -*-
"""Design views."""
from flask import Blueprint, request, jsonify, render_template, make_response
from flask_login import login_required, current_user
from .forms import CreateECOForm
from .models import ECO
from pid.common.models import Project, Approver
from pid.mail import send_email
from pid.user.models import User
from pid.design.models import Design
blueprint = Blueprint('eco', __name__, url_prefix='/eco', static_folder='../static')
@blueprint.route('/create', methods=['POST'])
@login_required
def create_eco():
"""Create new ECO."""
form = CreateECOForm(request.form)
validated = form.validate_on_submit()
design_ids = form.designs.data.split(',')
designs = []
for design_id in design_ids:
design = Design.get_by_id(design_id)
        if design is not None:
designs.append(design)
if validated:
variables = {
'name': form.name.data,
'owner': form.owner.data,
'project': designs[0].project
}
eco = ECO.create(**variables)
for design in designs:
eco.designs.append(design)
eco.save()
jsonData = {
'success': True,
'ecoId': eco.id,
'url': eco.get_url()
}
return jsonify(jsonData), 200, {'ContentType': 'application/json'}
else:
return make_response(render_template('eco/create_eco.html', form=form, designs=designs), 500)
@blueprint.route('/update', methods=['POST'])
@login_required
def update_eco():
id = request.form['pk']
    # The field UID has the form [fieldname]-[classname]-[id]-editable
    # (e.g. 'name-ECO-42-editable'), so the field name is always the first section
field = request.form['name'].split('-')[0]
field_value = request.form['value']
eco = ECO.get_by_id(id)
original_value = None
if field == 'name':
original_value = eco.name
eco.update(name=field_value)
    elif field == 'summary':
original_value = eco.summary
eco.update(summary=field_value)
    elif field == 'analysis':
original_value = eco.analysis
eco.update(analysis=field_value)
    elif field == 'corrective_action':
original_value = eco.corrective_action
eco.update(corrective_action=field_value)
elif field == 'project':
if eco.project:
original_value = eco.project.name
project = Project.get_by_id(field_value)
eco.update(project=project)
field_value = project.name if project else None
elif field == 'owner':
if eco.owner:
original_value = eco.owner.get_name()
if eco.owner.padawan:
for approver in eco.approvers:
if approver.approver == eco.owner.supervisor and approver.capacity == 'Supervisor':
eco.approvers.remove(approver)
approver.delete()
owner = User.get_by_id(field_value)
if owner.padawan:
approver = Approver.create(approver_id=owner.supervisor_id, capacity='Supervisor')
eco.approvers.append(approver)
eco.update(owner=owner)
field_value = owner.get_name() if owner else None
    elif field == 'thumbnail_id':
thumbnail_id = None if field_value == 'default' else field_value
eco.update(thumbnail_id=thumbnail_id)
return render_template('shared/thumbnail_return.html', record=eco)
eco.add_change_log_entry(action='Edit', field=field.title().replace('_', ' '),
original_value=original_value, new_value=field_value)
return jsonify({'success': True}), 200, {'ContentType': 'application/json'}
@blueprint.route('/update_state', methods=['POST'])
@login_required
def update_eco_state():
# TODO: verify that current_user is owner of record and can edit it
    eco_id = request.values['parent_id']
state = request.form['state']
transition = request.form['transition']
comment = request.values['comment']
    eco = ECO.get_by_id(eco_id)
eco.update(state=state)
eco.add_workflow_log_entry(capacity='Owner', action=transition, comment=comment)
if state == eco.workflow.get_approval_state():
for approver in eco.approvers:
if not approver.approved_at:
variables = {
'record': eco,
'approver': approver,
'comment': comment
}
send_email(subject='Approval Required for {0}: {1}'.format(eco.descriptor, eco.get_name()),
recipients=[approver.approver.email],
text_body=render_template('mail/approvals/new_approver.txt', **variables),
html_body=render_template('mail/approvals/new_approver.html', **variables))
elif state == eco.workflow.released_state:
# Only self-approval will trigger this
eco.add_workflow_log_entry(capacity='PLAIDmin', action='Approved')
return jsonify({'success': True}), 200, {'ContentType': 'application/json'}
@blueprint.route('/<string:key>', methods=['GET'])
@login_required
def view_eco(key):
"""View existing eco."""
eco = ECO.get_by_key(key)
users = User.query.all()
projects = Project.query.all()
variables = {
'eco': eco,
'users': users,
'projects': projects
}
return render_template('eco/view_eco.html', **variables)
@blueprint.route('/typeahead_search', methods=['GET'])
@login_required
def typeahead_search():
query = request.args.get('query')
ecos = ECO.typeahead_search(query)
results = []
for eco in ecos:
eco_dict = {}
eco_dict['class'] = eco.get_class_name()
eco_dict['icon'] = '<i class="pri-typeahead-icon pri-icons-record-eco" aria-hidden="true"></i>'
eco_dict['id'] = eco.id
eco_dict['name'] = eco.name
eco_dict['number'] = eco.key
eco_dict['object_type'] = 'ECO'
eco_dict['state'] = eco.state
eco_dict['thumb_url'] = eco.get_thumbnail_url()
eco_dict['url'] = eco.get_url()
results.append(eco_dict)
return jsonify({'success': True, 'data': results}), 200, {'ContentType': 'application/json'}
@blueprint.route('/get_create_modal', methods=['POST'])
@login_required
def get_eco_modal():
form = CreateECOForm(request.form)
variables = {
'form': form
}
design_id = request.form.get('design_id', None)
if design_id:
variables['designs'] = [Design.get_by_id(design_id)]
return render_template('eco/create_eco.html', **variables)
@blueprint.route('/advanced_search', methods=['GET'])
@login_required
def advanced_search_ecos():
params = request.args.to_dict()
ecos = ECO.advanced_search(params)
results = []
for eco in ecos:
eco_dict = {
'eco_number': eco.key,
'name': eco.name,
'state': eco.state,
'project': eco.project.name,
'summary': eco.summary,
'owner': eco.owner.get_name(),
'created_by': eco.created_by.get_name(),
'created_at': eco.created_at,
'url': eco.get_url()
}
results.append(eco_dict)
return jsonify({'success': True, 'data': results}), 200, {'ContentType': 'application/json'}
@blueprint.route('/get_add_design_typeahead_modal', methods=['POST'])
@login_required
def get_add_design_typeahead_modal():
eco_id = request.values['eco_id']
eco = ECO.get_by_id(eco_id)
designs = []
for design in eco.designs:
designs.extend([rev_design.id for rev_design in design.find_all_revisions()])
variables = {
'eco': eco,
'designs': designs
}
return render_template('eco/add_design_typeahead_modal.html', **variables)
@blueprint.route('/update_design', methods=['POST'])
@login_required
def update_design():
eco_id = request.values['eco_id']
old_design_id = request.values['old_design_id']
new_design_id = request.values['new_design_id']
eco = ECO.get_by_id(eco_id)
old_design = Design.get_by_id(old_design_id)
new_design = Design.get_by_id(new_design_id)
eco.designs.remove(old_design)
eco.designs.append(new_design)
eco.add_change_log_entry(action='Edit', field='Design', original_value=old_design.get_descriptive_url(),
new_value=new_design.get_descriptive_url())
eco.save()
variables = {
'eco': eco,
'design': new_design
}
return render_template('eco/eco_design_row.html', **variables)
@blueprint.route('/add_design', methods=['POST'])
@login_required
def add_design():
eco_id = request.values['eco_id']
design_id = request.values['design_id']
eco = ECO.get_by_id(eco_id)
design = Design.get_by_id(design_id)
eco.designs.append(design)
eco.add_change_log_entry(action='Add', field='Design', new_value=design.get_descriptive_url())
eco.save()
variables = {
'eco': eco,
'design': design
}
return render_template('eco/eco_design_row.html', **variables)
@blueprint.route('/remove_design', methods=['POST'])
@login_required
def remove_design():
eco_id = request.values['eco_id']
eco = ECO.get_by_id(eco_id)
design_id = request.values['design_id']
design = Design.get_by_id(design_id)
eco.designs.remove(design)
eco.add_change_log_entry(action='Remove', field='Design', original_value=design.get_descriptive_url())
eco.save()
return jsonify({'success': True}), 200, {'ContentType': 'application/json'}
| 9,568 | 3,141 |
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from ..fields import UUIDField, BooleanField
from ..models import User, User_Group_Membership
import re
class CreateMembershipSerializer(serializers.Serializer):
user_id = UUIDField(required=True)
group_id = UUIDField(required=True)
secret_key = serializers.CharField(required=True)
secret_key_nonce = serializers.CharField(max_length=64, required=True)
secret_key_type = serializers.CharField(default='asymmetric')
private_key = serializers.CharField(required=True)
private_key_nonce = serializers.CharField(max_length=64, required=True)
private_key_type = serializers.CharField(default='asymmetric')
group_admin = BooleanField(default=False)
share_admin = BooleanField(default=True)
def validate_secret_key(self, value):
value = value.strip()
if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
msg = _('secret_key must be in hex representation')
raise exceptions.ValidationError(msg)
return value
def validate_secret_key_nonce(self, value):
value = value.strip()
if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
msg = _('secret_key_nonce must be in hex representation')
raise exceptions.ValidationError(msg)
return value
def validate_secret_key_type(self, value):
value = value.strip()
if value not in ('symmetric', 'asymmetric'):
msg = _('Unknown secret key type')
raise exceptions.ValidationError(msg)
return value
def validate_private_key(self, value):
value = value.strip()
if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
msg = _('private_key must be in hex representation')
raise exceptions.ValidationError(msg)
return value
def validate_private_key_nonce(self, value):
value = value.strip()
if not re.match('^[0-9a-f]*$', value, re.IGNORECASE):
msg = _('private_key_nonce must be in hex representation')
raise exceptions.ValidationError(msg)
return value
def validate_private_key_type(self, value):
value = value.strip()
if value not in ('symmetric', 'asymmetric'):
msg = _('Unknown private key type')
raise exceptions.ValidationError(msg)
return value
def validate_user_id(self, value):
try:
User.objects.get(pk=value)
except User.DoesNotExist:
msg = _('Target user does not exist.')
raise exceptions.ValidationError(msg)
return value
def validate_group_id(self, value):
# This line also ensures that the desired group exists and that the user firing the request has admin rights
if not User_Group_Membership.objects.filter(group_id=value, user=self.context['request'].user, group_admin=True, accepted=True).exists():
msg = "NO_PERMISSION_OR_NOT_EXIST"
raise exceptions.ValidationError(msg)
return value
def validate(self, attrs: dict) -> dict:
user_id = attrs.get('user_id')
group_id = attrs.get('group_id')
        if User_Group_Membership.objects.filter(group_id=group_id, user_id=user_id).exists():
msg = _("User is already part of the group.")
raise exceptions.ValidationError(msg)
return attrs
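# A minimal usage sketch of the standard DRF flow (the request object and the
# surrounding view are assumptions, not part of this module):
#
#   serializer = CreateMembershipSerializer(
#       data=request.data, context={'request': request}
#   )
#   serializer.is_valid(raise_exception=True)
#   membership_fields = serializer.validated_data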
| 3,485 | 1,012 |
# -*- coding: utf-8 -*-
from . import __version__
import logging
import stanza
from KafNafParserPy import *
from lxml.etree import XMLSyntaxError
from io import BytesIO
import sys
from itertools import groupby
from operator import itemgetter
from xml.sax.saxutils import escape
logger = logging.getLogger(__name__)
this_name = 'Morphosyntactic parser based on stanza'
default_treebank = 'alpino'
def get_naf(input_file):
input = input_file.read()
try:
naf = KafNafParser(BytesIO(input))
except XMLSyntaxError:
input = input.decode("utf-8")
if "<NAF" in input and "</NAF>" in input:
# I'm guessing this should be a NAF file but something is wrong
            logger.exception("Error parsing NAF file")
raise
naf = KafNafParser(type="NAF")
naf.set_version("3.0")
naf.set_language("nl")
naf.lang = "nl"
naf.raw = input
naf.set_raw(naf.raw)
return naf
def create_text_layer(st_doc, knaf_obj):
id_to_tokenid = {}
wcount = 1
offsets = {}
txt = knaf_obj.get_raw()
for sid, sentence in enumerate(st_doc.sentences):
id_to_tokenid[sid+1] = {}
for token in sentence.tokens:
token_obj = Cwf(type=knaf_obj.get_type())
token_id = 'w{}'.format(wcount)
token_length = len(token.text)
offsets[wcount] = txt.find(token.text, offsets.get(wcount-1, 0))
token_obj.set_id(token_id)
token_obj.set_length(str(token_length))
token_obj.set_para('1')
token_obj.set_sent(str(sid+1))
token_obj.set_text(token.text)
token_obj.set_offset(str(offsets[wcount]))
wcount += 1
id_to_tokenid[sid+1][token.id[0]] = token_id
knaf_obj.add_wf(token_obj)
return id_to_tokenid
def get_term_type(pos):
if pos in ['det', 'pron', 'prep', 'vg', 'conj']:
return 'close'
else:
return 'open'
def create_term_layer(st_doc, knaf_obj, id_to_tokenid):
tcount = 0
term_id_mapping = {} # Mapping from stanford word index -> NAF term id
for sid, sentence in enumerate(st_doc.sentences):
for term in sentence.words:
new_term_id = 't_'+str(tcount)
term_id_mapping[(sid, term.id)] = new_term_id
term_obj = Cterm(type=knaf_obj.get_type())
term_obj.set_id(new_term_id)
new_span = Cspan()
new_span.create_from_ids([id_to_tokenid[sid+1]
[term.parent.id[0]]])
term_obj.set_span(new_span)
# lemma: copy from stanza
term_obj.set_lemma(term.lemma)
# pos: take the UD UPOS value
term_obj.set_pos(term.upos.lower())
# external reference: the UD FEATS value
if term.feats:
ext_ref = CexternalReference()
ext_ref.set_resource('Stanza')
ext_ref.set_reftype('FEATS')
ext_ref.set_reference(term.feats)
term_obj.add_external_reference(ext_ref)
# morphofeat: reformatted UD XPOS value
if term.xpos:
feats = term.xpos.split('|')
feat = feats[0] + '(' + ','.join(feats[1:]) + ')'
term_obj.set_morphofeat(feat)
termtype = get_term_type(term.upos.lower())
term_obj.set_type(termtype)
knaf_obj.add_term(term_obj)
tcount += 1
return term_id_mapping
def create_dependency_layer(st_doc, knaf_obj, term_id_mapping):
for s_id, sent in enumerate(st_doc.sentences):
for source, rel, target in sent.dependencies:
# Do not include root
if rel != 'root':
# Creating comment
str_comment = ' '+rel+'('+str(target.lemma)+','+str(source.lemma)+') '
str_comment = escape(str_comment, {"--": "&ndash"})
my_dep = Cdependency()
my_dep.set_from(term_id_mapping.get((s_id, source.id)))
my_dep.set_to(term_id_mapping.get((s_id, target.id)))
my_dep.set_function(rel)
my_dep.set_comment(str_comment)
knaf_obj.add_dependency(my_dep)
def add_linguistic_processors(in_obj, added_text_layer, treebank):
name = this_name + ' using {} treebank'.format(treebank)
if added_text_layer:
my_lp = Clp()
my_lp.set_name(name)
my_lp.set_version(__version__)
my_lp.set_timestamp()
in_obj.add_linguistic_processor('text', my_lp)
my_lp = Clp()
my_lp.set_name(name)
my_lp.set_version(__version__)
my_lp.set_timestamp()
in_obj.add_linguistic_processor('terms', my_lp)
my_lp = Clp()
my_lp.set_name(name)
my_lp.set_version(__version__)
my_lp.set_timestamp()
in_obj.add_linguistic_processor('deps', my_lp)
return in_obj
def parse(input_file, treebank=None):
treebank = treebank if treebank is not None else default_treebank
if isinstance(input_file, KafNafParser):
in_obj = input_file
else:
in_obj = get_naf(input_file)
lang = in_obj.get_language()
if lang != 'nl':
        logger.error('Language is {} but must be nl (Dutch)'.format(lang))
sys.exit(-1)
if in_obj.text_layer is None:
added_text_layer = True
nlp = stanza.Pipeline(lang='nl',
processors='tokenize,pos,lemma,depparse',
package=treebank)
text = in_obj.get_raw()
in_obj.remove_text_layer()
doc = nlp(text)
id_to_tokenid = create_text_layer(doc, in_obj)
else:
# Use existing tokenization
added_text_layer = False
nlp = stanza.Pipeline(lang='nl',
tokenize_pretokenized=True,
processors='tokenize,pos,lemma,depparse',
package=treebank)
sent_tokens_ixa = [(token.get_sent(), token.get_text())
for token in in_obj.get_tokens()]
text = [[t for s2, t in toks]
for s, toks in groupby(sent_tokens_ixa, itemgetter(0))]
# TODO: is this correct??? can we make it more elegant?
id_to_tokenid = {int(k):
{i+1: t.get_id() for i, t in enumerate(g)}
for k, g in
groupby(in_obj.get_tokens(), lambda t: t.get_sent())}
doc = nlp(text)
        # Check that we don't get multi-word tokens
        if any(len(sent.tokens) != len(sent.words)
               for sent in doc.sentences):
            raise Exception('stanza returned multi-word tokens. '
                            'This is not allowed for Dutch.')
term_id_mapping = create_term_layer(doc, in_obj, id_to_tokenid)
create_dependency_layer(doc, in_obj, term_id_mapping)
in_obj = add_linguistic_processors(in_obj, added_text_layer, treebank)
return in_obj
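# A minimal usage sketch (assuming a NAF file on disk and the stanza Dutch
# models for the chosen treebank already downloaded):
#
#   with open('input.naf', 'rb') as naf_file:
#       enriched = parse(naf_file, treebank='alpino')
#   enriched.dump()  # KafNafParserPy serializes the enriched NAF to stdout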
| 7,158 | 2,381 |
"""
*Measure*
The measure type.
"""
from abc import ABCMeta
__all__ = ["Measure"]
class Measure(metaclass=ABCMeta):
    """Abstract base class for all measure types."""
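# A minimal sketch of a concrete subclass (the name and method are
# illustrative assumptions, not part of this package):
#
#   class EuclideanMeasure(Measure):
#       def distance(self, a, b):
#           return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5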
| 138 | 55 |
from micro_center_price_monitor.scraper import MicroCenterScraper
from micro_center_price_monitor.mail import Email
import datetime, time
class PriceChecker:
"""
PriceChecker:
Manages execution flow for:
-> Retrieving search results list
        -> Selecting the wanted product
-> Monitoring price
-> Sending product email
"""
def search(self):
try:
# Prompt to enter a product name
search_for = input('Enter a product:\n')
# Init scraper obj, passing user input for search term
scraper = MicroCenterScraper(search_term=search_for)
# GET request to retrieve first page results
scraper.search_for_products()
# Print search results
scraper.get_products()
# Prompt to search for one of list items
product_selection = int(input('Select a product:\n'))
# Selects product from list
scraper.select_product(product_selection)
# Prompt to enter expected price at discount
expected_price = float(input('Enter your expected price\n'))
while True:
# update pricing info
scraper.check_product_price()
# Get float value of price attribute
price = float(str(scraper.key_product.price).replace(',',''))
                # currency symbol for output (e.g., "$" for USD)
currency_symbol = scraper.key_product.currency
# Print current time and price
print('Price at: %s -> %s%s' % (datetime.datetime.now(),
currency_symbol,
str(price)))
# Email if the price is beneath expected threshold. Otherwise, continue to loop.
if price <= expected_price:
print('Price at or below %s%.2f at %s%.2f' % (currency_symbol, expected_price, currency_symbol, price))
print('Sending email now...')
email = Email(scraper.key_product.name,
currency_symbol + scraper.key_product.price,
scraper.data.product_url)
email.send_email()
break
# sleep for n seconds
time.sleep(scraper.data.REFRESH_SECS)
except ValueError:
print('Invalid product selection value provided. Please try again later.')
except IndexError:
print('Unable to find any search results. Please try again.')
except Exception as e:
            print('Unexpected error has occurred. %s' % e)
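# A minimal usage sketch (interactive; assumes the scraper and mail settings
# are configured):
#
#   if __name__ == '__main__':
#       PriceChecker().search()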
| 2,749 | 658 |
# Per Google's recommendation [1], this is copied from [2], with
# the line ending match adjusted to find spans of whitespace.
#
# The original [2] is used under the Apache License, Version 2.0:
#
# Diff Match and Patch
# Copyright 2018 The diff-match-patch Authors.
# https://github.com/google/diff-match-patch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [1] https://github.com/google/diff-match-patch/wiki/Line-or-Word-Diffs#word-mode
# [2] https://github.com/google/diff-match-patch/blob/858b3812cc02e7d48da4beebb21d4d80dc1d3062/python3/diff_match_patch.py
from typing import Dict, Tuple
import re
def diff_wordsToChars(text1: str, text2: str) -> Tuple[str, str, object]:
"""Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
"""
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash: Dict[str, int] = {} # e.g. lineHash["Hello\n"] == 4
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def next_word_end(text: str, start: int) -> int:
"""Find the next word end (any whitespace) after `start`.
"""
pattern = re.compile(r"([^ \t\n]+)[ \t\n]")
match = pattern.search(text, start)
if not match:
return -1
return start + len(match.group(1))
def diff_linesToCharsMunge(text: str) -> str:
"""Split a text into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Modifies linearray and linehash through being a closure.
Args:
text: String to encode.
Returns:
Encoded string.
"""
chars = []
        # Walk the text, pulling out a substring for each word span.
        # text.split('\n') would temporarily double our memory footprint.
        # Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = next_word_end(text, lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
if line in lineHash:
chars.append(chr(lineHash[line]))
else:
if len(lineArray) == maxLines:
# Bail out at 1114111 because chr(1114112) throws.
line = text[lineStart:]
lineEnd = len(text)
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(chr(len(lineArray) - 1))
lineStart = lineEnd + 1
return "".join(chars)
# Allocate 2/3rds of the space for text1, the rest for text2.
maxLines = 666666
chars1 = diff_linesToCharsMunge(text1)
maxLines = 1114111
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
# flake8: noqa
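# A minimal demonstration of the word-mode encoding (not part of the original
# diff-match-patch sources): both texts are reduced to strings of placeholder
# characters, one per word span, which a character-based diff can then compare.
if __name__ == "__main__":
    chars1, chars2, words = diff_wordsToChars("the quick fox ", "the slow fox ")
    # Identical word spans map to identical placeholder characters
    assert chars1[0] == chars2[0] and chars1[2] == chars2[2]
    print([words[ord(c)] for c in chars1])  # ['the ', 'quick ', 'fox ']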
| 3,613 | 1,203 |
from mayan.apps.converter.layers import layer_saved_transformations
from mayan.apps.converter.permissions import (
permission_transformation_delete, permission_transformation_edit
)
from mayan.apps.converter.tests.mixins import LayerTestMixin
from mayan.apps.documents.tests.literals import TEST_MULTI_PAGE_TIFF
from mayan.apps.file_caching.events import event_cache_partition_purged
from mayan.apps.file_caching.models import CachePartitionFile
from mayan.apps.file_caching.permissions import permission_cache_partition_purge
from mayan.apps.file_caching.tests.mixins import CachePartitionViewTestMixin
from ..events import (
event_document_file_deleted, event_document_file_downloaded,
event_document_file_edited,
)
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_file_edit, permission_document_file_print,
permission_document_file_view
)
from .base import GenericDocumentViewTestCase
from .mixins.document_file_mixins import (
DocumentFileTestMixin, DocumentFileTransformationTestMixin,
DocumentFileTransformationViewTestMixin, DocumentFileViewTestMixin
)
class DocumentFileViewTestCase(
DocumentFileTestMixin, DocumentFileViewTestMixin,
GenericDocumentViewTestCase
):
def test_document_file_delete_no_permission(self):
first_file = self.test_document.file_latest
self._upload_new_file()
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_with_access(self):
first_file = self.test_document.file_latest
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_document.files.count(), test_document_file_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_file_deleted.id)
def test_trashed_document_file_delete_with_access(self):
first_file = self.test_document.file_latest
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_delete_view(
document_file=first_file
)
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_multiple_no_permission(self):
self._upload_new_file()
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_multiple_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document.files.count(), test_document_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_delete_multiple_with_access(self):
self._upload_new_file()
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
test_document_file_count = self.test_document.files.count()
self._clear_events()
response = self._request_test_document_file_delete_multiple_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_document.files.count(), test_document_file_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_file_deleted.id)
def test_document_file_edit_view_no_permission(self):
document_file_comment = self.test_document_file.comment
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_file.refresh_from_db()
self.assertEqual(
self.test_document_file.comment,
document_file_comment
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_edit_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_edit
)
document_file_comment = self.test_document_file.comment
document_file_filename = self.test_document_file.filename
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 302)
self.test_document_file.refresh_from_db()
self.assertNotEqual(
self.test_document_file.comment,
document_file_comment
)
self.assertNotEqual(
self.test_document_file.filename,
document_file_filename
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document_file)
self.assertEqual(events[0].verb, event_document_file_edited.id)
def test_trashed_document_file_edit_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_edit
)
document_file_comment = self.test_document_file.comment
document_file_filename = self.test_document_file.filename
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_file.refresh_from_db()
self.assertEqual(
self.test_document_file.comment,
document_file_comment
)
self.assertEqual(
self.test_document_file.filename,
document_file_filename
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_list_no_permission(self):
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_list_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_view
)
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertContains(
response=response, status_code=200,
text=str(self.test_document_file)
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_list_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_list_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_form_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_form_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_print_form_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_print_form_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_print_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_print_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_print
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_print_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_properties_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_properties_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertContains(
response=response, text=self.test_document_file.filename,
status_code=200
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_properties_view_with_access(self):
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileDownloadViewTestCase(
DocumentFileViewTestMixin, GenericDocumentViewTestCase
):
def test_document_file_download_view_no_permission(self):
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_download_view_with_permission(self):
# Set the expected_content_types for
# common.tests.mixins.ContentTypeCheckMixin
self.expected_content_types = (
self.test_document.file_latest.mimetype,
)
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 200)
with self.test_document.file_latest.open() as file_object:
self.assert_download_response(
response=response, content=file_object.read(),
filename=self.test_document.file_latest.filename,
mime_type=self.test_document.file_latest.mimetype
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document_file)
self.assertEqual(events[0].verb, event_document_file_downloaded.id)
def test_trashed_document_file_download_view_with_permission(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_download_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileTransformationViewTestCase(
LayerTestMixin, DocumentFileTransformationTestMixin,
DocumentFileTransformationViewTestMixin, GenericDocumentViewTestCase
):
test_document_filename = TEST_MULTI_PAGE_TIFF
def test_document_file_transformations_clear_view_no_permission(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count - 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_file_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_file_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_multiple_transformations_clear_view_no_permission(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self._clear_events()
response = self._request_test_document_file_multiple_transformations_clear_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_multiple_transformations_clear_view_with_access(self):
self._create_document_file_transformation()
transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_document_file_view
)
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_delete
)
self._clear_events()
response = self._request_test_document_file_multiple_transformations_clear_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), transformation_count - 1,
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clone_view_no_permission(self):
self._create_document_file_transformation()
page_first_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
page_last_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count()
self._clear_events()
response = self._request_test_document_file_transformations_clone_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), page_first_transformation_count
)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count(), page_last_transformation_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_transformations_clone_view_with_access(self):
self._create_document_file_transformation()
page_first_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count()
page_last_transformation_count = layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count()
self.grant_access(
obj=self.test_document_file,
permission=permission_transformation_edit
)
self._clear_events()
response = self._request_test_document_file_transformations_clone_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.first()
).count(), page_first_transformation_count
)
self.assertEqual(
layer_saved_transformations.get_transformations_for(
obj=self.test_document_file.pages.last()
).count(), page_last_transformation_count + 1
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class DocumentFileCachePurgeViewTestCase(
CachePartitionViewTestMixin, GenericDocumentViewTestCase
):
def test_document_file_cache_purge_no_permission(self):
self.test_object = self.test_document_file
self._inject_test_object_content_type()
self.test_document_file.file_pages.first().generate_image()
test_document_file_cache_partitions = self.test_document_file.get_cache_partitions()
cache_partition_file_count = CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count()
self._clear_events()
response = self._request_test_object_file_cache_partition_purge_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count(), cache_partition_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_file_cache_purge_with_access(self):
self.test_object = self.test_document_file
self._inject_test_object_content_type()
self.grant_access(
obj=self.test_document_file,
permission=permission_cache_partition_purge
)
self.test_document_file.file_pages.first().generate_image()
test_document_file_cache_partitions = self.test_document_file.get_cache_partitions()
cache_partition_file_count = CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count()
self._clear_events()
cache_partitions = self.test_document_file.get_cache_partitions()
response = self._request_test_object_file_cache_partition_purge_view()
self.assertEqual(response.status_code, 302)
self.assertNotEqual(
CachePartitionFile.objects.filter(
partition__in=test_document_file_cache_partitions
).count(), cache_partition_file_count
)
events = self._get_test_events()
self.assertEqual(events.count(), 2)
self.assertEqual(events[0].action_object, self.test_document_file)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, cache_partitions[0])
self.assertEqual(events[0].verb, event_cache_partition_purged.id)
self.assertEqual(events[1].action_object, self.test_document_file)
self.assertEqual(events[1].actor, self._test_case_user)
self.assertEqual(events[1].target, cache_partitions[1])
self.assertEqual(events[1].verb, event_cache_partition_purged.id)
| 24,304 | 7,103 |
# -*- coding: utf-8 -*-
import json
import logging
from elasticsearch_dsl import Q, SF
from sqlalchemy.orm.exc import NoResultFound
from tg.decorators import expose, redirect, paginate
from tg.exceptions import HTTPNotFound
from pyjobsweb.model import JobAlchemy
from pyjobsweb.model import JobElastic
from pyjobsweb.model.data import SOURCES
from pyjobsweb.lib.base import BaseController
from pyjobsweb.lib.elasticsearch_ import PaginatedSearch
from pyjobsweb.forms.research_forms import JobsResearchForm
class SearchJobsController(BaseController):
items_per_page = 10
def __init__(self, items_per_page=10):
self.items_per_page = items_per_page
@staticmethod
def _compute_keyword_queries(terms):
search_on = dict(
description=[
'description',
'description.technologies'
],
title=[
'title',
'title.technologies'
],
company=['company']
)
description_query = Q(
'multi_match',
type='most_fields',
query=terms,
fields=search_on['description'],
fuzziness='AUTO',
operator='or',
minimum_should_match='1<2 2<2 3<3 4<3 5<4 6<5 7<5 8<6 9<6',
boost=len(terms.split(','))
)
title_query = Q(
'multi_match',
type='most_fields',
query=terms,
fields=search_on['title'],
fuzziness='AUTO',
operator='or',
minimum_should_match='1<1',
boost=20 - len(terms.split(',')) + 1
)
company_name_query = Q(
'multi_match',
type='best_fields',
query=terms,
fields=search_on['company'],
fuzziness='AUTO',
operator='or',
minimum_should_match='1<1',
boost=50
)
keyword_queries = Q(
'bool',
must=[
company_name_query
],
should=[
title_query,
description_query
]
) | Q(
'bool',
must=[
description_query
],
should=[
title_query,
company_name_query
]
)
return keyword_queries
@staticmethod
def _compute_decay_functions():
decay_function = SF(
'gauss',
publication_datetime=dict(
origin='now',
scale='30d',
offset='7d',
decay='0.1'
)
)
return [decay_function]
@staticmethod
    def _apply_geolocation_filters(query, latlon, radius):
        # Tuple parameters were removed in Python 3, so unpack explicitly
        lat, lon = latlon
query = query.filter(
'geo_distance',
geolocation=[lon, lat],
distance='%skm' % float(radius)
)
query = query.filter(
'term',
geolocation_is_valid=True
)
return query
@staticmethod
def _apply_date_sort(query):
query = query.sort(
'-publication_datetime',
'-_score'
)
return query
@expose('pyjobsweb.templates.jobs.list')
@paginate('jobs', items_per_page=items_per_page)
def index(self, query=None, radius=None, center=None, sort_by=None,
*args, **kwargs):
if not query and not radius and not center:
redirect('/jobs')
search_query = JobElastic().search()
relevance_sort = sort_by == 'scores'
if query:
keyword_queries = self._compute_keyword_queries(query)
decay_functions = self._compute_decay_functions()
search_query.query = Q(
'function_score',
query=keyword_queries,
functions=decay_functions
)
else:
relevance_sort = False
try:
geoloc_query = json.loads(center)
coordinates = geoloc_query['coordinates']
lat, lon = (coordinates['lat'], coordinates['lon'])
except (ValueError, TypeError):
            # One of the following cases has occurred:
            # - center wasn't a valid JSON string
            # - radius couldn't be converted to float
            # Since both pieces of information are required to set a
            # geolocation filter, we skip the filter.
pass
else:
search_query = self._apply_geolocation_filters(
search_query, (lat, lon), radius if radius else 5.0)
date_sort = not relevance_sort
if date_sort:
search_query = self._apply_date_sort(search_query)
return dict(sources=SOURCES, jobs=PaginatedSearch(search_query),
job_offer_search_form=JobsResearchForm)
class JobsController(BaseController):
items_per_page = 10
search = SearchJobsController(items_per_page)
@expose('pyjobsweb.templates.jobs.list')
@paginate('jobs', items_per_page=items_per_page)
def index(self, *args, **kwargs):
try:
job_offers = JobAlchemy.get_all_job_offers()
except NoResultFound:
job_offers = None
return dict(sources=SOURCES, jobs=job_offers,
job_offer_search_form=JobsResearchForm)
@expose('pyjobsweb.templates.jobs.details')
def details(self, offer_id, *args, **kwargs):
try:
job = JobAlchemy.get_job_offer(offer_id)
except NoResultFound:
raise HTTPNotFound()
except Exception as exc:
logging.getLogger(__name__).log(logging.ERROR, exc)
raise HTTPNotFound()
else:
return dict(
job=job,
sources=SOURCES
)
| 5,872 | 1,663 |
# coding: utf-8
from django.utils.translation import gettext as _
from market.core.urls import api
from urljects import U, slug, url
from market.checkout.views import cart, checkout, order, payment
action = slug.replace("slug", "action")
def active(user):
"""Check if the user is active with verified email."""
from allauth.account.models import EmailAddress
return (
user.is_authenticated() and
not EmailAddress.objects.filter(user=user, verified=False).exists()
)
urlpatterns = [
# cart views to allow user to gather their goods into a shopping cart
api(U / _('cart/'), cart.Cart, name='cart'),
api(U / _('cart/') / _('item'), cart.CartItem, name='cart-item'),
    # checkout views to facilitate the process of payments and shipping selection
    # first step: address form and shipping/payment selection
url(U / _('select.html'), checkout.Selection, name="checkout-selection"),
# second step of the checkout process
url(U / _('shipping.html'), checkout.Shipping, name='checkout-shipping'),
# third step of the checkout process
url(U / _('payment.html'), checkout.Payment, name='checkout-payment'),
# canceling of current order
url(U / _('cancel.html'), checkout.abort_checkout, name='checkout-cancel'),
# confirm email and unconfirmed orders
url(U / _('thank-you/') / (slug + '.html'), checkout.thank_you, name='checkout-thanks'),
    # fallback thank-you page when no order slug is given
url(U / _('thank-you.html'), checkout.thank_you, name='checkout-thanks'),
    # order views let the user and the vendor see orders
url(U / _('order.html'), order.OrderList, name="order-list"),
url(U / _('order/') / (slug + '.html'), order.OrderDetail, name="order-detail"),
url(U / _('order/') / _('change-status.json'), order.change_order_status, name='order-change-status'),
# TODO: checkout-order-shipped order=order.uid
url(U / _('download/') / slug / '(?P<document>invoice|proforma)', order.Download, name="order-download"),
# payment backends (other can be in separate apps)
url(U / _('payment/') / _('cash.html'), payment.pay_on_delivery, name='checkout-pay-on-delivery'),
]
| 2,143 | 667 |
from django import forms
class DismissNotificationForm(forms.Form):
pk = forms.IntegerField(required=True, min_value=1)
| 126 | 39 |
from .cli import cli
from .runner import DispatchRunner
from .script import load_pji_script
| 92 | 26 |
from collections import Sequence
from django.conf import settings
from openstack_dashboard.api import nova
from openstack_dashboard.api.base import Quota
from openstack_dashboard.api.nova import flavor_list
from openstack_dashboard.api.nova import novaclient
from openstack_dashboard.api.nova import server_list
from openstack_dashboard.api.nova import tenant_floating_ip_list
from openstack_dashboard.api.nova import tenant_quota_get
from horizon.utils.memoized import memoized
from tukey.cloud_attribute import get_cloud
from collections import OrderedDict
class NovaUsage(nova.NovaUsage):
_attrs = ['start', 'server_usages', 'stop', 'tenant_id',
'total_local_gb_usage', 'total_memory_mb_usage',
'total_vcpus_usage', 'total_hours',
'cloud_cores', 'cloud_du', 'cloud_ram', 'hadoop_jobs',
'hadoop_hdfsdu'] + settings.USAGE_ATTRIBUTES.values()
def get_summary(self):
        # An OrderedDict keeps the summary keys in display order
return OrderedDict([('instances', self.total_active_instances),
('memory_mb', self.memory_mb),
('vcpus', getattr(self, "total_vcpus_usage", 0)),
('vcpu_hours', self.vcpu_hours),
('local_gb', self.local_gb),
('disk_gb_hours', self.disk_gb_hours),
('cloud_cores', getattr(self, "cloud_cores", -1)),
('cloud_du', getattr(self, "cloud_du", -1)),
('hadoop_hdfsdu', getattr(self, "hadoop_hdfsdu", -1)),
('hadoop_jobs', getattr(self, "hadoop_jobs", -1)),
('Cloud Core Hours', getattr(self, "cloud_cores", -1)),
('Cloud Disk Usage (GB)', getattr(self, "cloud_du", -1)),
('Cloud RAM Hours (GB Hours)', getattr(self, "cloud_ram", -1)),
('Hadoop Disk Usage (GB)', getattr(self, "hadoop_hdfsdu", -1)),
('Hadoop Job Hours', getattr(self, "hadoop_jobs", -1))]
+ [(key, getattr(self, value, -1)) for key, value in
settings.USAGE_ATTRIBUTES.items()])
class QuotaSet2(Sequence):
"""
Wrapper for client QuotaSet objects which turns the individual quotas
into Quota objects for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
    the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
for k, v in apiresource.items():
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
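# A minimal usage sketch (hypothetical values; Quota objects expose .name and
# .limit):
#
#   qs = QuotaSet2({'instances': 10, 'cores': 20})
#   qs.get('instances').limit   # -> 10
#   len(qs)                     # -> 2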
def default_quota_get(request, tenant_id):
return cloud_quota(request, novaclient(request).quotas.defaults(tenant_id))
def tenant_quota_get(request, tenant_id):
return cloud_quota(request, novaclient(request).quotas.get(tenant_id))
def cloud_quota(request, quotas):
cloud = None
if 'cloud' in request.GET:
cloud = request.GET['cloud']
elif 'cloud' in request.POST:
cloud = request.POST['cloud']
if cloud is not None:
quotas = quotas._info[cloud]
        del quotas['cloud']
else:
# "sum" the quotas!
# The attributes not to sum
ignore = ['cloud', 'id']
if hasattr(quotas, '_info'):
clouds = quotas._info.keys()
if 'cloud' in quotas._info[clouds[0]]:
keys = []
for cloud in clouds:
keys += quotas._info[cloud].keys()
                # The parentheses around the conditional matter: without them,
                # a missing key would reset the running sum to 0 instead of
                # contributing 0
                quotas = {key:
                          reduce(
                              lambda s, c: s + (quotas._info[c][key]
                                                if key in quotas._info[c]
                                                else 0),
                              [0] + clouds)
                          for key in keys if key not in ignore}
return QuotaSet2(quotas)
| 4,498 | 1,408 |
"""Construct blocks.
A block consists of a collection of transactions (of some limited number),
plus additional metadata in a header. Blocks are generated with a proof-of-work
hash.
"""
import math # type: ignore
from toycoin import hash, merkle, transaction, utils # type: ignore
from typing import List, Optional, Tuple, TypedDict # type: ignore
################################################################################
Transactions = List[transaction.Transaction]
class BlockHeader(TypedDict):
timestamp: bytes
previous_hash: hash.Hash
nonce: bytes
merkle_root: hash.Hash
this_hash: hash.Hash
class Block(TypedDict):
header: BlockHeader
txns: Transactions
Blockchain = List[Block]
################################################################################
# Constructor
GENESIS = hash.hash(b'genesis') # genesis block previous hash
BLOCK_MAX_TXNS = 10 # magic number: max transactions per block
def gen_block(previous_hash: hash.Hash,
txns: Transactions,
difficulty: int
) -> Tuple[Optional[Block], Transactions]:
"""Attempt to generate a block from transactions.
Return a block (or None if failure), and remainder transactions.
"""
if not txns:
return None, []
txns_, rest = txns[:BLOCK_MAX_TXNS], txns[BLOCK_MAX_TXNS:]
tree = gen_merkle(txns_)
header = proof_of_work(previous_hash, tree.label, difficulty)
block : Block = {'header': header,
'txns': txns_}
return block, rest
def gen_merkle(txns: Transactions) -> merkle.MerkleTree:
"""Generate Merkle Tree given (non-empty) transactions."""
tree = merkle.from_list([transaction.hash_txn(txn) for txn in txns])
assert tree is not None
return tree
################################################################################
# Proof of Work
def next_difficulty(length: int) -> int:
"""Determine difficulty of next block, given length of current chain."""
return 1 if length < 1 else 1 + int(math.log2(length))
def proof_of_work(p: hash.Hash,
root: hash.Hash,
difficulty: int
) -> BlockHeader:
"""Naive POW solver."""
now = utils.int_to_bytes(utils.timestamp())
nonce = 0
h = b''
while not solved(h, difficulty):
nonce += 1
h = hash.hash(now + p + utils.int_to_bytes(nonce) + root)
return {'timestamp': now,
'previous_hash': p,
'nonce': utils.int_to_bytes(nonce),
'merkle_root': root,
'this_hash': h}
def solved(h: hash.Hash, n: int) -> bool:
"""Check if first n bytes are zeros."""
return h[:n] == bytes(n)
################################################################################
# Block Validation
def valid_blockchain(chain: Blockchain) -> bool:
"""Check validity of blockchain."""
pairs = zip(chain[1:], chain)
v1 = all(valid_hash_pair(b1, b0) for b1, b0 in pairs)
v2 = all(valid_block(block, next_difficulty(i))
for i, block in enumerate(chain))
v3 = chain[0]['header']['previous_hash'] == GENESIS
v4 = all(valid_timestamp(b1, b0) for b1, b0 in pairs)
return v1 and v2 and v3 and v4
def valid_block(block: Block, difficulty: int) -> bool:
"""Check if block transactions and header hashes are valid."""
tree = gen_merkle(block['txns'])
return (valid_header(block['header'], difficulty) and
tree.label == block['header']['merkle_root'])
def valid_header(header: BlockHeader, difficulty: int) -> bool:
"""Check if block hash matches header data."""
h = hash.hash(header['timestamp'] +
header['previous_hash'] +
header['nonce'] +
header['merkle_root'])
return (header['this_hash'] == h and
solved(header['this_hash'], difficulty))
def valid_hash_pair(b1: Block, b0: Block) -> bool:
"""B1 previous hash matches B0 hash."""
return b1['header']['previous_hash'] == b0['header']['this_hash']
def valid_timestamp(b1: Block, b0: Block) -> bool:
"""B1 has later timestmap than B0."""
return b1['header']['timestamp'] > b0['header']['timestamp']
# Token & Blockchain Validation
def valid_tokens(tokens: List[transaction.Token], chain: Blockchain) -> bool:
"""Tokens are unique and all come from prior txns in the blockchain."""
# backdoor for coinbase tokens
return (transaction.unique_tokens(tokens) and
all(valid_token(token, chain) for token in tokens))
def valid_token(token: transaction.Token, chain: Blockchain) -> bool:
"""Search blockchain backwards for txn source of token."""
for block in chain[::-1]:
txns = block['txns']
for txn in txns:
if transaction.valid_token(txn, token):
return True
return False
| 4,881 | 1,460 |
"""Main module."""
import logging
from datetime import datetime
from urllib.parse import urlparse
from typing import Any, Dict, List
import requests
def check_domain(domain_url: str) -> Dict[str, Any]:
    # Captured outside the try block so it is available in both branches
    current_time = datetime.now()
    try:
        session = requests.Session()
        response = session.get(domain_url)
        return {
            "healthy": response.ok,
            "latency": response.elapsed.microseconds // 1000,
            "content_type": response.headers.get("Content-Type"),
            "current_time": int(current_time.timestamp()),
            "domain_url": domain_url,
            "domain": urlparse(domain_url).hostname,
        }
    except Exception:
        return {
            "healthy": False,
            "latency": 0,
            "content_type": None,
            "current_time": int(current_time.timestamp()),
            "domain_url": domain_url,
            "domain": urlparse(domain_url).hostname,
        }
def access_domains(domains: List[str]) -> List[Dict[str, Any]]:
responses = []
for domain_url in domains:
try:
responses.append(check_domain(domain_url))
except Exception:
pass
return responses
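# Usage sketch (hypothetical URLs; performs live HTTP requests when run directly):
if __name__ == "__main__":
    for result in access_domains(["https://example.com", "https://example.org"]):
        print(result["domain"], result["healthy"], f"{result['latency']}ms")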
| 1,172 | 332 |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
@dace.program
def add(A: dace.complex64[5, 5], B: dace.float64[5, 5]):
return A + B
def test_add():
A = np.random.randint(0, high=10, size=(5, 5), dtype=np.uint64).astype(np.complex64)
B = np.random.randint(-10, high=0, size=(5, 5), dtype=np.int32).astype(np.float64)
C = add(A, B)
assert(np.linalg.norm(C - A - B) / np.linalg.norm(A + B) < 1e-12)
@dace.program
def complex_conversion(a: dace.complex128[1], b: dace.int32):
return a[0] + b
def test_complex_conversion():
a = np.zeros((1,), dtype=np.complex128)
a[0] = 5 + 6j
b = 7
c = complex_conversion(a=a, b=b)
assert(c[0] == 12 + 6j)
@dace.program
def float_conversion(a: dace.float32, b: dace.int64):
return a + b
def test_float_conversion():
a = np.float32(5.2)
b = np.int64(7)
c = float_conversion(a=a, b=b)
assert(c[0] == a + b)
if __name__ == "__main__":
test_add()
test_complex_conversion()
test_float_conversion()
| 1,071 | 499 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Type
from overrides import overrides
from archai.common.common import get_conf
from archai.nas.exp_runner import ExperimentRunner
from archai.nas.arch_trainer import ArchTrainer, TArchTrainer
from archai.algos.darts.bilevel_arch_trainer import BilevelArchTrainer
from archai.algos.gumbelsoftmax.gs_arch_trainer import GsArchTrainer
from .divnas_model_desc_builder import DivnasModelDescBuilder
from .divnas_finalizers import DivnasFinalizers
from .divnas_rank_finalizer import DivnasRankFinalizers
from archai.nas.finalizers import Finalizers
class DivnasExperimentRunner(ExperimentRunner):
@overrides
def model_desc_builder(self)->DivnasModelDescBuilder:
return DivnasModelDescBuilder()
@overrides
def trainer_class(self)->TArchTrainer:
conf = get_conf()
trainer = conf['nas']['search']['divnas']['archtrainer']
if trainer == 'bilevel':
return BilevelArchTrainer
elif trainer == 'noalpha':
return ArchTrainer
else:
            raise NotImplementedError(f'Unknown architecture trainer: {trainer}')
@overrides
def finalizers(self)->Finalizers:
conf = get_conf()
finalizer = conf['nas']['search']['finalizer']
if finalizer == 'mi':
return DivnasFinalizers()
elif finalizer == 'mi_ranked':
return DivnasRankFinalizers()
else:
return super().finalizers()
| 1,484 | 429 |
"""
Django settings for Seeder project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import re
from django.utils.translation import ugettext_lazy as _
# Import version to be displayed further
from .version import VERSION, VERSION_DATETIME
# The double dirname is necessary because this settings file lives in a subfolder
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'postgres',
},
'legacy_seeder': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'legacy_seeder',
'USER': 'root',
'PASSWORD': 'legacy'
}
}
ADMINS = (
('Visgean Skeloru', 'visgean@gmail.com'),
('Petr Manas', 'peter@petermanas.com'),
)
IGNORABLE_404_URLS = (
re.compile(r'\.(php|cgi)$'),
re.compile(r'^/phpmyadmin/'),
)
# Application definition
INSTALLED_APPS = (
'raven.contrib.django.raven_compat',
'dal',
'dal_select2',
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.postgres',
# 'djangobower', # everything is on cdn
'django_extensions',
'django_tables2',
'django_filters',
'bootstrap3',
'mptt',
'formtools',
'reversion',
'ckeditor',
'ckeditor_uploader',
'debug_toolbar',
'django_crontab',
'sorl.thumbnail',
'rest_framework',
'rest_framework.authtoken',
'captcha',
'ordered_model',
# 'haystack',
# 'elasticstack',
'core',
'publishers',
'source',
'voting',
'comments',
'contracts',
'legacy_db',
'harvests',
'blacklists',
'qa',
'www',
'search_blob',
)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'reversion.middleware.RevisionMiddleware',
'django.middleware.locale.LocaleMiddleware',
)
SESSION_COOKIE_NAME = 'seeder_sessionid'
# In seconds, 14400 = 4 * 60 * 60 (4 hours)
try:
    SESSION_COOKIE_AGE = int(os.environ.get("SESSION_COOKIE_AGE", "14400"))
except ValueError:
    SESSION_COOKIE_AGE = 14400
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'djangobower.finders.BowerFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.template.context_processors.request',
'core.context_processors.core_processor',
)
},
},
]
# APP_DIRS = True
#
# TEMPLATES = {
# 'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': TEMPLATE_DIRS,
# 'APP_DIRS': True,
# 'OPTIONS': {
# 'context_processors': TEMPLATE_CONTEXT_PROCESSORS
# }
# }
LANGUAGES = (
('cs', _('Czech')),
('en', _('English')),
)
CALENDAR_LANGUAGES = {
'cs': 'cs-CZ',
'en': 'en-US'
}
MODELTRANSLATION_DEFAULT_LANGUAGE = 'cs'
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
BOWER_COMPONENTS_ROOT = BASE_DIR
BOWER_INSTALLED_APPS = () # everything is on CDN now
LOGIN_URL = '/seeder/auth/login/'
LOGOUT_URL = '/seeder/auth/logout/'
LOGIN_REDIRECT_URL = '/'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', 'Link'],
],
},
'mini': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', 'Link'],
],
'width': 800,
'height': 100,
},
}
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'core.utils.show_toolbar',
}
CRONJOBS = [
('1 * * * *', 'source.screenshots.take_screenshots'),
('10 * * * *', 'voting.cron.revive_postponed_rounds'),
('20 * * * *', 'contracts.cron.expire_contracts'),
('30 * * * *', 'contracts.cron.send_emails'),
]
# * * * * * command to be executed
# - - - - -
# | | | | |
# | | | | +----- day of week (0 - 6) (Sunday=0)
# | | | +------- month (1 - 12)
# | | +--------- day of month (1 - 31)
# | +----------- hour (0 - 23)
# +------------- min (0 - 59)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
]
}
if DEBUG:
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = [
'rest_framework.permissions.AllowAny'
]
WAKAT_URL = 'http://forpsi.kitakitsune.org:8080/?url_id={id}'
WAYBACK_URL = "http://wayback.webarchiv.cz/wayback/query?type=urlquery&url={url}"
SEEDS_EXPORT_DIR = 'seeds'
MANET_URL = '127.0.0.1:8891'
QA_EVERY_N_MONTHS = 24
LEGACY_URL = 'http://intranet.webarchiv.cz/wadmin/tables/resources/view/{pk}'
LEGACY_SCREENSHOT_URL = 'http://www.webarchiv.cz/images/resource/thumb/small_{id}_{date}.jpg'
LEGACY_SCREENSHOT_URL_PNG = 'http://www.webarchiv.cz/images/resource/thumb/small_{id}_{date}.png'
WEBARCHIV_EMAIL = 'webarchiv@nkp.cz'
# RECAPTCHA_PUBLIC_KEY = ''
# RECAPTCHA_PRIVATE_KEY = ''
NOCAPTCHA = True
| 7,612 | 2,852 |
import argparse
from src import passwordops
from src import fileops
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--password", help="Password operations")
parser.add_argument("--file", help="File operations")
parser.add_argument("--network", help="Network operations")
args = parser.parse_args()
if args.password == "create":
    passwordops.creator()
elif args.password == "read":
    passwordops.reader()
elif args.file == "info":
    fileops.information()
elif args.file == "size":
    fileops.size()
elif args.file == "rename":
    fileops.rename()
elif args.file == "compare":
    fileops.compare()
elif args.file == "backup":
    fileops.backup()
else:
    print("Pytool -> CLI tools for password, file and network management.")
| 915 | 265 |
from dataclasses import dataclass
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from .serializable import SerializableMixin
@revert_docstring
@dataclass
@cover_docstring
class MetadataMixin(SerializableMixin):
pass
| 262 | 82 |
import datetime
from dashboard.mixins import DashboardMixin
class DashboardContainer(object):
title = ""
date = None
content = ""
url = "";
author = ""
def __init__(self, title, content, date, url, author):
self.title = title
self.content = content
self.date = date
self.url = url
self.author = author
| 369 | 108 |
#!/usr/bin/env python
import glob
import serial
import sys
from internal.common import Result
import time
WIFI_SSID = 'ZEROROOT'
WIFI_PASSWORD = 'zeroroot'
class TestResultCollector:
def __init__(self, usb_device=None):
if usb_device is None:
usb_device = self.get_usb_tty_number()
self.serial = self.create_serial(usb_device)
def get_usb_tty_number(self):
ttyUSBs = glob.glob('/sys/class/tty/ttyUSB*')
if len(ttyUSBs) == 0:
            print('TizenRT is not connected')
            sys.exit(1)
return '/dev/{}'.format(ttyUSBs[0].split('/')[-1])
def create_serial(self, usb_device):
return serial.Serial(usb_device, 115200, timeout=70)
def collect(self, options=''):
time.sleep(2)
self.write_connecting_wifi_command()
command = 'iot_rt_unittest ' + options + '\n'
self.serial.write(command)
return self.read_serial_output()
def write_connecting_wifi_command(self):
self.serial.write('wifi startsta\n')
time.sleep(2)
self.serial.write('wifi join {} {} wpa2_aes\n'.format(WIFI_SSID, WIFI_PASSWORD))
time.sleep(2)
self.serial.write('ifconfig wl1 dhcp\n')
time.sleep(2)
def read_serial_output(self):
while True:
line = self.serial.readline()
if line == '':
print('Timeout')
return Result(exitcode=1,
message='timeout: Core Dump may occur')
sys.stdout.write(line)
if self.is_test_result(line):
return Result(
exitcode=self.get_test_exitcode(line),
message=line)
if self.is_core_dump(line):
return Result(exitcode=1, message=line)
def get_test_exitcode(self, line):
arr = line.split(' ')
if arr[2] == '0':
return 0
return 1
def is_test_result(self, line):
return 'Tests' in line and 'Failure' in line and 'Ignored' in line
def is_core_dump(self, line):
return '(core dumped)' in line
def test_get_usb_tty_number():
    assert TestResultCollector().get_usb_tty_number() == '/dev/ttyUSB1'
def test_create_serial():
    assert TestResultCollector().create_serial('/dev/ttyUSB1') is not None
def test_is_core_dump():
    assert TestResultCollector().is_core_dump('Aborted (core dumped)')
| 2,437 | 788 |
#!/usr/bin/env python
# coding: utf-8
print(int('123', base=8))
def int2(x, base=2):
    return int(x, base)
print(int2('10010'))
import functools
int2 = functools.partial(int, base=2)
print(int2('10010'))
| 216 | 102 |
"""Module for sampling from a distribution of regression environments."""
from collections import OrderedDict
from functools import partial
import sklearn.datasets
from .data_environment import DataEnvironment
from ..data_types import FeatureType, TargetType, DataSourceType
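# NOTE: sklearn.datasets.load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2, so the "sklearn.boston" environment below assumes an older
# scikit-learn release.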
def envs(n=5, test_size=None, random_state=None, verbose=False):
# TODO: limit number of envs by n
return OrderedDict([
("sklearn.boston", DataEnvironment(
name="sklearn.boston",
source=DataSourceType.SKLEARN,
target_type=TargetType.REGRESSION,
feature_types=[
FeatureType.CONTINUOUS, # CRIM
FeatureType.CONTINUOUS, # ZN
FeatureType.CONTINUOUS, # INDUS
FeatureType.CATEGORICAL, # CHAS
FeatureType.CONTINUOUS, # NOX
FeatureType.CONTINUOUS, # RM
FeatureType.CONTINUOUS, # AGE
FeatureType.CONTINUOUS, # DIS
FeatureType.CONTINUOUS, # RAD
FeatureType.CONTINUOUS, # TAX
FeatureType.CONTINUOUS, # PTRATIO
FeatureType.CONTINUOUS, # B
FeatureType.CONTINUOUS, # LSTAT
],
feature_indices=range(13),
fetch_training_data=partial(
sklearn.datasets.load_boston, return_X_y=True),
fetch_test_data=None,
test_size=test_size,
random_state=random_state,
target_preprocessor=None,
scorer=None)),
("sklearn.diabetes", DataEnvironment(
name="sklearn.diabetes",
source=DataSourceType.SKLEARN,
target_type=TargetType.REGRESSION,
feature_types=[FeatureType.CONTINUOUS for _ in range(10)],
feature_indices=range(10),
fetch_training_data=partial(
sklearn.datasets.load_diabetes, return_X_y=True),
fetch_test_data=None,
test_size=test_size,
random_state=random_state,
target_preprocessor=None,
scorer=None)),
("sklearn.linnerud", DataEnvironment(
name="sklearn.linnerud",
source=DataSourceType.SKLEARN,
target_type=TargetType.MULTIREGRESSION,
feature_types=[FeatureType.CONTINUOUS for _ in range(3)],
feature_indices=range(3),
fetch_training_data=partial(
sklearn.datasets.load_linnerud, return_X_y=True),
fetch_test_data=None,
test_size=test_size,
random_state=random_state,
target_preprocessor=None,
scorer=None)),
])
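# Usage sketch (hypothetical attribute access; DataEnvironment is assumed to
# expose its constructor keywords as attributes):
# for name, env in envs().items():
#     print(name, env.target_type)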
| 2,685 | 791 |
from __future__ import print_function
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from django.forms.models import model_to_dict
from .cli_colors import BOLD, pretty_output, FG_RED, FG_GREEN
from .cli_helpers import add_geoserver_rest_to_endpoint
from builtins import input
SERVICES_CREATE = 'create'
SERVICES_CREATE_PERSISTENT = 'persistent'
SERVICES_CREATE_SPATIAL = 'spatial'
SERVICES_LINK = 'link'
SERVICES_LIST = 'list'
class FormatError(Exception):
def __init__(self):
Exception.__init__(self)
def services_create_persistent_command(args):
"""
Interact with Tethys Services (Spatial/Persistent Stores) to create them and/or link them to existing apps
"""
from tethys_services.models import PersistentStoreService
name = None
try:
name = args.name
connection = args.connection
parts = connection.split('@')
cred_parts = parts[0].split(':')
store_username = cred_parts[0]
store_password = cred_parts[1]
url_parts = parts[1].split(':')
host = url_parts[0]
port = url_parts[1]
new_persistent_service = PersistentStoreService(name=name, host=host, port=port,
username=store_username, password=store_password)
new_persistent_service.save()
with pretty_output(FG_GREEN) as p:
p.write('Successfully created new Persistent Store Service!')
except IndexError:
with pretty_output(FG_RED) as p:
p.write('The connection argument (-c) must be of the form "<username>:<password>@<host>:<port>".')
except IntegrityError:
with pretty_output(FG_RED) as p:
p.write('Persistent Store Service with name "{0}" already exists. Command aborted.'.format(name))
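# Example (hypothetical values): a --connection/-c string such as
# "dbuser:dbpass@localhost:5435" is split on '@' and ':' into the username,
# password, host, and port fields used above.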
def services_remove_persistent_command(args):
from tethys_services.models import PersistentStoreService
persistent_service_id = None
try:
persistent_service_id = args.service_uid
force = args.force
try:
persistent_service_id = int(persistent_service_id)
service = PersistentStoreService.objects.get(pk=persistent_service_id)
except ValueError:
service = PersistentStoreService.objects.get(name=persistent_service_id)
if force:
service.delete()
with pretty_output(FG_GREEN) as p:
p.write('Successfully removed Persistent Store Service {0}!'.format(persistent_service_id))
exit(0)
else:
proceed = input('Are you sure you want to delete this Persistent Store Service? [y/n]: ')
while proceed not in ['y', 'n', 'Y', 'N']:
proceed = input('Please enter either "y" or "n": ')
if proceed in ['y', 'Y']:
service.delete()
with pretty_output(FG_GREEN) as p:
p.write('Successfully removed Persistent Store Service {0}!'.format(persistent_service_id))
exit(0)
else:
with pretty_output(FG_RED) as p:
p.write('Aborted. Persistent Store Service not removed.')
exit(0)
except ObjectDoesNotExist:
with pretty_output(FG_RED) as p:
p.write('A Persistent Store Service with ID/Name "{0}" does not exist.'.format(persistent_service_id))
exit(0)
def services_create_spatial_command(args):
"""
Interact with Tethys Services (Spatial/Persistent Stores) to create them and/or link them to existing apps
"""
from tethys_services.models import SpatialDatasetService
name = None
try:
name = args.name
connection = args.connection
parts = connection.split('@')
cred_parts = parts[0].split(':')
service_username = cred_parts[0]
service_password = cred_parts[1]
endpoint = parts[1]
public_endpoint = args.public_endpoint or ''
apikey = args.apikey or ''
        if 'http' not in endpoint or '://' not in endpoint:
            raise IndexError()
        # Only validate public_endpoint when one was actually provided; the
        # grouping parentheses keep an empty value from tripping the check.
        if public_endpoint and ('http' not in public_endpoint or '://' not in public_endpoint):
            raise FormatError()
endpoint = add_geoserver_rest_to_endpoint(endpoint)
if public_endpoint:
public_endpoint = add_geoserver_rest_to_endpoint(public_endpoint)
        new_spatial_service = SpatialDatasetService(name=name, endpoint=endpoint, public_endpoint=public_endpoint,
                                                    apikey=apikey, username=service_username,
                                                    password=service_password)
        new_spatial_service.save()
with pretty_output(FG_GREEN) as p:
p.write('Successfully created new Spatial Dataset Service!')
except IndexError:
with pretty_output(FG_RED) as p:
            p.write('The connection argument (-c) must be of the form '
                    '"<username>:<password>@<protocol>://<host>:<port>".')
except FormatError:
with pretty_output(FG_RED) as p:
            p.write('The public_endpoint argument (-p) must be of the form '
                    '"<protocol>://<host>:<port>".')
except IntegrityError:
with pretty_output(FG_RED) as p:
p.write('Spatial Dataset Service with name "{0}" already exists. Command aborted.'.format(name))
def services_remove_spatial_command(args):
from tethys_services.models import SpatialDatasetService
spatial_service_id = None
try:
spatial_service_id = args.service_uid
force = args.force
try:
spatial_service_id = int(spatial_service_id)
service = SpatialDatasetService.objects.get(pk=spatial_service_id)
except ValueError:
service = SpatialDatasetService.objects.get(name=spatial_service_id)
if force:
service.delete()
with pretty_output(FG_GREEN) as p:
p.write('Successfully removed Spatial Dataset Service {0}!'.format(spatial_service_id))
exit(0)
else:
            proceed = input('Are you sure you want to delete this Spatial Dataset Service? [y/n]: ')
while proceed not in ['y', 'n', 'Y', 'N']:
proceed = input('Please enter either "y" or "n": ')
if proceed in ['y', 'Y']:
service.delete()
with pretty_output(FG_GREEN) as p:
p.write('Successfully removed Spatial Dataset Service {0}!'.format(spatial_service_id))
exit(0)
else:
with pretty_output(FG_RED) as p:
p.write('Aborted. Spatial Dataset Service not removed.')
exit(0)
except ObjectDoesNotExist:
with pretty_output(FG_RED) as p:
p.write('A Spatial Dataset Service with ID/Name "{0}" does not exist.'.format(spatial_service_id))
exit(0)
def services_list_command(args):
"""
    List configured Tethys Services (Spatial/Persistent Stores)
"""
from tethys_services.models import SpatialDatasetService, PersistentStoreService
list_persistent = False
list_spatial = False
if not args.spatial and not args.persistent:
list_persistent = True
list_spatial = True
elif args.spatial:
list_spatial = True
elif args.persistent:
list_persistent = True
if list_persistent:
persistent_entries = PersistentStoreService.objects.order_by('id').all()
if len(persistent_entries) > 0:
with pretty_output(BOLD) as p:
p.write('\nPersistent Store Services:')
is_first_entry = True
for entry in persistent_entries:
model_dict = model_to_dict(entry)
if is_first_entry:
with pretty_output(BOLD) as p:
p.write('{0: <3}{1: <50}{2: <25}{3: <6}'.format('ID', 'Name', 'Host', 'Port'))
is_first_entry = False
print('{0: <3}{1: <50}{2: <25}{3: <6}'.format(model_dict['id'], model_dict['name'],
model_dict['host'], model_dict['port']))
if list_spatial:
spatial_entries = SpatialDatasetService.objects.order_by('id').all()
if len(spatial_entries) > 0:
with pretty_output(BOLD) as p:
p.write('\nSpatial Dataset Services:')
is_first_entry = True
for entry in spatial_entries:
model_dict = model_to_dict(entry)
if is_first_entry:
with pretty_output(BOLD) as p:
p.write('{0: <3}{1: <50}{2: <50}{3: <50}{4: <30}'.format('ID', 'Name', 'Endpoint',
'Public Endpoint', 'API Key'))
is_first_entry = False
print('{0: <3}{1: <50}{2: <50}{3: <50}{4: <30}'.format(model_dict['id'], model_dict['name'],
model_dict['endpoint'],
model_dict['public_endpoint'],
model_dict['apikey'] if model_dict['apikey']
else "None"))
| 9,561 | 2,689 |
"""Neural networks utilities"""
import numpy as np
from ..coreDataContainers import Variable
from ..operations.activationOperations import *
from ..operations.costOperations import *
from ..operations.twoInputOperations import *
from ..operations.singleInputOperations import *
from ..operations.convolutionOperation import *
from ..operations.transformationOperations import *
from ..operations.multipleInputOperations import *
from .misc import generateRandomVariable, generateZeroVariable
def addDenseLayer(mainGraph, nOutputNodes,
inputOperation=None,
activation=ReLUActivation,
dropoutRate=0,
batchNormalisation=False):
"""Append a dense layer to the graph
Parameters
----------
mainGraph : ga.Graph
computation graph to which append the dense layer
nOutputNodes : int
Number of output nodes
inputOperation : ga.Operation
operation feeding the data to the layer
activation : ga.SingleInputOperation
        activation operation of choice
dropoutRate : float
dropout rate at the end of this layer
batchNormalisation: bool
Whether to use Batch normalisation
    Returns
    -------
    ga.Operation
        Last operation of the dense layer
    """
    # Resolve the default input before reading its shape; inputOperation
    # may legitimately be None here.
    if inputOperation is None:
        inputOperation = mainGraph.operations[-1]
    N, D = inputOperation.shape
w = generateRandomVariable(shape=(nOutputNodes, D),
transpose=True, nInputs=D)
b = generateRandomVariable(shape=nOutputNodes,
transpose=False, nInputs=1)
wo = mainGraph.addOperation(w, doGradient=True)
bo = mainGraph.addOperation(b, doGradient=True)
mmo = mainGraph.addOperation(MatMatmulOperation(inputOperation, wo),
doGradient=False,
finalOperation=False)
addo = mainGraph.addOperation(AddOperation(mmo, bo),
doGradient=False,
finalOperation=False)
if (dropoutRate > 0):
dpo = mainGraph.addOperation(DropoutOperation(addo, dropoutRate),
doGradient=False,
finalOperation=False)
else:
dpo = addo
if (batchNormalisation):
beta = mainGraph.addOperation(generateRandomVariable((1, nOutputNodes)), doGradient=True)
gamma = mainGraph.addOperation(generateRandomVariable((1, nOutputNodes)), doGradient=True)
bnorm = mainGraph.addOperation(BatchNormalisationOperation(dpo, beta, gamma))
else:
bnorm = dpo
acto = mainGraph.addOperation(activation(bnorm),
doGradient=False,
finalOperation=False)
return acto
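# Usage sketch (hedged; assumes a ga.Graph fed by an input Variable):
# mainGraph = ga.Graph()
# feed = mainGraph.addOperation(Variable(X), feederOperation=True)
# hidden = addDenseLayer(mainGraph, 128, inputOperation=feed, dropoutRate=0.5)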
def addConv2dLayer(mainGraph,
inputOperation=None,
nFilters=1,
filterHeigth=2,
filterWidth=2,
padding="SAME",
convStride=1,
activation=ReLUActivation,
batchNormalisation=False,
pooling=MaxPoolOperation,
poolHeight=2,
poolWidth=2,
poolStride=2):
"""Append a convolution2D layer with pooling
Parameters
----------
mainGraph : ga.Graph
computation graph to which append the dense layer
inputOperation : ga.Operation
operation feeding the data to the layer
nFilters : int
number of filter to be applied for the convolution
filterHeigth : int
        convolution filter height
filterWidth : int
convolution filter width
padding: "SAME" or "VALID"
padding method for the convolution
convStride : int
stride for the convolution filter
activation : ga.SingleInputOperation
        activation operation of choice
batchNormalisation: bool
Whether to use Batch normalisation
pooling : ga.SingleInputOperation
pooling operation of choice
poolHeight : int
        height of the pooling filter
poolWidth : int
width of the pooling filter
poolStride : int
stride of the pooling operation
Returns
-------
ga.Operation
        Last operation of the convolution layer
"""
N, C, H, W = inputOperation.shape
w = generateRandomVariable(shape=(nFilters, C, filterHeigth, filterWidth),
transpose=False, nInputs=(filterHeigth * filterWidth * C))
b = generateRandomVariable(shape=(1, nFilters, 1, 1), transpose=False, nInputs=1)
filterWop = mainGraph.addOperation(w, doGradient=True, feederOperation=False)
opConv2d = mainGraph.addOperation(Conv2dOperation(
inputOperation, filterWop, stride=convStride, paddingMethod=padding))
filterBop = mainGraph.addOperation(b, doGradient=True, feederOperation=False)
addConv2d = mainGraph.addOperation(AddOperation(opConv2d, filterBop))
if (batchNormalisation):
beta = mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)
gamma = mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)
bnorm = mainGraph.addOperation(BatchNormalisationOperation(addConv2d, beta, gamma))
else:
bnorm = addConv2d
actop = mainGraph.addOperation(activation(bnorm),
doGradient=False,
finalOperation=False)
poolOP = mainGraph.addOperation(pooling(inputA=actop,
poolHeight=poolHeight,
poolWidth=poolWidth,
stride=poolStride))
return poolOP
| 6,078 | 1,590 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 17:14:58 2020
@author: mengmi
"""
import IPython.display
# cd into virtualhome repo
import sys
sys.path.append('../simulation/')
from unity_simulator.comm_unity import UnityCommunication
import PIL
import numpy as np
from collections import defaultdict
import cv2
import os
import math
import pickle
import random
def display_grid_img(images_old, nrows=1):
images = [x for x in images_old]
h, w, _ = images[0].shape
ncols = int((len(images)+nrows-1)/nrows)
missing = ncols - (len(images)%ncols)
for m in range(missing):
images.append(np.zeros((h, w, 3)).astype(np.uint8))
img_final = []
for it_r in range(nrows):
init_ind = it_r * ncols
end_ind = init_ind + ncols
images_take = [images[it] for it in range(init_ind, end_ind)]
img_final.append(np.concatenate(images_take, 1))
img_final = np.concatenate(img_final, 0)
img_final = PIL.Image.fromarray(img_final[:,:,::-1])
return img_final
def display_scene_modalities(img_height, img_width,
comm, ids, modalities=['normal', 'seg_class', 'seg_inst', 'depth'], nrows=1):
# Check the number of cameras
_, ncameras = comm.camera_count()
#print(ncameras)
cameras_select = list(range(ncameras))
cameras_select = [cameras_select[x] for x in ids]
imgs_modality = []
for mode_name in modalities:
(ok_img, imgs) = comm.camera_image(cameras_select, mode=mode_name, image_width=img_height, image_height=img_width)
#print(imgs)
if mode_name == 'depth':
#imgs = [((x/np.max(x))*255.).astype(np.uint8) for x in imgs]
imgs = [(x*255.).astype(np.uint8) for x in imgs]
imgs_modality += imgs
img_final = display_grid_img(imgs_modality, nrows=nrows)
return img_final
def find_nodes(graph, **kwargs):
if len(kwargs) == 0:
return None
else:
k, v = next(iter(kwargs.items()))
return [n for n in graph['nodes'] if n[k] == v]
def find_nodes_byclassname(graph, classname):
return [n for n in graph['nodes'] if n['class_name'] == classname]
def find_nodes_byid(graph, idnum):
return [n for n in graph['nodes'] if n['id'] == idnum]
def find_edges(graph, **kwargs):
if len(kwargs) == 0:
return None
else:
k, v = next(iter(kwargs.items()))
return [n for n in graph['edges'] if n[k] == v]
def find_allRooms(graph):
return [n for n in graph['nodes'] if n['category'] == 'Rooms']
def find_rooms(graph, fromnode):
roomnodes = find_allRooms(graph)
if fromnode['category'] != 'Rooms':
for node in roomnodes:
bboxroom = node['bounding_box']
bboxobj = fromnode['bounding_box']
status = checkTwo3DBboxOverlap(bboxobj, bboxroom)
if status:
return node['class_name']
return fromnode['class_name']
def find_rooms_graphedges(graph, fromnode):
while fromnode['category'] != 'Rooms':
objedge = find_edges(graph, from_id = fromnode['id'])[0]
fromnode_id = objedge['to_id']
fromnode = find_nodes_byid(graph, fromnode_id)[0]
return fromnode
def displayAllBbox(img_height, img_width, JasonData, img):
#convert to cv2 image and ready to draw
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
for infor in JasonData.items():
left = infor[1]['bbox'][2]
top = infor[1]['bbox'][0]
right = infor[1]['bbox'][3]
bottom = infor[1]['bbox'][1]
color = (0, 0, 255)
thick = 3
label = infor[1]['class_name'] +', ' + infor[1]['roomtype']
cv2.rectangle(img,(left, top), (right, bottom), color, thick)
cv2.putText(img, label, (left, top - 12), 0, 1e-3 * img_width, color, thick//3)
status = True
return status, img
def displayTargetBbox(img_height, img_width, JasonData, img, targetid, textflag, boxflag):
#convert to cv2 image and ready to draw
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
status = False
for infor in JasonData.items():
if infor[1]['prefab_id'] == targetid:
left = infor[1]['bbox'][2]
top = infor[1]['bbox'][0]
right = infor[1]['bbox'][3]
bottom = infor[1]['bbox'][1]
targetbbox = [left, top, right, bottom]
color = (0, 0, 255)
thick = 3
label = infor[1]['class_name'] +', ' + infor[1]['roomtype']
if boxflag:
cv2.rectangle(img,(left, top), (right, bottom), color, thick)
if textflag:
cv2.putText(img, label, (left, top - 12), 0, 1e-3 * img_width, color, thick//3)
status = True
targetarea = infor[1]['area']#(bottom - top)*(right - left)
return status, targetarea, targetbbox, img
return status, 0, 0, img
def extractColorInstanceTable(graph, message_color):
ColorInstLookUpTab = {}
for prefab_id in message_color:
prefab_id = int(prefab_id)
#print(type(prefab_id))
objcolor_sm = message_color.get(str(prefab_id)) #color range from [0,1]
#print(objcolor_sm)
objcolor = np.round(np.array(objcolor_sm['Item1'], dtype=np.float32)*255.0).astype(np.uint8) #color range from [0,255]
objcolor = tuple(objcolor)
objnode = find_nodes_byid(graph, prefab_id)[0]
infor = {}
infor['prefab_id'] = prefab_id
infor['prefab_name'] = objnode['prefab_name']
infor['class_name'] = objnode['class_name']
infor['category'] = objnode['category']
roomname = find_rooms(graph, objnode)
infor['roomtype'] = roomname
ColorInstLookUpTab[objcolor] = infor
return ColorInstLookUpTab
def extractJasonInstanceTable(img_inst_pil, img_inst_np, ColorInstLookUpTab):
img_inst_color_tab = defaultdict(int)
for pixel in img_inst_pil.getdata():
img_inst_color_tab[pixel] +=1
[imgw, imgh, imgc] = img_inst_np.shape
#consolidate all objects infor on image and output jasondata for this image
JasonData = {}
for pixel in img_inst_color_tab:
if pixel in ColorInstLookUpTab.keys():
X,Y = np.where(np.all(img_inst_np==np.asarray(pixel),axis=2))
bbox = [min(X), max(X), min(Y), max(Y)]
instinfor = ColorInstLookUpTab.get(pixel)
infor = {}
infor['prefab_id'] = instinfor['prefab_id']
infor['prefab_name'] = instinfor['prefab_name']
infor['class_name'] = instinfor['class_name']
infor['roomtype'] = instinfor['roomtype']
infor['category'] = instinfor['category']
infor['bbox'] = bbox
infor['color'] = pixel
infor['area'] = img_inst_color_tab.get(pixel)*1.0/(imgw*imgh) #ratio of isntance area on the entire image
JasonData[pixel] = infor
return JasonData
def convertPILImageToNumpyImage(img_all_pil, img_height, img_width):
#img contains modalities=['normal', 'seg_class', 'seg_inst'], nrows=3
#split into three images (normal, seg_class, seg_instance)
img_ori_pil = img_all_pil.crop((0, img_width*0, img_height, img_width*1))
img_class_pil = img_all_pil.crop((0, img_width*1, img_height, img_width*2))
img_inst_pil = img_all_pil.crop((0, img_width*2, img_height, img_width*3))
#convert to numpy array
img_ori_np = np.array(img_ori_pil)
img_class_np = np.array(img_class_pil)
img_inst_np = np.array(img_inst_pil)
return img_ori_pil, img_class_pil, img_inst_pil, img_ori_np, img_class_np, img_inst_np
def IsHighContrast(img_height, img_width, ThresContrast, RatioCroppedContrast, JasonData, img, targetid):
#convert to cv2 image and ready to draw
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
imgY = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)[:,:,0]
status = False
for infor in JasonData.items():
if infor[1]['prefab_id'] == targetid:
left = infor[1]['bbox'][2]
top = infor[1]['bbox'][0]
right = infor[1]['bbox'][3]
bottom = infor[1]['bbox'][1]
#print(infor[1]['bbox'])
width = bottom - top
height = right - left
            # Clamp the padded crop window to the image boundaries
            left = max(int(left - RatioCroppedContrast*height), 0)
            right = min(int(right + RatioCroppedContrast*height), img_height - 1)
            top = max(int(top - RatioCroppedContrast*width), 0)
            bottom = min(int(bottom + RatioCroppedContrast*width), img_width - 1)
cropped_imgY = imgY[top:bottom, left:right]
# compute min and max of Y
#print(cropped_imgY.shape)
if cropped_imgY.shape[0] == 0 or cropped_imgY.shape[1] == 0:
return False
Ymin = np.min(cropped_imgY)
Ymax = np.max(cropped_imgY)
#print(Ymin)
#print(Ymax)
# compute contrast
contrast = (Ymax-Ymin)/(Ymax+Ymin)
#print(contrast)
if contrast > ThresContrast:
status = True
return status
def checkCameraImageFitness(JasonData, targetprefabid, ThresRoomArea):
#two criterias for a good pic:
#1. the target object is on the pic
#2. the camera is mostly looking at one room (not crossing two rooms); ThresRoomArea
statusTarget = False #cond1 flag
statusRoom = False #cond2 flag
#keep track of total areas for each room type
roomarea = defaultdict(float)
for infor in JasonData.items():
roomarea[infor[1]['roomtype']] += infor[1]['area']
if infor[1]['prefab_id'] == targetprefabid:
statusTarget = True
targetroom = infor[1]['roomtype']
if not statusTarget:
#print('Target not in pic')
return False
else:
return True
# otherarea = 0.0
# for roomtype in roomarea:
# if roomtype != targetroom:
# otherarea += roomarea.get(roomtype)
# if otherarea <= ThresRoomArea:
# statusRoom = True
#
# if not statusRoom:
# print('contain too many rooms!')
# return statusTarget & statusRoom
def checkCameraImageBlackSky(img_ori_np, ThresBlackSkyArea):
[imgw, imgh, imgc] = img_ori_np.shape
X,Y = np.where(np.all(img_ori_np==np.asarray([0,0,0]),axis=2))
area = len(X)*1.0/(imgw*imgh)
if area >= ThresBlackSkyArea:
return False
else:
return True
def IsTargetCollision(JasonData, graph, target_id):
targetnode = find_nodes_byid(graph, target_id)[0]
targetbbox = targetnode['bounding_box']
for infor in JasonData.items():
if infor[1]['prefab_id'] == target_id:
continue
elif infor[1]['category'] == 'Rooms':
continue
else:
objbbox = find_nodes_byid(graph, infor[1]['prefab_id'])[0]['bounding_box']
status = checkTwo3DBboxOverlap(targetbbox, objbbox) or checkTwo3DBboxOverlap(objbbox, targetbbox)
if status:
print("collided with: " + infor[1]['prefab_name'] + "; from: " + infor[1]['category'])
return True #collision is happening
return False
def checkTwo3DBboxOverlap(bbox1, bbox2):
#get 8 vertex of bbox1
vertexlist = []
for i in [-1,1]:
for j in [-1,1]:
for k in [-1,1]:
point = np.array([bbox1['center'][0]+i*bbox1['size'][0]/2, bbox1['center'][1]+j*bbox1['size'][1]/2, bbox1['center'][2]+k*bbox1['size'][2]/2])
vertexlist.append(point)
    #check whether each vertex is within bbox2
for i in range(8):
status = isPointInsideBox(vertexlist[i], bbox2)
if status:
return True
return False
def checkCamCollision(cam_pos, graph):
    # Report a collision as soon as the camera position falls inside the
    # bounding box of any non-room, non-wall object.
    for node in graph['nodes']:
        if node['category'] == 'Rooms' or node['category'] == 'Walls':
            continue
        if isPointInsideBox(cam_pos, node['bounding_box']):
            print(node['prefab_name'])
            return True
    return False
def isPointInsideBox(point, bbox):
#get bbox2 boundaries
minX = bbox['center'][0] - bbox['size'][0]/2
maxX = bbox['center'][0] + bbox['size'][0]/2
minY = bbox['center'][1] - bbox['size'][1]/2
maxY = bbox['center'][1] + bbox['size'][1]/2
minZ = bbox['center'][2] - bbox['size'][2]/2
maxZ = bbox['center'][2] + bbox['size'][2]/2
return (point[0] >= minX and point[0] <= maxX) and (point[1] >= minY and point[1] <= maxY) and (point[2] >= minZ and point[2] <= maxZ)
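# Sanity check (hypothetical bbox): a 2x2x2 box centered at the origin
# contains (0.5, 0.5, 0.5) but not (2.0, 0, 0):
# isPointInsideBox([0.5, 0.5, 0.5], {'center': [0, 0, 0], 'size': [2, 2, 2]})  # -> True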
def FindOptimalCamTargetConfig_original(targetSz, targetYpos, NumRes):
if targetSz <0.5:
Radius = np.sqrt(2)
elif targetSz <1:
Radius = 1.5*np.sqrt(2)
elif targetSz <2:
Radius = 2.5*np.sqrt(2)
else:
Radius = 4*np.sqrt(2)
if targetYpos > 1.4:
camYStepSz = 0
targetYStepSz = -0.25
elif targetYpos>0.7:
targetYStepSz = 0.25
camYStepSz = 0.5
else:
targetYStepSz = 0.25
camYStepSz = 1
circ = CircleTrajectory(Radius, NumRes)
return circ, camYStepSz, targetYStepSz
def FindOptimalCamTargetConfig_size(targetSz, sizeMult, targetYpos, NumRes):
if targetSz <0.5:
Radius = 2*np.sqrt(2)
elif targetSz <1:
Radius = 1.5*2*np.sqrt(2)
elif targetSz <2:
Radius = 2.5*1.5*np.sqrt(2)
else:
Radius = 4*np.sqrt(2)
if targetYpos > 1.4:
camYStepSz = 0
targetYStepSz = -0.25-0.2
elif targetYpos>0.7:
targetYStepSz = 0.25
camYStepSz = 0.5+0.3
else:
targetYStepSz = 0.25
camYStepSz = 1+0.5
circ = CircleTrajectory(Radius, NumRes)
return circ, camYStepSz, targetYStepSz
#objects in their original place
def FindOptimalCamTargetConfig_gravity(targetSz, targetYpos, NumRes):
if targetSz <0.5:
Radius = np.sqrt(2)
elif targetSz <1:
Radius = 1.5*np.sqrt(2)
elif targetSz <2:
Radius = 2.5*np.sqrt(2)
else:
Radius = 4*np.sqrt(2)
if targetYpos > 1.4:
camYStepSz = 0
targetYStepSz = -0.25
elif targetYpos>0.7:
targetYStepSz = 0.25
camYStepSz = 0.5
else:
targetYStepSz = 0.25
camYStepSz = 1
circ = CircleTrajectory(Radius, NumRes)
return circ, camYStepSz, targetYStepSz
def FindOptimalCamTargetConfig_trained(targetSz, targetYpos, NumRes):
if targetSz <0.5:
Radius = 1*np.sqrt(2)
elif targetSz <1:
Radius = 1.5*np.sqrt(2)
elif targetSz <2:
Radius = 2*np.sqrt(2)
else:
Radius = 2.5*np.sqrt(2)
if targetYpos > 1.4:
pitch = [np.pi/2 + np.pi/9, 7*np.pi/18] #pitch angle in radians [-20, 20]
elif targetYpos>0.7:
pitch = [7*np.pi/18, np.pi/6] #pitch angle in radians [20, 60]
else:
pitch = [np.pi/3, np.pi/9] #pitch angle in radians [30, 70]
circ = SphereTrajectory(Radius, pitch, NumRes)
return circ
def SphereTrajectory(radius, pitch, Res):
    #takes a radius, a list of pitch angles, and the per-ring resolution
    #generates a list of (x, y, z) coordinates equally spaced around each ring
circ = list()
for p in pitch:
for j in range(Res):
circ.append( ( radius* np.sin(p) * np.cos(j* 2 * np.pi / Res), radius*np.cos(p), radius* np.sin(p) * np.sin(j* 2 * np.pi / Res) ))
return circ
def FindOptimalCamTargetConfig_trained2(targetSz, targetYpos, NumRes):
Resolution = 2.0 # 1 deg angle resolution
radius = []
pitch = []
yaw = []
    for i in range(NumRes):
        RandSzTimes = random.randrange(2,7) #random int from [2,6] inclusive
        radius.append(1.0*RandSzTimes*targetSz)
        # randrange requires integer arguments, so cast the float Resolution step
        yaw.append( random.randrange(0, int(360/Resolution), int(Resolution))*Resolution/360 * math.pi*2)
        if targetYpos > 1.4:
            pitch.append( random.randrange(-int(35/Resolution), int(55/Resolution), int(Resolution))*Resolution/90 * math.pi/2)
        else:
            pitch.append( random.randrange(int(10/Resolution), int(90/Resolution), int(Resolution))*Resolution/90 * math.pi/2)
# print(radius)
# print(pitch)
# print(yaw)
circ = SphereTrajectory2(radius, pitch, yaw)
return circ, radius, pitch, yaw
def FindOptimalCamTargetConfig_trained3(targetSz, targetYpos, NumRes):
Resolution = 2.0 # 1 deg angle resolution
radius = []
pitch = []
yaw = []
    for i in range(NumRes):
        RandSzTimes = random.randrange(1,10) #random int from [1,9] inclusive
        radius.append(1.0*RandSzTimes*0.5)
        # randrange requires integer arguments, so cast the float Resolution step
        yaw.append( random.randrange(0, int(360/Resolution), int(Resolution))*Resolution/360 * math.pi*2)
        if targetYpos > 1.4:
            pitch.append( random.randrange(-int(35/Resolution), int(55/Resolution), int(Resolution))*Resolution/90 * math.pi/2)
        else:
            pitch.append( random.randrange(int(10/Resolution), int(90/Resolution), int(Resolution))*Resolution/90 * math.pi/2)
# print(radius)
# print(pitch)
# print(yaw)
circ = SphereTrajectory2(radius, pitch, yaw)
return circ, radius, pitch, yaw
def SphereTrajectory2(radius, pitch, yaw):
    #takes matching lists of per-sample radius, pitch, and yaw values
    #generates one (x, y, z) coordinate per sample
circ = list()
for i, R in enumerate(radius):
p = pitch[i]
y = yaw[i]
circ.append( ( R* np.sin(p) * np.cos(y), R*np.cos(p), R* np.sin(p) * np.sin(y) ))
return circ
def CircleTrajectory(radius, Res):
#takes in radius and how many uniformly sampled points on the circle
#generate list of tuple (x,y) coordinates on the circle equally spaced
circ = list()
for j in range(Res):
circ.append( ( radius* np.cos(j* 2 * np.pi / Res), radius* np.sin(j* 2 * np.pi / Res) ))
return circ
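# Example: CircleTrajectory(1.0, 4) samples the unit circle at 0, 90, 180 and
# 270 degrees, i.e. approximately [(1, 0), (0, 1), (-1, 0), (0, -1)].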
def saveImgList(writedir, writedirjason, imageprefix, imgformat, sort_index, CamMImg, CamMID, TargetInfor, propFirstN, saveJasonflag):
N = int(propFirstN * len(sort_index))
for index in sort_index[:N]:
count_camview = CamMID[index]
img_inst_target_cv2 = CamMImg[index]
print(writedir + imageprefix + str(count_camview) + imgformat)
cv2.imwrite(writedir + imageprefix + str(count_camview) + imgformat, img_inst_target_cv2)
if saveJasonflag:
storeinfor = TargetInfor[index]
#storeinfor_json = json.dumps(storeinfor)
f = open(writedirjason + imageprefix + str(count_camview) + ".pkl","wb")
pickle.dump(storeinfor,f)
f.close()
def saveImgList_train(writedir, writedirjason, imageprefix, imgformat, sort_index, CamMImg, CamMID, TargetInfor, propFirstN, saveJasonflag):
N = int(propFirstN * len(sort_index))
for index in sort_index[:N]:
count_camview = CamMID[index]
img_inst_target_cv2 = CamMImg[index]
print(writedir + imageprefix + str(count_camview) + imgformat)
img_inst_target_cv2 = cv2.resize(img_inst_target_cv2, (640, 512))
cv2.imwrite(writedir + imageprefix + str(count_camview) + imgformat, img_inst_target_cv2)
if saveJasonflag:
storeinfor = TargetInfor[index]
#storeinfor_json = json.dumps(storeinfor)
f = open(writedirjason + imageprefix + str(count_camview) + ".pkl","wb")
pickle.dump(storeinfor,f)
f.close()
def findAllPossibleDestNodes(targetclass, wantedClass, ItemToRoom, SurfaceToRoom, RoomList, SurfaceList, graph):
destnodesIDs = []
destPrefabs = []
destTargetRooms = []
destSurfaceList=[]
destRooms = []
for i in np.where(ItemToRoom[wantedClass.index(targetclass)] == 1)[0]:
destRooms.append(RoomList[i])
destSurface = []
for dstR in destRooms:
for i in np.where( SurfaceToRoom[:, RoomList.index(dstR)] == 1)[0]:
destSurface.append(SurfaceList[i])
destSurface = set(destSurface)
destSurface = list(destSurface)
for node in graph['nodes']:
if node['class_name'] not in destSurface:
continue
roomIn = find_rooms(graph, node)
if roomIn not in destRooms:
#print("warning! " + roomIn + " doesnt belong to any rooms!")
continue
destnodesIDs.append(node['id'])
destPrefabs.append(node['prefab_name'])
destTargetRooms.append(roomIn)
destSurfaceList.append(node['class_name'])
return destnodesIDs, destPrefabs, destTargetRooms, destSurfaceList
def findAllPossibleDestNodes_anomaly(targetclass, wantedClass, ItemToRoom, RoomList, SurfaceList, graph):
destnodesIDs = []
destPrefabs = []
destTargetRooms = []
destSurfaceList=[]
destRoomNode = []
destWallNode = []
destSurface = []
for i in np.where(ItemToRoom[wantedClass.index(targetclass)] == 1)[0]:
surfacename = SurfaceList[i]
if 'floor_' in surfacename:
surfacename = surfacename[6:]
destSurface.append(surfacename)
else:
destSurface.append(surfacename)
destSurface = set(destSurface)
destSurface = list(destSurface)
#find all wall surfaces and their corresponding room
# wallnodes=[]
# wallroom = []
# for node in graph['nodes']:
# if node['class_name'] == 'wall':
# sz = node['bounding_box']['size']
# if all(x > 2 for x in sz):
# continue;
# else:
# roomIn = find_rooms_graphedges(graph, node)
# wallroom.append(roomIn)
# wallnodes.append(node)
for node in graph['nodes']:
if node['class_name'] != 'wall':
if node['class_name'] not in destSurface:
continue
if node['class_name'] in RoomList:
roomIn = node['class_name']
else:
roomIn = find_rooms(graph, node)
destnodesIDs.append(node['id'])
destPrefabs.append(node['prefab_name'])
destTargetRooms.append(roomIn)
destSurfaceList.append(node['class_name'])
destRoomNode.append(float("nan"))
destWallNode.append(float("nan"))
else:
sz = node['bounding_box']['size']
if all(x > 2 for x in sz):
                continue
else:
roomNode = find_rooms_graphedges(graph, node)
roomIn = roomNode['class_name']
destsurf = 'wall_' + roomIn
if destsurf in destSurface:
destnodesIDs.append(node['id'])
destPrefabs.append(node['prefab_name'])
destTargetRooms.append(roomIn)
destSurfaceList.append(node['class_name'])
destRoomNode.append(roomNode)
destWallNode.append(node)
return destnodesIDs, destPrefabs, destTargetRooms, destSurfaceList, destRoomNode, destWallNode
def add_node(graph, n):
graph['nodes'].append(n)
def add_edge(graph, fr_id, rel, to_id):
graph['edges'].append({'from_id': fr_id, 'relation_type': rel, 'to_id': to_id})
def deleteGraphByClassname(graph, target_classname):
#print(graph)
ToDeleteList = find_nodes_byclassname(graph, target_classname)
#print(ToDeleteList)
ToDeleteIDList = []
for i, mc in enumerate(ToDeleteList):
ToDeleteIDList.append(mc['id'])
#del mc['obj_transform']
#del mc['bounding_box']
flagAll = True
while flagAll:
for i, node in enumerate(graph['nodes']):
if node['class_name'] == target_classname:
del graph['nodes'][i]
flagAll = True
break
else:
flagAll = False
#print(ToDeleteIDList)
#for idDelete in ToDeleteIDList:
graph['edges'] = [edge for edge in graph['edges'] if (edge['from_id'] not in ToDeleteIDList) and (edge['to_id'] not in ToDeleteIDList)]
return graph
def computeMoveNodeOffset_anomaly(destwallnode, destroomnode, targetnode):
wallcenter = destwallnode['bounding_box']['center']
roomcenter = destroomnode['bounding_box']['center']
if destwallnode['bounding_box']['size'][0]<2:
alongaxis = 0
else:
alongaxis = 2
if wallcenter[alongaxis] - roomcenter[alongaxis] > 0:
axisorient = -1
else:
axisorient = 1
desiredpos = wallcenter.copy()
desiredpos[alongaxis] = wallcenter[alongaxis] + axisorient*targetnode['bounding_box']['size'][alongaxis]/2
movenode_offset = desiredpos.copy()
for dim in range(3):
movenode_offset[dim] = desiredpos[dim] - targetnode['bounding_box']['center'][dim]
return movenode_offset
def find_destsurfnode_byclassname(graph, targetnode, destsurf):
targetid = targetnode['id']
destsurflist = find_nodes_byclassname(graph, destsurf)
destsurfidlist = [node['id'] for node in destsurflist]
targetsurfidlist = [edge['to_id'] for edge in graph['edges'] if edge['from_id'] == targetid and edge['relation_type'] == 'ON']
surfnode = []
if len(destsurfidlist)>0 and len(targetsurfidlist)>0 :
counter = 0
for did in destsurfidlist:
if did in targetsurfidlist:
surfnode.append(destsurflist[counter])
break
counter = counter + 1
return surfnode
def computePossibleLocationsOnSurf(targetnode, surfnode, scaleStepSz):
targetSzX = targetnode['bounding_box']['size'][0]
targetSzZ = targetnode['bounding_box']['size'][2]
leftBoundSurfX = surfnode['bounding_box']['center'][0] - surfnode['bounding_box']['size'][0]/2 + targetSzX/2
rightBoundSurfX = surfnode['bounding_box']['center'][0] + surfnode['bounding_box']['size'][0]/2 - targetSzX/2
leftBoundSurfZ = surfnode['bounding_box']['center'][2] - surfnode['bounding_box']['size'][2]/2 + targetSzZ/2
rightBoundSurfZ = surfnode['bounding_box']['center'][2] + surfnode['bounding_box']['size'][2]/2 - targetSzZ/2
x = np.arange(leftBoundSurfX,rightBoundSurfX,scaleStepSz*targetSzX)
z = np.arange(leftBoundSurfZ,rightBoundSurfZ,scaleStepSz*targetSzZ)
# x = np.arange(leftBoundSurfX,rightBoundSurfX,0.1)
# z = np.arange(leftBoundSurfZ,rightBoundSurfZ,0.1)
xpos, zpos = np.meshgrid(x,z)
xpos = xpos.flatten()
zpos = zpos.flatten()
xoffset = xpos - targetnode['bounding_box']['center'][0]
zoffset = zpos - targetnode['bounding_box']['center'][2]
return xoffset, zoffset
def segmentTargetBbox(img_height, img_width, JasonData, img, targetid):
#convert to cv2 image and ready to draw
#img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
seg = np.zeros((img_width, img_height)).astype('uint8')
status = False
for infor in JasonData.items():
if infor[1]['prefab_id'] == targetid:
pixel = infor[1]['color']
X,Y = np.where(np.all(img==np.asarray(pixel),axis=2))
left = infor[1]['bbox'][2]
top = infor[1]['bbox'][0]
right = infor[1]['bbox'][3]
bottom = infor[1]['bbox'][1]
targetbbox = [left, top, right, bottom]
seg[X,Y] = 255
status = True
targetarea = infor[1]['area']#(bottom - top)*(right - left)
seg = cv2.cvtColor(seg, cv2.COLOR_GRAY2BGR)
return status, targetarea, targetbbox, seg
return status, 0,0, img
| 28,835 | 9,973 |
import utils
import json
import os
import logging
import subprocess as sp
import re
import datetime
logger = logging.getLogger(__name__)
class Job(object):
"""Parent class for backup jobs."""
def __init__(self,params):
super(Job, self).__init__()
logger.debug("Entering Job constructor.")
try:
self.target = params['target']
self.raw_sources = params['sources']
except KeyError as e:
            raise utils.JobDescriptionKeyError(str(e))
try:
self.global_excludes = params['global_excludes']
except KeyError:
self.global_excludes = []
self.rsync_base_options = ['--stats','--chmod=ugo=rwX','--compress']
if not utils.config['is_pythonw']:
self.rsync_base_options += ['--verbose']
target_drive, target_path = os.path.splitdrive(self.target)
if utils.is_net_drive(target_drive):
unc = utils.net_drives[target_drive]
self.target = unc + target_path
logger.debug("Replacing target drive {} with UNC path {}".format(target_drive, unc))
self.cygtarget = utils.get_cygwin_path(self.target)
if not os.path.isdir(self.target):
raise utils.TargetNotFoundError(self.target)
self.sources = {}
for s in self.raw_sources:
drive, path = os.path.splitdrive(s['path'])
if utils.is_net_drive(drive):
logger.warning("The source path {} is a mounted net drive (ignoring source).".format(drive+path))
elif not os.path.isdir(drive+path):
logger.warning("The source directory {} does not exist (ignoring source).".format(drive+path))
else:
relative_source = {
'path': path,
'excludes': []
}
if 'excludes' in s:
relative_source['excludes'] = s['excludes']
if drive in self.sources:
self.sources[drive].append(relative_source)
else:
self.sources[drive] = [relative_source]
self.stats = {}
def run(self):
raise NotImplementedError("Run method of job was not implemented.")
def excludes_to_options(self,excludes):
"""Convert a list of strings to a list of exclude options to rsync.
:param excludes: List of excludes.
"""
options = []
for excl in excludes:
options.append("--exclude={}".format(excl))
return options
def run_rsync(self):
rsync_options = self.rsync_base_options + self.rsync_options
rsync_process = utils.Rsync(self.rsync_source,self.rsync_target,rsync_options)
rsync_process.wait()
if rsync_process.returncode != 0:
# Appropriate exception type?
raise IOError("rsync returned with exit code {}.".format(rsync_process.returncode))
else:
logger.info("rsync finished successfully.")
        # Parse rsync stats output; typically the numbers appear in lines like:
        # Number of files: 211009
        # Number of files transferred: 410
        # Total file size: 903119614118 bytes
        # Total transferred file size: 9046197739 bytes
        pattern_dict = {
            "num_files": re.compile(r"Number of files:\s+(\d+)"),
            "files_transferred": re.compile(r"Number of files transferred:\s+(\d+)"),
            "tot_file_size": re.compile(r"Total file size:\s+(\d+)"),
            "file_size_transferred": re.compile(r"Total transferred file size:\s+(\d+)")
        }
for line in rsync_process.output_buffer:
for key,pattern in pattern_dict.items():
match = pattern.match(line)
if match:
value = float(match.group(1))
if key in self.stats:
self.stats[key] += value
else:
self.stats[key] = value
class BaseSyncJob(Job):
"""Base class for sync-type jobs."""
def __init__(self,params):
super(BaseSyncJob, self).__init__(params)
self.rsync_base_options += ['--archive']
def run(self):
"""Run rsync to sync one or more sources with one target directory."""
self.rsync_base_options += self.excludes_to_options(self.global_excludes)
for drive,sources in self.sources.items():
logger.info("Backing up sources on {}".format(drive))
with utils.volume_shadow(drive) as shadow_root:
for s in sources:
logger.info("Backing up {}{} to {}".format(drive,s['path'],self.target))
logger.debug("Drive root is found at {} and source path is {}.".format(shadow_root,s['path']))
drive_letter = drive[0]
self.rsync_source = '{}/./{}{}'.format(
utils.get_cygwin_path(shadow_root),
drive_letter,
utils.get_cygwin_path(s['path']))
self.rsync_target = self.cygtarget
self.rsync_options = self.excludes_to_options(s['excludes'])
self.run_rsync()
class SyncJob(BaseSyncJob):
"""Simple backup syncing multiple sources to a target directory with full tree structure."""
def __init__(self,params):
super(SyncJob, self).__init__(params)
logger.debug("SyncJob constructor.")
# Delete option (also excluded) to keep up-to-date with sources
# Relative option to create directory tree at target
self.rsync_base_options += ['--delete','--delete-excluded','--relative']
class AdditiveJob(BaseSyncJob):
"""Updating target with new files from sources."""
def __init__(self,params):
super(AdditiveJob, self).__init__(params)
logger.debug("AdditiveJob constructor.")
        # self.sources maps drive letters to lists of source dicts; append a
        # trailing slash so rsync copies directory contents rather than the dir.
        for sources in self.sources.values():
            for s in sources:
                s['path'] += '/'
# enumerate all possible job types and their constructors
job_types = {
'sync': SyncJob,
'add': AdditiveJob
}
def create_job_from_file(job_file):
"""Creates a job from a JSON job specification.
:param job_file: Path to job file.
:type job_file: str
:returns: Job object of specified type.
"""
logger.info("Creating Job from {}.".format(job_file))
with open(job_file) as f:
params = json.loads(f.read())
try:
if not params['type'] in job_types:
raise utils.JobDescriptionValueError('Job type {} is not valid.'.format(params['type']))
except KeyError as e:
raise utils.JobDescriptionKeyError(str(e))
params['job_file'] = job_file
return job_types[params['type']](params)
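# Illustrative usage (the job file name is hypothetical):
#   job = create_job_from_file('nightly_backup.json')
#   job.run()
#   print(job.stats)  # e.g. {'num_files': 211009.0, 'files_transferred': 410.0, ...}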
| 6,820 | 1,950 |
#coding:utf-8
import sys
sys.path.append("..")
from connectDB import connectDB
testDB = connectDB
testWeatherRecord = {
"collectTime": '2016-10-16',
"ctemp":'22'
}
testconnection = testDB.connectMongo()
testDB.saveWeather(testWeatherRecord,testconnection)
testDB.printWeather(testconnection) | 316 | 114 |
# Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("babybuddy", "0002_add_settings"),
]
operations = [
migrations.AlterField(
model_name="settings",
name="dashboard_refresh_rate",
field=models.DurationField(
blank=True,
choices=[
(None, "disabled"),
(datetime.timedelta(0, 60), "1 min."),
(datetime.timedelta(0, 120), "2 min."),
(datetime.timedelta(0, 180), "3 min."),
(datetime.timedelta(0, 240), "4 min."),
(datetime.timedelta(0, 300), "5 min."),
(datetime.timedelta(0, 600), "10 min."),
(datetime.timedelta(0, 900), "15 min."),
(datetime.timedelta(0, 1800), "30 min."),
],
default=datetime.timedelta(0, 60),
help_text="This setting will only be used when a browser does not support refresh on focus.",
null=True,
verbose_name="Refresh rate",
),
),
]
| 1,249 | 391 |
import face_model
import argparse
import cv2
import sys
import numpy as np
import datetime
import os
import csv
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--image', default='Tom_Hanks_54745.png', help='')
parser.add_argument('--model', default='gender-age/model/model,0', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
args = parser.parse_args()
gender_csv = open('face_crop/cat_1/gender_age_list.csv', "wb")
writer = csv.writer(gender_csv, delimiter=",", quoting=csv.QUOTE_MINIMAL)
model = face_model.FaceModel(args)
#img = cv2.imread('Tom_Hanks_54745.png')
for im in os.listdir('./face_crop/cat_1'):
    img = cv2.imread(os.path.join('./face_crop/cat_1', im))
# img = cv2.resize(img,(112,112))
img = model.get_input(img)
# f1 = model.get_feature(img)
# print(f1[0:10])
gender, age = model.get_ga(img)
if gender == 0:
gen = 'female'
else:
gen = 'male'
row =[im, gender, gen, age]
writer.writerow(row)
# for _ in range(5):
# gender, age = model.get_ga(img)
# time_now = datetime.datetime.now()
# count = 200
# for _ in range(count):
# gender, age = model.get_ga(img)
# time_now2 = datetime.datetime.now()
# diff = time_now2 - time_now
# print('time cost', diff.total_seconds()/count)
# print('gender is',gender)
# print('age is', age)
gender_csv.close() | 1,553 | 600 |
# Analyze the ground truth hand annotated audio features with error presence/segment presence. (Level II)
# [What kind of mistake types there are? What are the types of audio that are labeled there?]
# TODO: bar plots indicating how frequent errors/segment types are
# TODO: What is the distribution of human audio types for the different error/segment types
# frequency count of different speech types during precision, non-precision, no-seg chunks
# frequency count of different speech types during seg, no-seg chunks
import librosa
import os
import pickle as pkl
import argparse
import csv
class SegmentAnalysis():
def __init__(self, args):
self.seg_ann = args.segmentation_annotator
self.utt_ann = args.utterances_annotator
self.demo_dir = '../../'
self.audio_dir = '../../data/demo_audio'
# self.seg_dir = os.path.join(self.demo_dir, 'annotations/A4')
self.tasks = ['box', 'cutting']
self.demo_types = ['video', 'kt']
self.users = ['user2', 'user3', 'user4', 'user5', 'user6', 'user7', 'user8', 'user9', 'user10',\
'user11', 'user12','user14', 'user15', 'user16', 'user17', 'user18', 'user19', 'user20']
with open('../../data/seg_'+self.seg_ann+'.pkl', 'rb') as fp:
self.seg_annot = pkl.load(fp)
with open('../../data/audio_'+self.utt_ann+'.pkl', 'rb') as fp:
self.utt_annot = pkl.load(fp)
self.box_time, self.cutting_time = 0, 0
self.box_pr_time, self.box_non_pr_time = 0, 0
self.cutting_pr_time, self.cutting_non_pr_time = 0, 0
self.box_utt_time, self.cutting_utt_time = 0, 0
self.box_pr_utt_time, self.box_non_pr_utt_time, self.box_non_seg_utt_time = 0, 0, 0
self.cutting_pr_utt_time, self.cutting_non_pr_utt_time, self.cutting_non_seg_utt_time = 0, 0, 0
def get_precision_labels(self):
# segment list for precise, no-precise subtasks
with open('../../data/box_precise.pkl', 'rb') as fp:
self.box_precise = pkl.load(fp)
with open('../../data/box_not-precise.pkl', 'rb') as fp:
self.box_not_precise = pkl.load(fp)
with open('../../data/cutting_precise.pkl', 'rb') as fp:
self.cutting_precise = pkl.load(fp)
with open('../../data/cutting_not-precise.pkl', 'rb') as fp:
self.cutting_not_precise = pkl.load(fp)
def get_demo_time(self,demo_id):
user_id, task, demo_type = demo_id.split('_')
# total demo time (not just speech time)
audio_path = os.path.join(self.audio_dir,user_id,task,demo_type,'env.wav')
audio, sr = librosa.load(audio_path)
demo_len = (audio.shape[0])/sr
return demo_len
def get_stats(self):
for demo_id in self.seg_annot:
# What % of a demonstration is not a segment or an error?
# get total demo time from wav file, get err/seg duration from annotated json
demo_time = self.get_demo_time(demo_id)
user_id, task, demo_type = demo_id.split('_')
# utt start and stop times for each annotation
utt_start = self.utt_annot[demo_id]['start']
utt_stop = self.utt_annot[demo_id]['stop']
utt_duration = self.utt_annot[demo_id]['duration']
if task=='box':
self.box_time+=demo_time
if task=='cutting':
self.cutting_time+=demo_time
for k,l,d in zip(utt_start,utt_stop,utt_duration):
assert(d==0 or d==l-k)
if task=='box':
self.box_utt_time+=d
if task=='cutting':
self.cutting_utt_time+=d
# start and stop times for each annotated segment
seg_start = self.seg_annot[demo_id]['start']
seg_stop = self.seg_annot[demo_id]['stop']
segments = self.seg_annot[demo_id]['seg_label']
self.get_precision_labels()
for i,j,s in zip(seg_start,seg_stop,segments):
dur = j-i
if dur>0:
if task=='box':
if s in self.box_precise:
# precision_label = 'precision'
self.box_pr_time+=dur
# During such parts of a demonstration, what % of time are people talking?
# would require to find overlap of both seg annot and utt annot
# how much people talk in precision, non-precision, and no-seg chunks?
for k,l,d in zip(utt_start,utt_stop,utt_duration):
if d>0:
# utt completely inside seg
if k>=i and k<=j and l>=i and l<=j:
self.box_pr_utt_time+=d
# utt stop inside seg
elif k<i and k<j and l>=i and l<=j:
assert(l-i>=0)
self.box_pr_utt_time+=(l-i)
# utt start inside seg
elif k>=i and k<=j and l>i and l>j:
assert(j-k>=0)
self.box_pr_utt_time+=(j-k)
# seg completely inside utt
elif k>=i and k<=j and l>=i and l<=j:
self.box_pr_utt_time+=dur
elif s in self.box_not_precise:
# precision_label = 'non-precision'
self.box_non_pr_time+=dur
# During such parts of a demonstration, what % of time are people talking?
# would require to find overlap of both seg annot and utt annot
# how much people talk in precision, non-precision, and no-seg chunks?
for k,l,d in zip(utt_start,utt_stop,utt_duration):
if d>0:
# utt completely inside seg
if k>=i and k<=j and l>=i and l<=j:
self.box_non_pr_utt_time+=d
# utt stop inside seg
elif k<i and k<j and l>=i and l<=j:
assert(l-i>=0)
self.box_non_pr_utt_time+=(l-i)
# utt start inside seg
elif k>=i and k<=j and l>i and l>j:
assert(j-k>=0)
self.box_non_pr_utt_time+=(j-k)
# seg completely inside utt
elif k>=i and k<=j and l>=i and l<=j:
self.box_non_pr_utt_time+=dur
elif task=='cutting':
if s in self.cutting_precise:
# precision_label = 'precision'
self.cutting_pr_time+=dur
# During such parts of a demonstration, what % of time are people talking?
# would require to find overlap of both seg annot and utt annot
# how much people talk in precision, non-precision, and no-seg chunks?
for k,l,d in zip(utt_start,utt_stop,utt_duration):
if d>0:
# utt completely inside seg
if k>=i and k<=j and l>=i and l<=j:
self.cutting_pr_utt_time+=d
# utt stop inside seg
elif k<i and k<j and l>=i and l<=j:
assert(l-i>=0)
self.cutting_pr_utt_time+=(l-i)
# utt start inside seg
elif k>=i and k<=j and l>i and l>j:
assert(j-k>=0)
self.cutting_pr_utt_time+=(j-k)
# seg completely inside utt
elif k>=i and k<=j and l>=i and l<=j:
self.cutting_pr_utt_time+=dur
elif s in self.cutting_not_precise:
# precision_label = 'non-precision'
self.cutting_non_pr_time+=dur
# During such parts of a demonstration, what % of time are people talking?
# would require to find overlap of both seg annot and utt annot
# how much people talk in precision, non-precision, and no-seg chunks?
for k,l,d in zip(utt_start,utt_stop,utt_duration):
if d>0:
# utt completely inside seg
if k>=i and k<=j and l>=i and l<=j:
self.cutting_non_pr_utt_time+=d
# utt stop inside seg
elif k<i and k<j and l>=i and l<=j:
assert(l-i>=0)
self.cutting_non_pr_utt_time+=(l-i)
# utt start inside seg
elif k>=i and k<=j and l>i and l>j:
assert(j-k>=0)
self.cutting_non_pr_utt_time+=(j-k)
# seg completely inside utt
elif k>=i and k<=j and l>=i and l<=j:
self.cutting_non_pr_utt_time+=dur
# utt during non-seg parts = total utt_time in demo - utt_time during seg
self.box_non_seg_utt_time = self.box_utt_time - self.box_pr_utt_time - self.box_non_pr_utt_time
# utt during non-seg parts = total utt_time in demo - utt_time during seg
self.cutting_non_seg_utt_time = self.cutting_utt_time - self.cutting_pr_utt_time - self.cutting_non_pr_utt_time
def write_csv(self):
self.get_stats()
exp_file = open('seg_stats.csv', mode='w')
writer = csv.writer(exp_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
column_labels = ['Box Precision Seg Time', 'Box Non-Precision Seg Time', 'Box Total Demo Time','',\
'Cutting Precision Seg Time', 'Cutting Non-Precision Seg Time', 'Cutting Total Demo Time']
writer.writerow(column_labels)
column_values = [self.box_pr_time, self.box_non_pr_time, self.box_time, '',\
self.cutting_pr_time, self.cutting_non_pr_time, self.cutting_time]
writer.writerow(column_values)
writer.writerow([])
column_labels = ['Box Total Utterance Time', 'Box Precision Utterance Time', 'Box Non-Precision Utt Time',\
'Box Total Seg Utt Time', 'Box Total Non-Seg Utt Time', '', 'Cutting Total Utterance Time',\
'Cutting Precision Utt Time', 'Cutting Non-Precision Utt Time', 'Cutting Total Seg Utt Time',\
'Cutting Total Non-Seg Utt Time']
writer.writerow(column_labels)
column_values = [self.box_utt_time, self.box_pr_utt_time, self.box_non_pr_utt_time,\
self.box_pr_utt_time+self.box_non_pr_utt_time, self.box_non_seg_utt_time, '',\
self.cutting_utt_time, self.cutting_pr_utt_time, self.cutting_non_pr_utt_time,\
self.cutting_pr_utt_time+self.cutting_non_pr_utt_time, self.cutting_non_seg_utt_time]
writer.writerow(column_values)
exp_file.close()
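# Note: the nested branches in get_stats all compute the length of the
# intersection of an utterance window [k, l] with a segment window [i, j].
# A minimal equivalent sketch (hypothetical helper, not wired in above):
def interval_overlap(k, l, i, j):
    """Return the length of the overlap between intervals [k, l] and [i, j]."""
    return max(0.0, min(l, j) - max(k, i))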
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--segmentation-annotator',type=str,default='A4')
parser.add_argument('-u', '--utterances-annotator',type=str,default='A2')
args = parser.parse_args()
analysis = SegmentAnalysis(args)
analysis.write_csv()
if __name__ == '__main__':
main() | 12,683 | 3,658 |
class BadRepetitionException(Exception):
"""
exception raised for a bad repetition
"""
def __init__(self, *args):
super(BadRepetitionException, self).__init__(*args)
class NoneRepetitionException(Exception):
"""
exception raised for a repetition that ended without movements
"""
def __init__(self, *args):
super(NoneRepetitionException, self).__init__(*args)
class CompleteExerciseException(Exception):
"""
exception raised when all the good repetitions of an exercise have been executed
"""
def __init__(self, *args):
super(CompleteExerciseException, self).__init__(*args)
class GoodRepetitionException(Exception):
"""
exception raised for a good repetition
"""
def __init__(self, *args):
super(GoodRepetitionException, self).__init__(*args)
class NotFoundPersonException(Exception):
"""
exception raised when a person is not found during the pose estimation
"""
def __init__(self, *args):
super(NotFoundPersonException, self).__init__(*args)
class FeetException(Exception):
"""
exception raised when feet can't be found in the frame
"""
def __init__(self, *args):
super(FeetException, self).__init__(*args) | 1,255 | 345 |
"""I3D feature extration using a tensorflow model.
Copyright 2018 Mitsubishi Electric Research Labs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import numpy as np
import tensorflow as tf
import time
import os
import scipy.io as sio
import skimage.io
from skimage.transform import rescale, resize, downscale_local_mean
from random import randint
import cv2
import i3d
from i3d import Unit3D
import sonnet as snt
import skvideo.io
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='data/Charades_v1_rgb', type=str,
help='Directory that includes image files')
parser.add_argument('--net_output', default='Mixed_5c',
type=str, help="layer used as output features")
parser.add_argument('--feature_dim', '-f', default=2048, type=int,
help='output feature dimension')
parser.add_argument('--model_path', default='data/i3d_model/data/checkpoints/rgb_imagenet', type=str, help='model path')
parser.add_argument('--stride', default=4, type=int, help='stride of frame features')
parser.add_argument('--output',default='data/Charades/i3d_rgb', type=str,
help='output pickle file of feature vectors')
parser.add_argument('--seq_length', default=16, type=int, help='window size of frame features')
args = parser.parse_args()
_IMAGE_SIZE = 224
_NUM_CLASSES = 400
def train():
print(args.model_path)
model_path = args.model_path
pose_net_path = os.path.join(model_path, 'model.ckpt')
tf.reset_default_graph()
with tf.variable_scope('RGB'):
rgb_input = tf.placeholder(tf.float32, [None, args.seq_length, _IMAGE_SIZE, _IMAGE_SIZE, 3])
rgb_y = tf.placeholder(tf.float32, [None, _NUM_CLASSES])
lr = tf.placeholder("float")
drop_out_prob = tf.placeholder("float")
i3d_model = i3d.InceptionI3d(num_classes=_NUM_CLASSES, final_endpoint='Mixed_5c')
net, end_points = i3d_model(rgb_input, is_training=False, dropout_keep_prob=drop_out_prob)
rgb_variable_map = {}
for variable in tf.global_variables():
if variable.name.split('/')[0] == 'RGB':
rgb_variable_map[variable.name.replace(':0', '')] = variable
tf_config = tf.ConfigProto()
restorer = tf.train.Saver(var_list=rgb_variable_map, reshape=True)
with tf.Session(config=tf_config) as sess:
restorer.restore(sess, pose_net_path)
lr_s = 0.0001
drop_out = 1
save_folder = args.output
root_folder = args.input
num_seq = len(os.listdir(root_folder))
for f1 in os.listdir(root_folder):
seq = os.listdir(os.path.join(root_folder, f1))
f_exit = os.listdir(save_folder)
if f1 not in f_exit:
os.mkdir(os.path.join(save_folder, f1))
else:
if os.listdir(os.path.join(save_folder, f1)) !=[]:
continue
num_frame = len(seq)
if num_frame < args.seq_length:
print("There should be at least",args.seq_length," frames")
num_sample = num_frame//args.stride
features = np.zeros(shape=[num_sample, args.feature_dim])
for i in range(0, num_sample):
Start_f = i*args.stride + 1
input = np.zeros(shape=[1, args.seq_length, _IMAGE_SIZE, _IMAGE_SIZE, 3])
gth_label = np.zeros(shape=[1, _NUM_CLASSES])
for j in range(0, args.seq_length):
pick_f = Start_f + j
if pick_f > num_frame:
pick_f = Start_f
im = cv2.imread(os.path.join(root_folder, f1, (f1 + '-' + ("%06d" % pick_f) + '.jpg')))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = cv2.resize(im, (_IMAGE_SIZE, _IMAGE_SIZE))
im = (im - 128)/128
input[:, j, :, :, :] = im
gth_label[0] = 1
feed_dict = {
rgb_input: input,
rgb_y: gth_label,
lr: lr_s,
drop_out_prob: drop_out
}
logits, net_feature = sess.run([net, end_points], feed_dict)
Mix5c = net_feature[args.net_output]
feature = Mix5c.mean(axis=(2,3))
feature = feature.reshape((1, 2048))
features[i, :] = feature
pickle.dump(features, open(os.path.join(save_folder, f1) + '/feature.pkl', 'wb'), 2)
def main(argv=None):
train()
if __name__ == '__main__':
tf.app.run()
| 4,786 | 1,568 |
import csv
import collections # iterator and counter libraries
with open("../MIT_WT_datafiles/country_names.csv", 'r') as cntry, open("../MIT_WT_datafiles/country_lat_lon_from_google.csv", 'r') as ll, open("../MIT_WT_datafiles/cntry_lat_lon_combined.csv", 'w') as output:
reader = csv.reader(cntry) #,delimiter='\t') #... was a tsv file
llread = csv.reader(ll)
writer = csv.writer(output)
next(reader)
next(llread)
writer.writerow(["id", "id_3char","name","latitude","longitude"])
count = 0
latlon = dict()
for row in llread:
print(row)
latlon[row[3].casefold()]=(row[1],row[2]) # make a dictionary with country name as key - row[3].
# casefold makes all letters lowercase.
for row in reader:
if row[2].casefold() in latlon:
#country_count[row[4]] += 1
writer.writerow([ row[0].casefold(), row[1].casefold(), row[2].casefold(), latlon[row[2].casefold()][0], latlon[row[2].casefold()][1]] )
else:
writer.writerow([ row[0].casefold(), row[1].casefold(), row[2].casefold() ])
| 1,037 | 411 |
import logging
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.security import generate_password_hash
from ermaket.api.database import DBConn
from ermaket.api.models import Models, NamesConverter
from ermaket.api.system.hierarchy import AccessRight, PrebuiltPageType
from ermaket.utils import Singleton
from .hierarchy_manager import HierachyManager
__all__ = ['UserManager']
def get_or_create(session, model, defaults=None, **kwargs):
try:
return session.query(model).filter_by(**kwargs).one()
except NoResultFound:
if defaults is not None:
kwargs.update(defaults)
with session.begin_nested():
instance = model(**kwargs)
session.add(instance)
return instance
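# Illustrative usage (model fields are assumptions):
#   role = get_or_create(db, Role, name='editor', defaults={'is_default': False})
# returns the existing row matching the kwargs, or creates it in a nested
# transaction when no match is found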
class UserManager(metaclass=Singleton):
def __init__(self):
self.models = Models()
self._User = self.models['system']['User']
self._Token = self.models['system']['Token']
self._hierarchy_mgr = HierachyManager(save=False)
DBConn()
@property
def hierarchy(self):
return self._hierarchy_mgr.hierarchy
def check_user(self, login, password, db=None):
User = self._User
with DBConn.ensure_session(db) as db:
user = db.query(User).filter(User.login == login).first()
if not user:
return
if not user.check_password(password):
return
return user
def add_roles(self, db, role_names=None):
Role = self.models['system']['Role']
roles = set(db.query(Role).filter(Role.is_default))
if role_names:
for name in role_names:
roles.add(get_or_create(db, Role, name=name))
return list(roles)
def add_user(self, login, password, db=None, role_names=None):
with DBConn.ensure_session(db) as db:
user = self._User(login=login)
user.set_password(password)
db.add(user)
roles = self.add_roles(db, role_names)
user.roles = roles
db.commit()
return user
def add_register_token(self, name, roles=None, **kwargs):
if roles is None:
roles = []
description = {'purpose': 'registration', 'roles': roles}
return self._add_token(name, description, **kwargs)
def add_reset_password_token(self, name, login, **kwargs):
description = {'purpose': 'password', 'login': login}
return self._add_token(name, description, **kwargs)
def _add_token(
self,
name,
description,
roles=None,
uses=None,
time_limit=None,
db=None
):
with DBConn.ensure_session(db) as db:
token_obj = self._Token(
name=name,
infinite=uses is None,
uses=uses,
time_limit=time_limit,
description=description
)
token_hash = token_obj.get_token()
db.add(token_obj)
db.commit()
return token_hash
def register_user(self, token, login, password, db=None):
with DBConn.ensure_session(db) as db:
token_hash = generate_password_hash(token, method='plain')
token = db.query(self._Token).get(token_hash)
if (
token is None or not token.valid or
not token.description['purpose'] == 'registration'
):
return None
user = self.add_user(
login, password, role_names=token.description['roles'], db=db
)
if user is not None:
token.use()
db.commit()
return user
def reset_password(self, token, login, password, db=None):
with DBConn.ensure_session(db) as db:
token_hash = generate_password_hash(token, method='plain')
token = db.query(self._Token).get(token_hash)
if (
token is None or not token.valid or
not token.description['purpose'] == 'password' or
token.description['login'] != login
):
return False
user = db.query(self._User).get(login)
user.set_password(password)
token.use()
db.commit()
return True
def add_role(self, db=None, **kwargs):
Role = self.models['system']['Role']
with DBConn.ensure_session(db) as db:
role = Role(**kwargs)
db.add(role)
db.commit()
return role
def login_user(self, user, session):
roles = [role.name for role in user.roles]
extracted = self._hierarchy_mgr.hierarchy.extract(roles)
session['hierarchy'] = extracted.to_object()
session['rights'] = extracted.extract_rights(roles)
session['profile_forms'] = self._extract_profile_forms(user)
self._set_sql(user, session, extracted)
session.modified = True
def _extract_profile_forms(self, user):
models = [
self.models[role.linked_entity_schema][NamesConverter.class_name(
role.linked_entity_schema, role.linked_entity_name
)] for role in user.roles if role.linked_entity_name is not None
]
return [
self._hierarchy_mgr.hierarchy.get_table_entry(
model.__table__.schema, model.__tablename__
).formDescription.to_object() for model in models
]
def _set_sql(self, user, session, extracted):
roles = [role.name for role in user.roles]
has_sql = any([role.has_sql_access for role in user.roles])
if not has_sql:
return
sql_page = next(
(
page for page in extracted.prebuiltPages
if page.type == PrebuiltPageType.SQL
), None
)
if sql_page is None:
logging.warning(
    f'User {user} is set to have sql access, but has no sql '
    'console page in the hierarchy'
)
else:
if sql_page.accessRights.has(roles, AccessRight.CHANGE):
session['sql_user'] = 'sql'
elif sql_page.accessRights.has(roles, AccessRight.VIEW):
session['sql_user'] = 'view'
| 6,354 | 1,789 |
import dataclasses
@dataclasses.dataclass
class QueryParameter:
name: str
map_to: str
explode: bool
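# Illustrative usage (field values are hypothetical):
#   param = QueryParameter(name='ids', map_to='id', explode=True)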
| 114 | 37 |
import typing as t
from abc import ABCMeta, abstractmethod
if t.TYPE_CHECKING:
from ..request import Request
_T_DISABLED_SESSION_ID = t.TypeVar('_T_DISABLED_SESSION_ID', bound='DisableSessionId')
T = t.TypeVar('T')
class LaunchDataStorage(t.Generic[T]):
__metaclass__ = ABCMeta
_request = None # type: t.Optional[Request]
_session_id = None # type: t.Optional[str]
_session_cookie_name = 'session-id' # type: str
_prefix = 'lti1p3-' # type: str
def __init__(self, *args, **kwargs):
# type: (*t.Any, **t.Any) -> None
pass
def set_request(self, request):
# type: (Request) -> None
self._request = request
def get_session_cookie_name(self):
# type: () -> t.Optional[str]
return self._session_cookie_name
def get_session_id(self):
# type: () -> t.Optional[str]
return self._session_id
def set_session_id(self, session_id):
# type: (str) -> None
self._session_id = session_id
def remove_session_id(self):
# type: () -> None
self._session_id = None
def _prepare_key(self, key):
# type: (str) -> str
if self._session_id:
if key.startswith(self._prefix):
key = key[len(self._prefix):]
return self._prefix + self._session_id + '-' + key
else:
if not key.startswith(self._prefix):
key = self._prefix + key
return key
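# For example (illustrative): with the default prefix 'lti1p3-' and session id
# 'abc', _prepare_key('state') -> 'lti1p3-abc-state'; with no session id set,
# _prepare_key('state') -> 'lti1p3-state'.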
@abstractmethod
def can_set_keys_expiration(self):
# type: () -> bool
raise NotImplementedError
@abstractmethod
def get_value(self, key):
# type: (str) -> T
raise NotImplementedError
@abstractmethod
def set_value(self, key, value, exp=None):
# type: (str, T, t.Optional[int]) -> None
raise NotImplementedError
@abstractmethod
def check_value(self, key):
# type: (str) -> bool
raise NotImplementedError
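# A minimal in-memory implementation sketch of the abstract interface above
# (hypothetical, for illustration only; a real backend would use a cache or
# session store, ideally one that supports key expiration):
class DictLaunchDataStorage(LaunchDataStorage):
    def __init__(self, *args, **kwargs):
        # type: (*t.Any, **t.Any) -> None
        super(DictLaunchDataStorage, self).__init__(*args, **kwargs)
        self._store = {}  # type: t.Dict[str, t.Any]
    def can_set_keys_expiration(self):
        # type: () -> bool
        return False  # a plain dict has no TTL support
    def get_value(self, key):
        return self._store.get(self._prepare_key(key))
    def set_value(self, key, value, exp=None):
        self._store[self._prepare_key(key)] = value  # exp is ignored here
    def check_value(self, key):
        return self._prepare_key(key) in self._store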
class DisableSessionId(object):
_session_id = None # type: t.Optional[str]
_launch_data_storage = None # type: t.Optional[LaunchDataStorage]
def __init__(self, launch_data_storage):
# type: (t.Optional[LaunchDataStorage]) -> None
self._launch_data_storage = launch_data_storage
if launch_data_storage:
self._session_id = launch_data_storage.get_session_id()
def __enter__(self):
# type: (_T_DISABLED_SESSION_ID) -> _T_DISABLED_SESSION_ID
if self._launch_data_storage:
self._launch_data_storage.remove_session_id()
return self
def __exit__(self, *args):
# type: (*t.Any) -> None
if self._launch_data_storage and self._session_id:
self._launch_data_storage.set_session_id(self._session_id)
| 2,791 | 891 |
# -*- coding: utf-8 -*-
"""
sphinxcontrib.confluencebuilder.translator.shared
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2016-2018 by the contributors (see AUTHORS file).
:license: BSD, see LICENSE.txt for details.
"""
from __future__ import (absolute_import, print_function, unicode_literals)
from ..std.sphinx import DEFAULT_HIGHLIGHT_STYLE
from docutils import nodes
from sphinx.writers.text import TextTranslator
import sys
# supported confluence list types
class ConflueceListType(object):
BULLET = 1
ENUMERATED = 2
# abstract sphinx translator for sphinxcontrib.confluencebuilder
class ConfluenceTranslator(TextTranslator):
def __init__(self, document, builder):
TextTranslator.__init__(self, document, builder)
if self.builder.config.highlight_language:
self._highlight = self.builder.config.highlight_language
else:
self._highlight = DEFAULT_HIGHLIGHT_STYLE
self._linenothreshold = sys.maxsize
def visit_centered(self, node):
# centered is deprecated; ignore
# http://www.sphinx-doc.org/en/stable/markup/para.html#directive-centered
pass
def depart_centered(self, node):
pass
def visit_highlightlang(self, node):
# update the translator's highlight language from the defined directive
# http://www.sphinx-doc.org/en/stable/markup/code.html#directive-highlight
self._highlight = node.get('lang', DEFAULT_HIGHLIGHT_STYLE)
self._linenothreshold = node.get('linenothreshold', sys.maxsize)
raise nodes.SkipNode
def visit_start_of_file(self, node):
# ignore managing state of inlined documents
pass
def depart_start_of_file(self, node):
pass
def visit_meta(self, node):
# always ignore meta nodes as they are html-specific
# http://docutils.sourceforge.net/docs/ref/rst/directives.html#meta
raise nodes.SkipNode
def unknown_visit(self, node):
raise NotImplementedError('unknown node: ' + node.__class__.__name__)
| 2,096 | 652 |
from collections import UserList
class CaseInsensitiveList(UserList):
"""
Simple list type for storing and comparing strings case-insensitively
"""
def __contains__(self, item: object) -> bool:
if isinstance(item, str):
return any(item.casefold() == x.casefold() for x in self.data)
return item in self.data
def append(self, item: object) -> None:
if isinstance(item, str):
self.data.append(item.casefold())
else:
self.data.append(item)
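# Illustrative usage:
#   names = CaseInsensitiveList()
#   names.append("Alice")    # stored casefolded, as "alice"
#   assert "ALICE" in names  # membership is compared casefolded too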
| 545 | 165 |
#!/usr/bin/env python
import sys
from datetime import datetime, timedelta
from pinnwand.models import Base, engine, session, Paste
def main():
args = sys.argv[1:]
if args:
if args[0] == "init_db":
Base.metadata.create_all(engine)
if args[0] == "add":
paste = Paste("<html>hi</html>", lexer="html", expiry=timedelta(seconds=5))
session.add(paste)
session.commit()
if args[0] == "remove":
paste = session.query(Paste).filter(Paste.id == int(args[1])).first()
session.delete(paste)
session.commit()
if args[0] == "list":
for paste in session.query(Paste).all():
print(paste)
if args[0] == "reap":
pastes = session.query(Paste).filter(Paste.exp_date < datetime.utcnow()).all()
for paste in pastes:
session.delete(paste)
session.commit()
print("Reaped {} expired pastes".format(len(pastes)))
if __name__ == "__main__":
main()
| 1,062 | 329 |
import numpy as np
def lerp(a, b, t):
return a * (1 - t) + b * t
def normalize(vector):
return vector / np.linalg.norm(vector)
def random_unit_vector():
    # rejection-sample a point inside the unit ball, then normalize; sampling
    # from [-1, 1)^3 rather than [0, 1)^3 avoids biasing the direction to one octant
    point = np.random.rand(3) * 2 - 1
    while np.dot(point, point) >= 1:
        point = np.random.rand(3) * 2 - 1
    return normalize(point)
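# Illustrative check (the tolerance is arbitrary):
#   v = random_unit_vector()
#   assert abs(np.linalg.norm(v) - 1.0) < 1e-9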
| 296 | 115 |
import os
import sys
from pprint import pprint
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from smartphone_connector import Connector
from examples.server_address import SERVER_ADDRESS
device = Connector(SERVER_ADDRESS, 'FooBar')
device.set_grid([1, 2, 3, 4], base_color='blue')
device.sleep(1)
device.disconnect()
| 357 | 130 |
""" Unit tests for the Exact explainer.
"""
# pylint: disable=missing-function-docstring
import numpy as np
import pandas as pd
import shap
def test_create_and_run():
X = pd.DataFrame({"feature1": np.ones(5), "feature2": np.ones(5)})
class IncreaseFeature1(shap.actions.Action):
""" Sample action.
"""
def __init__(self, amount):
self.amount = amount
self.cost = 5 * amount
def __call__(self, X):
X["feature1"] += self.amount
def __str__(self):
return f"Improve feature1 by {self.amount}."
action = IncreaseFeature1(4)
action.__repr__()
assert not (action < action)
action(X.iloc[0])
assert X["feature1"][0] == 5
| 736 | 240 |
import json
from datetime import datetime
from typing import Optional
import requests
from ..const import CUSTOM_HEADER
def get_dividend(msg: str) -> Optional[float]:
if not msg:
return None
left, right = 0, len(msg) - 1
while not msg[left].isdigit() or not msg[right].isdigit():
if not msg[left].isdigit():
left += 1
if not msg[right].isdigit():
right -= 1
return float(msg[left : right + 1])
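# For example (illustrative message text): get_dividend("每份派现金0.0150元")
# returns 0.015, the numeric substring between the first and last digit.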
def fund_history_data(fund_code: str) -> list:
url = f"http://fund.eastmoney.com/pingzhongdata/{fund_code}.js"
text = requests.get(url, headers=CUSTOM_HEADER).text
text = text[
text.find("Data_netWorthTrend") + 21 : text.find("Data_ACWorthTrend") - 15
]
res_list = []
dividend_sum = 0.0
growth_rate_factor = 1.0
for item in json.loads(text):
dividend = get_dividend(item["unitMoney"])
unit_nv = item["y"]
if dividend is not None:
dividend_sum += dividend
growth_rate_factor *= (unit_nv + dividend) / unit_nv
res_list.append(
{
"日期": datetime.fromtimestamp(item["x"] // 1000).strftime("%Y%m%d"),
"单位净值": unit_nv,
"累计净值": unit_nv + dividend_sum,
"复权净值": unit_nv * growth_rate_factor,
"日涨幅": item["equityReturn"],
"分红送配": dividend,
}
)
return res_list
def fund_history_profit_dict(fund_code: str) -> dict:
fund_history_list = fund_history_data(fund_code)
res_dic = {}
for i in range(1, len(fund_history_list)):
item = fund_history_list[i]
last_item = fund_history_list[i - 1]
res_dic[item["日期"]] = item["复权净值"] / last_item["复权净值"] - 1
return res_dic
| 1,781 | 646 |
from django.conf import settings
from django.test import TestCase
from django.contrib.auth import get_user_model
# Create your tests here.
User = get_user_model()
class RegistrationTestCase(TestCase):
# Setup for our Tests
def setUp(self):
user_a = User(email='abcd@invalid.com')
user_a_pw = 'some_strong_123_pass'
self.user_a_pw = user_a_pw
user_a.set_password(user_a_pw)
user_a.save()
self.user_a = user_a
# Test for user's existence
def test_user_exists(self):
user_count = User.objects.all().count()
self.assertEqual(user_count, 1) # ==
self.assertNotEqual(user_count, 0) # !=
# Test for Signup and redirecting routes
def test_signup_url(self):
register_url = settings.REGISTER_URL
data = {"email": "abcd@invalid.com",
"password": "some_strong_123_pass"}
response = self.client.post(register_url, data, follow=True)
status_code = response.status_code
redirect_path = response.request.get("PATH_INFO")
# checks redirected route
#self.assertEqual(redirect_path, settings.REGISTER_REDIRECT_URL)
self.assertEqual(status_code, 200)
| 1,221 | 401 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 15:06:49 2019
@author: smcdeid
"""
from .cat import Gdbcat | 110 | 59 |
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
import regex # pip install regex
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_title_color_and_font(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//a[contains(., 'Campaign Products')]").click()
tested_item_main_page = driver.find_element(By.CSS_SELECTOR, '#campaign-products .col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths:first-child')
# On the main page.
item_name_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.info .name').text
regular_price_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.regular-price')
regular_price_text_on_the_main_page = regular_price_on_the_main_page.text
regular_price_color_on_the_main_page = regular_price_on_the_main_page.value_of_css_property('color')
regular_price_R_color_on_the_main_page = regex.search('(?<=\()\d+', regular_price_color_on_the_main_page).group()
regular_price_G_color_on_the_main_page = regex.search('\(\d+\,\s\K\d+', regular_price_color_on_the_main_page).group()
regular_price_B_color_on_the_main_page = regex.search('\(\d+\,\s\d+\,\s\K\d+', regular_price_color_on_the_main_page).group()
assert regular_price_R_color_on_the_main_page == regular_price_G_color_on_the_main_page == regular_price_B_color_on_the_main_page # Color is gray
regular_price_decoration_on_the_main_page = regex.search('(blink|line-through|overline|underline|none|inherit)',
regular_price_on_the_main_page.value_of_css_property('text-decoration')).group()
assert regular_price_decoration_on_the_main_page == 'line-through' # text-decoration:line-through
regular_price_size_on_the_main_page = regular_price_on_the_main_page.value_of_css_property('font-size')
campaign_price_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.campaign-price')
campaign_price_text_on_the_main_page = campaign_price_on_the_main_page.text
campaign_price_color_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('color')
campaign_price_G_color_on_the_main_page = regex.search('\(\d+\,\s\K\d+', campaign_price_color_on_the_main_page).group()
campaign_price_B_color_on_the_main_page = regex.search('\(\d+\,\s\d+\,\s\K\d+', campaign_price_color_on_the_main_page).group()
assert int(campaign_price_G_color_on_the_main_page) == int(campaign_price_B_color_on_the_main_page) == 0 # Color is red
campaign_price_decoration_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('font-weight')
# font-weight is reported as 'bold' by Chrome and as '700' by FF
assert campaign_price_decoration_on_the_main_page in ('bold', '700')
campaign_price_size_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('font-size')
assert float(regular_price_size_on_the_main_page.rstrip('px')) < float(campaign_price_size_on_the_main_page.rstrip('px')) # The font size of the campaign price is larger than the regular price
# On the item's page
tested_item_main_page.click()
item_name_on_the_item_page = driver.find_element(By.CSS_SELECTOR, 'h1').text
assert item_name_on_the_main_page == item_name_on_the_item_page # The same item name on the main and on the item pages
regular_price_on_the_item_page = driver.find_element(By.CSS_SELECTOR, '.regular-price')
assert regular_price_text_on_the_main_page == regular_price_on_the_item_page.text # The same regular item's price on the main and on the item pages
regular_price_color_on_the_item_page = regular_price_on_the_item_page.value_of_css_property('color')
regular_price_R_color_on_the_item_page = regex.search('(?<=\()\d+', regular_price_color_on_the_item_page).group()
regular_price_G_color_on_the_item_page = regex.search('\(\d+\,\s\K\d+', regular_price_color_on_the_item_page).group()
regular_price_B_color_on_the_item_page = regex.search('\(\d+\,\s\d+\,\s\K\d+', regular_price_color_on_the_item_page).group()
assert regular_price_R_color_on_the_item_page == regular_price_G_color_on_the_item_page == regular_price_B_color_on_the_item_page # Color is gray
regular_price_decoration_on_the_item_page = regex.search('(blink|line-through|overline|underline|none|inherit)',
regular_price_on_the_item_page.value_of_css_property('text-decoration')).group()
assert regular_price_decoration_on_the_item_page == 'line-through' # text-decoration:line-through
regular_price_size_on_the_item_page = regular_price_on_the_item_page.value_of_css_property('font-size')
campaign_price_on_the_item_page = driver.find_element(By.CSS_SELECTOR, '.campaign-price')
assert campaign_price_text_on_the_main_page == campaign_price_on_the_item_page.text # The same campaign item's price on the main and on the item pages
campaign_price_color_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('color')
campaign_price_G_color_on_the_item_page = regex.search('\(\d+\,\s\K\d+', campaign_price_color_on_the_item_page).group()
campaign_price_B_color_on_the_item_page = regex.search('\(\d+\,\s\d+\,\s\K\d+', campaign_price_color_on_the_item_page).group()
assert int(campaign_price_G_color_on_the_item_page) == int(campaign_price_B_color_on_the_item_page) == 0 # Color is red
campaign_price_decoration_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('font-weight')
# font-weight is reported as 'bold' by Chrome and as '700' by FF
assert campaign_price_decoration_on_the_item_page in ('bold', '700')
campaign_price_size_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('font-size')
assert float(regular_price_size_on_the_item_page.rstrip('px')) < float(campaign_price_size_on_the_item_page.rstrip('px')) # The font size of the campaign price is larger than the regular price
| 6,405 | 2,255 |
from django.apps import AppConfig
class NetAutomationConfig(AppConfig):
name = 'net_automation'
| 102 | 31 |
import urllib.request
import os
from dacy.download import models_url
from dacy.load import load
def test_urls():
for m, url in models_url.items():
print(m)
req = urllib.request.Request(url, method="HEAD")
f = urllib.request.urlopen(req)
assert f.status == 200
print("\t Status:", f.status)
size = int(f.headers["Content-Length"]) / 1e6
assert size > 20
print("\t File Size:", round(size), "mb")
def test_load():
models = ["da_dacy_medium_tft-0.0.0"]
for m in models:
nlp = load(m)
nlp("Dette er en test tekst")
| 599 | 209 |
# USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import json
import numpy as np
import pandas as pd
from flowsa.common import *
from flowsa.flowbyfunctions import assign_fips_location_system, sector_disaggregation
def CoA_Cropland_URL_helper(build_url, config, args):
"""This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for coa cropland data
that requires parts of the url text string to be replaced with info specific to the usda nass quickstats API.
This function does not parse the data, only modifies the urls from which data is obtained. """
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA&statisticcat_desc=AREA OPERATED")
else:
url = url.replace("&commodity_desc=AG LAND&commodity_desc=FARM OPERATIONS", "")
url = url.replace(" ", "%20")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA&statisticcat_desc=AREA OPERATED")
else:
url = url.replace("&commodity_desc=AG LAND&commodity_desc=FARM OPERATIONS", "")
url = url.replace(" ", "%20")
urls.append(url)
return urls
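# For example (illustrative url fragment): a build_url containing
# "...&agg_level_desc=__aggLevel__&sector_desc=__secLevel__&state_alpha=__stateAlpha__"
# expands to one url per (agg level, sector level, state) combination,
# e.g. "...&agg_level_desc=COUNTY&sector_desc=CROPS&state_alpha=AL"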
def coa_cropland_call(url, coa_response, args):
cropland_json = json.loads(coa_response.text)
df_cropland = pd.DataFrame(data=cropland_json["data"])
return df_cropland
def coa_cropland_parse(dataframe_list, args):
"""Modify the imported data so it meets the flowbyactivity criteria and only includes data on harvested acreage
(irrigated and total). """
df = pd.concat(dataframe_list, sort=False)
# specify desired data based on domain_desc
df = df[~df['domain_desc'].isin(['ECONOMIC CLASS', 'FARM SALES', 'IRRIGATION STATUS', 'CONCENTRATION',
'ORGANIC STATUS', 'NAICS CLASSIFICATION', 'PRODUCERS'])]
df = df[df['statisticcat_desc'].isin(['AREA HARVESTED', 'AREA IN PRODUCTION', 'AREA BEARING & NON-BEARING',
'AREA', 'AREA OPERATED'])]
# drop rows that subset data into farm sizes (ex. 'area harvested: (1,000 to 1,999 acres)
df = df[~df['domaincat_desc'].str.contains(' ACRES')].reset_index(drop=True)
# drop Descriptions that contain certain phrases, as these data are included in other categories
df = df[~df['short_desc'].str.contains('FRESH MARKET|PROCESSING|ENTIRE CROP|NONE OF CROP|PART OF CROP')]
# drop Descriptions that contain certain phrases - only occur in AG LAND data
df = df[~df['short_desc'].str.contains('INSURANCE|OWNED|RENTED|FAILED|FALLOW|IDLE')].reset_index(drop=True)
# Many crops are listed as their own commodities as well as grouped within a broader category (for example, orange
# trees are also part of orchards). As these data are not needed, take up space, and can lead to double counting if
# included, drop the redundant rows
# subset dataframe into the 5 crop types and land in farms and drop rows
# crop totals: drop all data
# field crops: don't want certain commodities and don't want detailed types of wheat, cotton, or sunflower
df_fc = df[df['group_desc'] == 'FIELD CROPS']
df_fc = df_fc[~df_fc['commodity_desc'].isin(['GRASSES', 'GRASSES & LEGUMES, OTHER', 'LEGUMES', 'HAY', 'HAYLAGE'])]
df_fc = df_fc[~df_fc['class_desc'].str.contains('SPRING|WINTER|TRADITIONAL|OIL|PIMA|UPLAND', regex=True)]
# fruit and tree nuts: only want a few commodities
df_ftn = df[df['group_desc'] == 'FRUIT & TREE NUTS']
df_ftn = df_ftn[df_ftn['commodity_desc'].isin(['BERRY TOTALS', 'ORCHARDS'])]
df_ftn = df_ftn[df_ftn['class_desc'].isin(['ALL CLASSES'])]
# horticulture: only want a few commodities
df_h = df[df['group_desc'] == 'HORTICULTURE']
df_h = df_h[df_h['commodity_desc'].isin(['CUT CHRISTMAS TREES', 'SHORT TERM WOODY CROPS'])]
# vegetables: only want a few commodities
df_v = df[df['group_desc'] == 'VEGETABLES']
df_v = df_v[df_v['commodity_desc'].isin(['VEGETABLE TOTALS'])]
# only want ag land and farm operations in farms & land & assets
df_fla = df[df['group_desc'] == 'FARMS & LAND & ASSETS']
df_fla = df_fla[df_fla['short_desc'].str.contains("AG LAND|FARM OPERATIONS")]
# drop the irrigated acreage in farms (want the irrigated harvested acres)
df_fla = df_fla[((df_fla['domaincat_desc'] == 'AREA CROPLAND, HARVESTED:(ANY)') &
(df_fla['domain_desc'] == 'AREA CROPLAND, HARVESTED ') &
(df_fla['short_desc'] == 'AG LAND, IRRIGATED - ACRES'))]
# concat data frames
df = pd.concat([df_fc, df_ftn, df_h, df_v, df_fla], sort=False).reset_index(drop=True)
# drop unused columns
df = df.drop(columns=['agg_level_desc', 'location_desc', 'state_alpha', 'sector_desc',
'country_code', 'begin_code', 'watershed_code', 'reference_period_desc',
'asd_desc', 'county_name', 'source_desc', 'congr_district_code', 'asd_code',
'week_ending', 'freq_desc', 'load_time', 'zip_5', 'watershed_desc', 'region_desc',
'state_ansi', 'state_name', 'country_name', 'county_ansi', 'end_code', 'group_desc'])
# create FIPS column by combining existing columns
df.loc[df['county_code'] == '', 'county_code'] = '000' # add county fips when missing
df['Location'] = df['state_fips_code'] + df['county_code']
df.loc[df['Location'] == '99000', 'Location'] = US_FIPS # modify national level fips
# address non-NAICS classification data
# use info from other columns to determine flow name
df.loc[:, 'FlowName'] = df['statisticcat_desc'] + ', ' + df['prodn_practice_desc']
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(", ALL PRODUCTION PRACTICES", "", regex=True)
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(", IN THE OPEN", "", regex=True)
# combine column information to create activity information, and create two new columns for activities
df['Activity'] = df['commodity_desc'] + ', ' + df['class_desc'] + ', ' + df['util_practice_desc'] # drop this column later
df['Activity'] = df['Activity'].str.replace(", ALL CLASSES", "", regex=True) # not interested in all data from class_desc
df['Activity'] = df['Activity'].str.replace(", ALL UTILIZATION PRACTICES", "", regex=True) # not interested in all data from class_desc
df['ActivityProducedBy'] = np.where(df["unit_desc"] == 'OPERATIONS', df["Activity"], None)
df['ActivityConsumedBy'] = np.where(df["unit_desc"] == 'ACRES', df["Activity"], None)
# rename columns to match flowbyactivity format
df = df.rename(columns={"Value": "FlowAmount", "unit_desc": "Unit",
"year": "Year", "CV (%)": "Spread",
"short_desc": "Description"})
# drop remaining unused columns
df = df.drop(columns=['Activity', 'class_desc', 'commodity_desc', 'domain_desc', 'state_fips_code', 'county_code',
'statisticcat_desc', 'prodn_practice_desc', 'domaincat_desc', 'util_practice_desc'])
# modify contents of units column
df.loc[df['Unit'] == 'OPERATIONS', 'Unit'] = 'p'
# modify contents of flowamount column, "(D)" is suppressed data, "(Z)" means less than half the unit is shown
df['FlowAmount'] = df['FlowAmount'].str.strip() # trim whitespace
df.loc[df['FlowAmount'] == "(D)", 'FlowAmount'] = withdrawn_keyword
df.loc[df['FlowAmount'] == "(Z)", 'FlowAmount'] = withdrawn_keyword
df['FlowAmount'] = df['FlowAmount'].str.replace(",", "", regex=True)
# USDA CoA 2017 states that (H) means CV >= 99.95, therefore replacing with 99.95 so can convert column to int
# (L) is a CV of <= 0.05
df['Spread'] = df['Spread'].str.strip() # trim whitespace
df.loc[df['Spread'] == "(H)", 'Spread'] = 99.95
df.loc[df['Spread'] == "(L)", 'Spread'] = 0.05
df.loc[df['Spread'] == "", 'Spread'] = None # for instances where data is missing
df.loc[df['Spread'] == "(D)", 'Spread'] = withdrawn_keyword
# add location system based on year of data
df = assign_fips_location_system(df, args['year'])
# Add hardcoded data
df['Class'] = np.where(df["Unit"] == 'ACRES', "Land", "Other")
df['SourceName'] = "USDA_CoA_Cropland"
df['MeasureofSpread'] = "RSD"
df['DataReliability'] = None
df['DataCollection'] = 2
return df
def coa_irrigated_cropland_fba_cleanup(fba):
"""
When using irrigated cropland, aggregate sectors to cropland and total ag land. Doing this because published values
for irrigated harvested cropland do not include the water use for vegetables, woody crops, berries.
:param fba:
:return:
"""
fba = fba[~fba['ActivityConsumedBy'].isin(['AG LAND', 'AG LAND, CROPLAND, HARVESTED'])]
return fba
def disaggregate_coa_cropland_to_6_digit_naics(fba_w_sector, attr):
"""
Disaggregate usda coa cropland to naics 6
:param fba_w_sector:
:param attr:
:return:
"""
# use ratios of usda 'land in farms' to determine animal use of pasturelands at 6 digit naics
fba_w_sector = disaggregate_pastureland(fba_w_sector, attr)
# use ratios of usda 'harvested cropland' to determine missing 6 digit naics
fba_w_sector = disaggregate_cropland(fba_w_sector, attr)
return fba_w_sector
def disaggregate_pastureland(fba_w_sector, attr):
"""
The USDA CoA Cropland irrigated pastureland data only links to the 3 digit NAICS '112'. This function uses state
level CoA 'Land in Farms' to allocate the county level acreage data to 6 digit NAICS.
:param fba_w_sector: The CoA Cropland dataframe after linked to sectors
:return: The CoA cropland dataframe with disaggregated pastureland data
"""
import flowsa
from flowsa.flowbyfunctions import allocate_by_sector, clean_df, flow_by_activity_fields, \
fba_fill_na_dict
# subset the coa data so only pastureland
p = fba_w_sector.loc[fba_w_sector['Sector'] == '112']
# add temp loc column for state fips
p.loc[:, 'Location_tmp'] = p['Location'].apply(lambda x: str(x[0:2]))
# load usda coa cropland naics
df_f = flowsa.getFlowByActivity(flowclass=['Land'],
years=[attr['allocation_source_year']],
datasource='USDA_CoA_Cropland_NAICS')
df_f = clean_df(df_f, flow_by_activity_fields, fba_fill_na_dict)
# subset to land in farms data
df_f = df_f[df_f['FlowName'] == 'FARM OPERATIONS']
# subset to rows related to pastureland
df_f = df_f.loc[df_f['ActivityConsumedBy'].apply(lambda x: str(x[0:3])) == '112']
# drop rows with "&'
df_f = df_f[~df_f['ActivityConsumedBy'].str.contains('&')]
# create sector column
df_f.loc[:, 'Sector'] = df_f['ActivityConsumedBy']
# create proportional ratios
df_f = allocate_by_sector(df_f, 'proportional')
# drop naics = '11'
df_f = df_f[df_f['Sector'] != '11']
# drop 000 in location
df_f.loc[:, 'Location'] = df_f['Location'].apply(lambda x: str(x[0:2]))
# merge the coa pastureland data with land in farm data
df = p.merge(df_f[['Sector', 'Location', 'FlowAmountRatio']], how='left',
left_on="Location_tmp", right_on="Location")
# multiply the flowamount by the flowratio
df.loc[:, 'FlowAmount'] = df['FlowAmount'] * df['FlowAmountRatio']
# drop columns and rename
df = df.drop(columns=['Location_tmp', 'Sector_x', 'Location_y', 'FlowAmountRatio'])
df = df.rename(columns={"Sector_y": "Sector",
"Location_x": 'Location'})
# drop rows where sector = 112 and then concat with original fba_w_sector
fba_w_sector = fba_w_sector[fba_w_sector['Sector'].apply(lambda x: str(x[0:3])) != '112'].reset_index(drop=True)
fba_w_sector = pd.concat([fba_w_sector, df], sort=False).reset_index(drop=True)
return fba_w_sector
def disaggregate_cropland(fba_w_sector, attr):
"""
In the event there are 4 (or 5) digit naics for cropland at the county level, use state level harvested cropland to
create ratios
:param fba_w_sector:
:param attr:
:return:
"""
import flowsa
# flow_by_activity_fields and flow_by_sector_fields are used below, so import them here too
from flowsa.flowbyfunctions import generalize_activity_field_names, sector_aggregation,\
    fbs_default_grouping_fields, clean_df, fba_fill_na_dict, add_missing_flow_by_fields,\
    flow_by_activity_fields, flow_by_sector_fields
from flowsa.mapping import add_sectors_to_flowbyactivity
# drop pastureland data
crop = fba_w_sector.loc[fba_w_sector['Sector'].apply(lambda x: str(x[0:3])) != '112'].reset_index(drop=True)
# drop sectors < 4 digits
crop = crop[crop['Sector'].apply(lambda x: len(x) > 3)].reset_index(drop=True)
# create tmp location
crop.loc[:, 'Location_tmp'] = crop['Location'].apply(lambda x: str(x[0:2]))
# load the relevant state level harvested cropland by naics
naics_load = flowsa.getFlowByActivity(flowclass=['Land'],
years=[attr['allocation_source_year']],
datasource="USDA_CoA_Cropland_NAICS").reset_index(drop=True)
# clean df
naics = clean_df(naics_load, flow_by_activity_fields, fba_fill_na_dict)
# subset the harvested cropland by naics
naics = naics[naics['FlowName'] == 'AG LAND, CROPLAND, HARVESTED'].reset_index(drop=True)
# add sectors
naics = add_sectors_to_flowbyactivity(naics, sectorsourcename='NAICS_2012_Code', levelofSectoragg='agg')
# add missing fbs fields
naics = add_missing_flow_by_fields(naics, flow_by_sector_fields)
# aggregate sectors to create any missing naics levels
naics = sector_aggregation(naics, fbs_default_grouping_fields)
# add missing naics5/6 when only one naics5/6 associated with a naics4
naics = sector_disaggregation(naics)
# drop rows where sector consumed by is none and FlowAmount 0
naics = naics[naics['SectorConsumedBy'].notnull()]
naics = naics.loc[naics['FlowAmount'] != 0]
# create ratios
naics = sector_ratios(naics)
# drop sectors < 4 digits
#naics = naics[naics['SectorConsumedBy'].apply(lambda x: len(x) > 3)].reset_index(drop=True)
# create temporary sector column to match the two dfs on
naics.loc[:, 'Location_tmp'] = naics['Location'].apply(lambda x: str(x[0:2]))
# for loop through naics lengths to determine naics 4 and 5 digits to disaggregate
for i in range(4, 6):
# subset df to sectors with length = i and length = i + 1
crop_subset = crop.loc[crop['Sector'].apply(lambda x: i+1 >= len(x) >= i)]
crop_subset.loc[:, 'Sector_tmp'] = crop_subset['Sector'].apply(lambda x: x[0:i])
# if duplicates drop all rows
df = crop_subset.drop_duplicates(subset=['Location', 'Sector_tmp'], keep=False).reset_index(drop=True)
# drop sector temp column
df = df.drop(columns=["Sector_tmp"])
# subset df to keep the sectors of length i
df_subset = df.loc[df['Sector'].apply(lambda x: len(x) == i)]
# subset the naics df where naics length is i + 1
naics_subset = naics.loc[naics['SectorConsumedBy'].apply(lambda x: len(x) == i+1)].reset_index(drop=True)
naics_subset.loc[:, 'Sector_tmp'] = naics_subset['SectorConsumedBy'].apply(lambda x: x[0:i])
# merge the two df based on locations
df_subset = pd.merge(df_subset, naics_subset[['SectorConsumedBy', 'FlowAmountRatio', 'Sector_tmp', 'Location_tmp']],
how='left', left_on=['Sector', 'Location_tmp'], right_on=['Sector_tmp', 'Location_tmp'])
# create flow amounts for the new NAICS based on the flow ratio
df_subset.loc[:, 'FlowAmount'] = df_subset['FlowAmount'] * df_subset['FlowAmountRatio']
# drop rows of 0 and na
df_subset = df_subset[df_subset['FlowAmount'] != 0]
df_subset = df_subset[~df_subset['FlowAmount'].isna()].reset_index(drop=True)
# drop columns
df_subset = df_subset.drop(columns=['Sector', 'FlowAmountRatio', 'Sector_tmp'])
# rename columns
df_subset = df_subset.rename(columns={"SectorConsumedBy": "Sector"})
# add new rows of data to crop df
crop = pd.concat([crop, df_subset], sort=True).reset_index(drop=True)
# clean up df
crop = crop.drop(columns=['Location_tmp'])
# pasture data
pasture = fba_w_sector.loc[fba_w_sector['Sector'].apply(lambda x: str(x[0:3])) == '112'].reset_index(drop=True)
# concat crop and pasture
fba_w_sector = pd.concat([pasture, crop], sort=True).reset_index(drop=True)
return fba_w_sector
def sector_ratios(df):
# find the longest length sector
length = max(df['SectorConsumedBy'].apply(lambda x: len(x)).unique())
# loop in reverse order from the longest naics length down to 4,
# computing each sector's share of its parent-level total
ratio_dfs = []
for i in range(length, 3, -1):
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df['SectorConsumedBy'].apply(lambda x: len(x) == i)]
# create column for sector grouping
df_subset.loc[:, 'Sector_group'] = df_subset['SectorConsumedBy'].apply(lambda x: x[0:i-1])
# subset df to create denominator
df_denom = df_subset[['FlowAmount', 'Location', 'Sector_group']]
df_denom = df_denom.groupby(['Location', 'Sector_group'], as_index=False)[["FlowAmount"]].agg("sum")
df_denom = df_denom.rename(columns={"FlowAmount": "Denominator"})
# merge the denominator column with fba_w_sector df
ratio_df = df_subset.merge(df_denom, how='left')
# calculate ratio
ratio_df.loc[:, 'FlowAmountRatio'] = ratio_df['FlowAmount'] / ratio_df['Denominator']
ratio_df = ratio_df.drop(columns=['Denominator', 'Sector_group']).reset_index()
ratio_dfs.append(ratio_df)
# concat the per-level ratio dataframes
df_w_ratios = pd.concat(ratio_dfs, sort=True).reset_index(drop=True)
return df_w_ratios
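# For example (illustrative): two NAICS-5 sectors under the same NAICS-4 parent
# with FlowAmounts 30 and 70 in one Location get FlowAmountRatio 0.3 and 0.7.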
| 19,503 | 6,709 |
# Regular Expressions (RegEx) are used to
# 1. Search for a specific string in a large amount of data
# 2. Verify that a string has the proper format (Email, Phone #)
# 3. Find a string and replace it with another string
# 4. Format data into the proper form for importing for example
import re
if re.search("ape", "The ape was at the apex"):
print("There is an ape")
allApes = re.findall("ape.", "The ape was at the apex")
for i in allApes:
print(i)
theStr = "The ape was at the apex"
for i in re.finditer("ape.", theStr):
locTuple = i.span()
print(locTuple)
print(theStr[locTuple[0]:locTuple[1]])
print()
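# Use #3 from the list at the top: find a string and replace it with another.
# A minimal sketch: re.sub(pattern, replacement, string) replaces every match.
print(re.sub("ape", "gorilla", theStr))
# -> "The gorilla was at the gorillax" (both "ape" matches are replaced)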
animalStr = "Cat rat mat pat"
allAnimals = re.findall("[Ccrmfp]at", animalStr)
for i in allAnimals:
print(i)
print()
# Find anything that starts with letters between C-M and c-m, ending in at, in animalStr
someAnimals = re.findall("[c-mC-M]at", animalStr)
for i in someAnimals:
print(i)
print()
anotherAnimals = re.findall("[^Cr]at", animalStr)
for i in anotherAnimals:
print(i) | 1,032 | 377 |
def kafka(msg):
print(msg) | 29 | 16 |
#!/usr/bin/env python
# coding: utf-8
# # Basic Examples with Different Protocols
#
# ## Prerequisites
#
# * A kubernetes cluster with kubectl configured
# * curl
# * grpcurl
# * pygmentize
#
# ## Examples
#
# * [Seldon Protocol](#Seldon-Protocol-Model)
# * [Tensorflow Protocol](#Tensorflow-Protocol-Model)
# * [KFServing V2 Protocol](#KFServing-V2-Protocol-Model)
#
#
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html) to setup Seldon Core with an ingress - either Ambassador or Istio.
#
# Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
#
# * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`
# * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:8080`
# In[1]:
get_ipython().system('kubectl create namespace seldon')
# In[2]:
get_ipython().system('kubectl config set-context $(kubectl config current-context) --namespace=seldon')
# In[3]:
import json
import time
# In[4]:
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def writetemplate(line, cell):
with open(line, 'w') as f:
f.write(cell.format(**globals()))
# In[5]:
VERSION = get_ipython().getoutput('cat ../version.txt')
VERSION=VERSION[0]
VERSION
# ## Seldon Protocol Model
# We will deploy a REST model that uses the SELDON Protocol, specified via the attribute `protocol: seldon`
# In[17]:
get_ipython().run_cell_magic('writetemplate', 'resources/model_seldon.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: example-seldon\nspec:\n protocol: seldon\n predictors:\n - componentSpecs:\n - spec:\n containers:\n - image: seldonio/mock_classifier:{VERSION}\n name: classifier\n graph:\n name: classifier\n type: MODEL\n name: model\n replicas: 1')
# In[18]:
get_ipython().system('kubectl apply -f resources/model_seldon.yaml')
# In[20]:
get_ipython().system('kubectl wait --for condition=ready --timeout=300s sdep --all -n seldon')
# In[21]:
X = get_ipython().getoutput('curl -s -d \'{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}\' -X POST http://localhost:8003/seldon/seldon/example-seldon/api/v1.0/predictions -H "Content-Type: application/json"')
d=json.loads(X[0])
print(d)
assert(d["data"]["ndarray"][0][0] > 0.4)
# In[22]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"data":{"ndarray":[[1.0,2.0,5.0]]}}\' -rpc-header seldon:example-seldon -rpc-header namespace:seldon -plaintext -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict')
d=json.loads("".join(X))
print(d)
assert(d["data"]["ndarray"][0][0] > 0.4)
# In[23]:
get_ipython().system('kubectl delete -f resources/model_seldon.yaml')
# ## Tensorflow Protocol Model
# We will deploy a model that uses the TENSORFLOW Protocol, specified via the attribute `protocol: tensorflow`
# In[25]:
get_ipython().run_cell_magic('writefile', 'resources/model_tfserving.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: example-tfserving\nspec:\n protocol: tensorflow\n predictors:\n - componentSpecs:\n - spec:\n containers:\n - args: \n - --port=8500\n - --rest_api_port=8501\n - --model_name=halfplustwo\n - --model_base_path=gs://seldon-models/tfserving/half_plus_two\n image: tensorflow/serving\n name: halfplustwo\n ports:\n - containerPort: 8501\n name: http\n protocol: TCP\n - containerPort: 8500\n name: grpc\n protocol: TCP\n graph:\n name: halfplustwo\n type: MODEL\n endpoint:\n httpPort: 8501\n grpcPort: 8500\n name: model\n replicas: 1')
# In[26]:
get_ipython().system('kubectl apply -f resources/model_tfserving.yaml')
# In[27]:
get_ipython().system('kubectl wait --for condition=ready --timeout=300s sdep --all -n seldon')
# In[28]:
X = get_ipython().getoutput('curl -s -d \'{"instances": [1.0, 2.0, 5.0]}\' -X POST http://localhost:8003/seldon/seldon/example-tfserving/v1/models/halfplustwo/:predict -H "Content-Type: application/json"')
d=json.loads("".join(X))
print(d)
assert(d["predictions"][0] == 2.5)
# In[29]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"model_spec":{"name":"halfplustwo"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}\' -rpc-header seldon:example-tfserving -rpc-header namespace:seldon -plaintext -proto ./prediction_service.proto 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict')
d=json.loads("".join(X))
print(d)
assert(d["outputs"]["x"]["floatVal"][0] == 2.5)
# In[30]:
get_ipython().system('kubectl delete -f resources/model_tfserving.yaml')
# ## KFServing V2 Protocol Model
# We will deploy a REST model that uses the KFServing V2 Protocol, specified via the attribute `protocol: kfserving`
# In[31]:
get_ipython().run_cell_magic('writefile', 'resources/model_v2.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: triton\nspec:\n protocol: kfserving\n predictors:\n - graph:\n children: []\n implementation: TRITON_SERVER\n modelUri: gs://seldon-models/trtis/simple-model\n name: simple\n name: simple\n replicas: 1')
# In[32]:
get_ipython().system('kubectl apply -f resources/model_v2.yaml')
# In[33]:
get_ipython().system('kubectl wait --for condition=ready --timeout=300s sdep --all -n seldon')
# In[34]:
X = get_ipython().getoutput('curl -s -d \'{"inputs":[{"name":"INPUT0","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]}]}\' -X POST http://0.0.0.0:8003/seldon/seldon/triton/v2/models/simple/infer -H "Content-Type: application/json"')
d=json.loads(X[0])
print(d)
assert(d["outputs"][0]["data"][0]==2)
# In[35]:
X = get_ipython().getoutput('cd ../executor/api/grpc/kfserving/inference && grpcurl -d \'{"model_name":"simple","inputs":[{"name":"INPUT0","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","contents":{"int_contents":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]},"datatype":"INT32","shape":[1,16]}]}\' -plaintext -proto ./grpc_service.proto -rpc-header seldon:triton -rpc-header namespace:seldon 0.0.0.0:8003 inference.GRPCInferenceService/ModelInfer')
X="".join(X)
print(X)
# In[36]:
get_ipython().system('kubectl delete -f resources/model_v2.yaml')
# ## Tensorflow Protocol Multi-Model
# We will deploy two models that use the TENSORFLOW Protocol, specified via the attribute `protocol: tensorflow`
#
# * The demo half_plus_two model
# * A ResNet32 CIFAR10 image classification model
# In[37]:
get_ipython().run_cell_magic('writefile', 'resources/model_tfserving.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: example-tfserving\nspec:\n protocol: tensorflow\n predictors:\n - componentSpecs:\n - spec:\n containers:\n - args: \n - --port=8500\n - --rest_api_port=8501\n - --model_config_file=/mnt/models/models.config\n image: tensorflow/serving\n name: multi\n ports:\n - containerPort: 8501\n name: http\n protocol: TCP\n - containerPort: 8500\n name: grpc\n protocol: TCP\n graph:\n name: multi\n type: MODEL\n implementation: TENSORFLOW_SERVER\n modelUri: gs://seldon-models/tfserving/multi-model\n endpoint:\n httpPort: 8501\n grpcPort: 8500\n name: model\n replicas: 1')
# In[38]:
get_ipython().system('kubectl apply -f resources/model_tfserving.yaml')
# In[39]:
get_ipython().system('kubectl wait --for condition=ready --timeout=300s sdep --all -n seldon')
# In[40]:
X = get_ipython().getoutput('curl -s -d \'{"instances": [1.0, 2.0, 5.0]}\' -X POST http://localhost:8003/seldon/seldon/example-tfserving/v1/models/half_plus_two/:predict -H "Content-Type: application/json"')
d=json.loads("".join(X))
print(d)
assert(d["predictions"][0] == 2.5)
# In[41]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"model_spec":{"name":"half_plus_two"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}\' -rpc-header seldon:example-tfserving -rpc-header namespace:seldon -plaintext -proto ./prediction_service.proto 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict')
d=json.loads("".join(X))
print(d)
assert(d["outputs"]["x"]["floatVal"][0] == 2.5)
# In[42]:
import tensorflow as tf
import matplotlib.pyplot as plt
import os
train, test = tf.keras.datasets.cifar10.load_data()
X_test, y_test = test
X_test = X_test.astype('float32') / 255
print(X_test.shape, y_test.shape)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# In[43]:
from subprocess import run, Popen, PIPE
import json
import numpy as np
idx=1
test_example=X_test[idx:idx+1].tolist()
payload='{"instances":'+f"{test_example}"+'}'
cmd=f"""curl -s -d '{payload}' -X POST http://localhost:8003/seldon/seldon/example-tfserving/v1/models/cifar10/:predict -H "Content-Type: application/json" """
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
res=json.loads(raw)
arr=np.array(res["predictions"][0])
X = X_test[idx].reshape(1, 32, 32, 3)
plt.imshow(X.reshape(32, 32, 3))
plt.axis('off')
plt.show()
print("class:",class_names[y_test[idx][0]])
print("prediction:",class_names[arr.argmax()])
# In[44]:
get_ipython().system('kubectl delete -f resources/model_tfserving.yaml')
# In[ ]:
| 10,347 | 4,180 |
import torch
from torch import nn
from tabnet_lightning import TabNetClassifier
class IndexEmbTabNetClassifier(TabNetClassifier):
"""test model implementation using index based embeddings"""
def __init__(self, **kwargs):
super(IndexEmbTabNetClassifier, self).__init__(**kwargs)
self.index_embeddings = nn.Embedding(num_embeddings=kwargs["input_size"], embedding_dim=1)
def embeddings(self, inputs: torch.Tensor) -> torch.Tensor:
indices = torch.nonzero(inputs, as_tuple=True) # gets the indices which are active
values = self.index_embeddings(indices[-1]).squeeze()
output = torch.index_put_(inputs, indices, values)
return output
#
# # test
# if __name__ == "__main__":
# inputs = torch.Tensor([
# [0, 0, 1, 0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 1],
# ])
# e = nn.Embedding(num_embeddings=8, embedding_dim=1)
#
# indices = torch.nonzero(inputs, as_tuple=True)
#
# emb = e(indices[-1]).squeeze()
#
# # indices[..., -1] = emb
#
# inputs = torch.index_put_(inputs, indices, emb)
| 1,129 | 439 |
import ballenv_pygame as BE
#from flat_game import ballgamepyg as BE
import numpy as np
from itertools import count
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import datetime
from matplotlib import pyplot as plt
import os
env = BE.createBoard(display= True, static_obstacles= 3 , static_obstacle_radius= 20)
AGENT_RAD = 10
gamma = .99
log_interval = 1000
render = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
plt.ion()
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class HistoryBuffer():
def __init__(self,bufferSize = 10):
self.bufferSize = bufferSize
self.buffer = []
#add a state to the history buffer
#each state is assumed to be of shape ( 1 x S )
def addState(self , state):
if len(self.buffer) >= self.bufferSize:
del self.buffer[0] #remove the oldest state
self.buffer.append(state.cpu().numpy())
#returns the 10 states in the buffer in the form of a torch tensor in the order in which they
#were encountered
def getHistory(self):
arrSize = self.buffer[0].shape[1]
#print 'ArraySize',arrSize
arrayHist = np.asarray(self.buffer)
arrayHist = np.reshape(arrayHist , (1,arrSize*self.bufferSize))
state = torch.from_numpy(arrayHist).to(device)
state = state.float()  # cast to float32 on whichever device is active (torch.cuda.FloatTensor would crash on CPU)
#state = state.unsqueeze(0)
return state
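# usage sketch (hypothetical shapes): three (1, 4) states stack into one
# (1, 12) row, oldest state first
# hb = HistoryBuffer(bufferSize=3)
# for _ in range(3):
#     hb.addState(torch.zeros(1, 4, device=device))
# hb.getHistory().shape  # -> torch.Size([1, 12])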
def block_to_arrpos(window_size,x,y):
a = (window_size**2-1)/2
b = window_size
pos = a+(b*y)+x
return int(pos)
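# worked example: with window_size=5, a = 12, so (x, y) = (0, 0) maps to the
# center cell 12 and (x, y) = (1, -1) maps to 12 + 5*(-1) + 1 = 8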
def get_state_BallEnv(state):
#state is a list of info where 1st position holds the position of the
#agent, 2nd the position of the goal , 3rd the distance after that,
#the positions of the obstacles in the world
#print(state)
window_size = 5
block_width = 2
window_rows = window_size
row_start = (window_rows-1)//2  # integer division so range() below gets ints
window_cols = window_size
col_start = (window_cols-1)//2
ref_state = np.zeros(4+window_size**2)
#print(ref_state.shape)
a = (window_size**2-1)//2  # integer index of the window's center cell
ref_state[a+4] = 1
agent_pos = state[0]
goal_pos = state[1]
diff_x = goal_pos[0] - agent_pos[0]
diff_y = goal_pos[1] - agent_pos[1]
if diff_x >= 0 and diff_y >= 0:
ref_state[1] = 1
elif diff_x < 0 and diff_y >= 0:
ref_state[0] = 1
elif diff_x < 0 and diff_y < 0:
ref_state[3] = 1
else:
ref_state[2] = 1
for i in range(3,len(state)):
#as of now this just measures the distance from the center of the obstacle
#this distance has to be measured from the circumferance of the obstacle
#new method, simulate overlap for each of the neighbouring places
#for each of the obstacles
obs_pos = state[i][0:2]
obs_rad = state[i][2]
for r in range(-row_start,row_start+1,1):
for c in range(-col_start,col_start+1,1):
#c = x and r = y
temp_pos = (agent_pos[0] + c*block_width , agent_pos[1] + r*block_width)
if checkOverlap(temp_pos,AGENT_RAD, obs_pos, obs_rad):
pos = block_to_arrpos(window_size,r,c)
ref_state[pos]=1
#state is as follows:
#first - tuple agent position
#second -
state = torch.from_numpy(ref_state).to(device)
state = state.float()
state = state.unsqueeze(0)
return state
#returns true if there is an overlap
def checkOverlap(obj1Pos,obj1rad, obj2Pos, obj2rad):
xdiff = obj1Pos[0]-obj2Pos[0]
ydiff = obj1Pos[1]-obj2Pos[1]
if (np.hypot(xdiff,ydiff)-obj1rad-obj2rad) > 0:
return False
else:
return True
def agent_action_to_WorldActionSimplified(action):
if action==0: #move front
return np.asarray([0,-5])
if action==1: #move right
return np.asarray([5,0])
if action==2: #move down
return np.asarray([0,5])
if action==3: #move left
return np.asarray([-5,0])
class Policy(nn.Module):
def __init__(self, inputSize = 5 , outputSize = 9 , hidden = 128):
super(Policy, self).__init__()
self.fc1 = nn.Linear(inputSize, hidden)
#self.affine1 = nn.Linear(inputSize,512)
#self.affine2 = nn.Linear(512, 128)
self.action_head = nn.Linear(hidden, outputSize)
self.value_head = nn.Linear(hidden, 1)
self.saved_actions = []
self.rewards = []
def forward(self, x):
x = F.relu(self.fc1(x))
#x = F.relu(self.affine2(x))
action_scores = self.action_head(x)
state_values = self.value_head(x)
return F.softmax(action_scores, dim=-1), state_values
historySize = 10
inpsize = 29 * historySize
policy = Policy(inputSize=inpsize , hidden= 1024)
policy.cuda()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
eps = np.finfo(np.float32).eps.item()
def select_action(state,policy):
#state = torch.from_numpy(state).float().unsqueeze(0)
'''
for x in policy.parameters():
#print 'One'
#print 'x : ', torch.norm(x.data)
if x.grad is not None:
print 'x grad ', torch.norm(x.grad)
print 'The state :',state
'''
probs ,state_value = policy(state)
#print 'probs :' , probs
m = Categorical(probs)
action = m.sample()
#print action
policy.saved_actions.append(SavedAction(m.log_prob(action), state_value))
return action.item()
def finish_episode():
R = 0
saved_actions = policy.saved_actions
policy_losses = []
value_losses = []
rewards = []
for r in policy.rewards[::-1]:
R = r + gamma * R
rewards.insert(0, R)
rewards = torch.tensor(rewards).to(device)
rewards = (rewards - rewards.mean()) / (rewards.std() + eps)
for (log_prob, value), r in zip(saved_actions, rewards):
reward = r - value.item()
policy_losses.append(-log_prob * reward)
#print value.shape
#print torch.tensor([r]).to(device).shape
value_losses.append(F.smooth_l1_loss(value, torch.tensor([r]).to(device).unsqueeze(0)))
optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_actions[:]
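# For reference, the reversed loop above accumulates discounted returns; a
# minimal standalone sketch with hypothetical rewards and gamma = 0.99:
# R = 0; out = []
# for r in [1.0, 2.0, 3.0][::-1]:
#     R = r + 0.99 * R
#     out.insert(0, R)
# out  # -> [5.9203, 4.97, 3.0]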
def testmodel(modelpath, iterations):
policy = Policy(inputSize=29)
policy.load_state_dict(torch.load(modelpath))
policy.eval()
policy.cuda()
for i in policy.parameters():
print(i.shape)
env = BE.createBoard(display= True , static_obstacles= 40)
reward_list = []
run_list = []
fig = plt.figure(1)
plt.clf()
for i in range(iterations):
state = env.reset()
state = get_state_BallEnv(state)
#state = env.sensor_readings
done = False
t = 0
while not done and t < 1000:
action = select_action(state,policy)
t+=1
if action is not None:
action = move_list[action]
state, reward, done, _ = env.step(action)
#print 'dist :',state[2]
state = get_state_BallEnv(state)
#state = env.sensor_readings
env.render()
run_list.append(i)
reward_list.append(env.total_reward_accumulated)
plt.plot(run_list, reward_list,color='black')
plt.draw()
plt.pause(.0001)
fig.show()
#move_list = [(5,5) , (5,-5) , (5, 0) , (0,5) , (0,-5),(0,0) , (-5,5),(-5,0),(-5,-5)]
move_list = [(1,1) , (1,-1) , (1, 0) , (0,1) , (0,-1),(0,0) , (-1,1),(-1,0),(-1,-1)]
plot_interval = 10
log_interval = 10
def main():
#****information to store the model
historySize = 10
hbuffer = HistoryBuffer(historySize)
#actorCriticWindow-windowsize - state obtained from local window
#actorCriticFeaures - state obtained from features
#actirCriticFeaturesFull - state obtained from using all features
#actorCriticXXXHistory - state obtained from any of the above methods and using a history buffer
filename = 'actorCriticWindow5History'
curDay = str(datetime.datetime.now().date())
curtime = str(datetime.datetime.now().time())
basePath = 'saved-models_trainBlock' +'/evaluatedPoliciesTest/'
subPath = curDay + '/' + curtime + '/'
curDir = basePath + subPath
os.makedirs(curDir)
if os.path.exists(curDir):
print "YES"
#******************************
state = env.reset()
rewardList = []
runList = []
timeList = []
fig = plt.figure(1)
plt.clf()
print(eps)
for i_episode in range(1000000):
running_reward = eps
state = env.reset()
#env.render()
print('Starting episode :', i_episode)
state = get_state_BallEnv(state)
hbuffer.addState(state)
#state = hbuffer.getHistory()
#state = env.sensor_readings
for t in range(500): # Don't create infinite loop while learning
if t <= historySize:
action = np.random.randint(0,9)
action = move_list[action]
state, reward , done , _ = env.step(action)
state = get_state_BallEnv(state)
hbuffer.addState(state)
else:
state = hbuffer.getHistory()
action = select_action(state,policy)
#print action
if action is not None:
action = move_list[action]
#action = agent_action_to_WorldActionSimplified(action)
#print action
state, reward, done, _ = env.step(action)
state = get_state_BallEnv(state)
#state = env.sensor_readings
hbuffer.addState(state)
#state = hbuffer.getHistory()
if i_episode%log_interval==0:
env.render()
policy.rewards.append(reward)
if done:
break
running_reward += reward
else:
continue
#if t%500==0:
#print "T :",t
#running_reward = running_reward * 0.99 + t * 0.01
rewardList.append(env.total_reward_accumulated)
runList.append(i_episode)
timeList.append(float(t)/500)
plt.plot(runList, rewardList,color='black')
plt.plot(runList , timeList , color= 'red')
plt.draw()
plt.pause(.0001)
fig.show()
if i_episode%plot_interval==0:
plt.savefig('saved_plots/actorCritic/plotNo{}'.format(i_episode))
#print 'The running reward for episode {}:'.format(i_episode),running_reward
if i_episode%log_interval==0:
torch.save(policy.state_dict(),'saved-models_'+ 'trainBlock' +'/evaluatedPoliciesTest/'+subPath+str(i_episode)+'-'+ filename + '-' + str(i_episode) + '.h5', )
#save the model
finish_episode()
#if i_episode+1 % log_interval == 0:
# print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
# i_episode, t, running_reward))
#env.render()
if __name__ == '__main__':
main()
#model = '/home/abhisek/Study/Robotics/toySocialNav/saved-models_trainBlock/evaluatedPoliciesTest/2018-12-16/11:40:41.466716/610-actorCritic-610.h5'
#testmodel(model,50)
| 11,595 | 4,007 |
import pygame
#Class to store game statistics
class GameStats():
def __init__(self, mod_settings):
#Initialize MOD settings
self.mod_settings = mod_settings
#Number of lives available
self.ninjas_left = self.mod_settings.ninja_limit
#Starts inactive until 'Play' is clicked
self.game_active = False
#Resets statistics
self.reset_stats()
self.high_score = 0
#Resets statistics to appropriate values
def reset_stats(self):
self.ninjas_left = self.mod_settings.ninja_limit
self.score = 0
| 616 | 198 |
# Check whether a variable has the expected type
isinstance(7,int)
print(isinstance(7,int)) # True
a=7
print(type(a)) # <class 'int'> | 101 | 64 |
import numpy as np
class GraphDataset:
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def get_targets(self):
targets = [d.y.item() for d in self.data]
return np.array(targets)
def get_data(self):
return self.data
def augment(self, v_outs=None, e_outs=None, g_outs=None, o_outs=None):
"""
v_outs must have shape |G| x |V_g| x L x ? x ...
e_outs must have shape |G| x |E_g| x L x ? x ...
g_outs must have shape |G| x L x ? x ...
o_outs has arbitrary shape; it is a handle for saving extra things,
where L = |prev_outputs_to_consider|.
The graph order along the first axis should match the order in which
the graphs are stored in the original dataset.
:param v_outs:
:param e_outs:
:param g_outs:
:param o_outs:
:return:
"""
for index in range(len(self)):
if v_outs is not None:
self[index].v_outs = v_outs[index]
if e_outs is not None:
self[index].e_outs = e_outs[index]
if g_outs is not None:
self[index].g_outs = g_outs[index]
if o_outs is not None:
self[index].o_outs = o_outs[index]
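# A minimal sketch of augment() with plain objects standing in for graph data
# (assumed usage; real entries would be graph objects exposing .y):
# class _G: pass
# ds = GraphDataset([_G(), _G()])
# ds.augment(g_outs=[[0.1], [0.2]])  # first axis follows the dataset's graph order
# ds[0].g_outs  # -> [0.1]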
class GraphDatasetSubset(GraphDataset):
"""
Subsets the dataset according to a list of indices.
"""
def __init__(self, data, indices):
#print(np.array(indices).max())
#print("597 in indices:", 597 in indices)
self.data = data
self.indices = indices
def __getitem__(self, index):
#print(index)
return self.data[self.indices[index]]
def __len__(self):
return len(self.indices)
def get_targets(self):
targets = [self.data[i].y.item() for i in self.indices]
return np.array(targets)
| 1,985 | 636 |
# -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mysqlparse.grammar.sql_file import sql_file_syntax
class SqlFileSyntaxTest(unittest.TestCase):
def test_multiple_statements(self):
sql_file = sql_file_syntax.parseString("""
CREATE TABLE test_table1 (
test_column1 INT(11) PRIMARY KEY AUTO_INCREMENT NOT NULL,
test_column2 INT(11) NOT NULL
);
ALTER TABLE test_table2 ADD col_no0 BIT(8) NOT NULL DEFAULT 0 FIRST,
ADD col_no1 LONGTEXT NOT NULL,
ADD col_no2 VARCHAR(200) NULL,
ADD col_no3 BIT(8) AFTER col0;
CREATE TABLE test_table3 (
test_column INT(11) PRIMARY KEY AUTO_INCREMENT NOT NULL
);
ALTER TABLE test_table4 ADD col_no0 BIT(8) NOT NULL DEFAULT 0 FIRST,
ADD col_no1 LONGTEXT NOT NULL,
ADD col_no2 VARCHAR(200) NULL,
ADD col_no3 BIT(8) AFTER col0;
""")
self.assertEqual(len(sql_file.statements), 4)
self.assertEqual(sql_file.statements[0].table_name, 'test_table1')
self.assertEqual(sql_file.statements[1].table_name, 'test_table2')
self.assertEqual(sql_file.statements[2].table_name, 'test_table3')
self.assertEqual(sql_file.statements[3].table_name, 'test_table4')
| 1,373 | 459 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
"""
plots the results for each solver and strategy on
the test set as a stacked barchart
Andrew Healy, Aug. 2016
"""
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(1,1,1)
df = pd.read_csv('data_for_second_barchart.csv', index_col=0)  # DataFrame.from_csv was removed from pandas
provers = ['Alt-Ergo-0.95.2', 'Alt-Ergo-1.01', 'CVC3', 'CVC4',
'veriT', 'Yices', 'Z3-4.3.2', 'Z3-4.4.1',
'Best','Random','Worst','Where4']
df = df.reindex(columns=provers)
N = len(provers)
valids = list(df.loc['Valid'])
unknown = list(df.loc['Unknown'])
timeout = list(df.loc['Timeout'])
failure = list(df.loc['Failure'])
ind = np.arange(N) # the x locations for the groups
offset = lambda x: 1 if x > 7 else 0
for i,_ in enumerate(ind):
ind[i] += offset(i) # x offset for strategies and Where4
width = 0.35 # the width of the bars
p1 = ax.bar(ind, valids, width, color='1.0')
p2 = ax.bar(ind, unknown, width, color='0.55',
bottom=valids)
bottom = [unknown[i]+valids[i] for i in range(N)]
p3 = ax.bar(ind, timeout, width, bottom=bottom, color='0.8')
bottom = [bottom[i]+timeout[i] for i in xrange(N)]
p4 = ax.bar(ind, failure, width, bottom=bottom, color='0.3')
ax.set_ylabel('Number of proof obligations')
ax.set_xticks(ind)
ax.set_xticklabels(provers, rotation = 30)
ax.set_yticks(np.arange(0, 263, 50))
ax.legend((p1[0], p2[0], p3[0], p4[0]),
('Valid', 'Unknown', 'Timeout', 'Failure'),
loc='upper center', ncol=4,
bbox_to_anchor=(0.5, 1.05))
ind = np.arange(N)
for i,v in enumerate(valids):
plt.annotate(str(v), xy=(ind[i]+width+0.05+offset(i),v/2.-0.5))
for i,u in enumerate(unknown):
plt.annotate(str(u), xy=(ind[i]+width+0.05+offset(i),valids[i]+u/2.-0.5))
for i,t in enumerate(timeout):
plt.annotate(str(t), xy=(ind[i]+width+0.05+offset(i),valids[i]+unknown[i]+t/2.-0.5))
for i,f in enumerate(failure):
plt.annotate(str(f), xy=(ind[i]+width+0.05+offset(i),valids[i]+unknown[i]+timeout[i]+f/2.-0.5))
plt.savefig(os.path.join('paper','barcharts2.pdf'), bbox_inches='tight') | 2,061 | 911 |
# Program HitungJarak
# Computes distance (s) from speed (v) and travel time (t): s = v * t
# DICTIONARY
# s : float
# v : float
# t : float
# ALGORITHM
v = float(input()) # read the speed in m/s
t = float(input()) # read the time in s
s = v * t # compute the distance in m
print(s) # display the result
# ALGORITHMIC NOTATION
'''
Program HitungJarak
{Computes distance (s) from speed (v) and travel time (t): s = v * t}
Dictionary
s : real
v : real
t : real
ALGORITHM
input(v) {read the speed in m/s}
input(t) {read the time in s}
s <- v * t {compute the distance in m}
output(s) {display the result}
''' | 727 | 324 |
import os_xml_handler.xml_handler as xh
from os_xml_automation import shared_res as shared_res
from os_xml_automation import shared_tools as shared_tools
from os_xml_automation.text_manipulation import _res as res
# manipulate the files by the text mapper
def manipulate(xml_path, xml, place_holder_map):
file_nodes = xh.get_all_direct_child_nodes(xh.get_root_node(xml))
# run on all of the root's direction children
for file_node in file_nodes:
# get the <file_src> and <file_dst> nodes paths
src_file_path = shared_tools.get_file_node_path(xml_path, place_holder_map, file_node, shared_res.NODE_FILE_SRC)
dst_file_path = shared_tools.get_file_node_path(xml_path, place_holder_map, file_node, shared_res.NODE_FILE_DST, src_file_path)
texts_node = xh.get_child_nodes(file_node, res.NODE_TEXTS)[0]
text_nodes = xh.get_child_nodes(texts_node, res.NODE_TEXT)
for text_node in text_nodes:
init_text_node_cycle(text_node, place_holder_map, src_file_path, dst_file_path)
# process a single text node
def init_text_node_cycle(text_node, place_holder_map, src_file_path, dst_file_path):
# get the current action and text
action = str(xh.get_node_att(text_node, shared_res.ACTION))
original_text = xh.get_text_from_child_node(text_node, shared_res.NODE_ORIGINAL_TEXT)
cancel_if_already_present = False
new_text = ''
# delete-range and replace-in-range are special cases that need dedicated handling
if action == res.NODE_TEXT_ATT_ACTION_VAL_DELETE_RANGE or action == res.NODE_TEXT_ATT_ACTION_VAL_REPLACE_IN_RANGE:
handle_delete_range(text_node, place_holder_map, src_file_path, dst_file_path)
if action == res.NODE_TEXT_ATT_ACTION_VAL_DELETE_RANGE:
return
else:
# replace-in-range switches the action to 'above' and inserts the required text above the bottom boundary
action = res.NODE_TEXT_ATT_ACTION_VAL_ABOVE
original_text = xh.get_text_from_child_node(text_node, res.NODE_TO_TEXT)
original_text = shared_tools.fill_place_holders(original_text, place_holder_map)
if action != res.NODE_TEXT_ATT_ACTION_VAL_DELETE_LINE:
new_text_node = xh.get_child_nodes(text_node, shared_res.NODE_NEW_TEXT)[0]
new_text = xh.get_text_from_node(new_text_node)
cancel_if_already_present = xh.get_node_att(new_text_node, res.NODE_TEXT_ATT_IF_ALREADY_PRESENT) == res.NODE_TEXT_ATT_IF_ALREADY_PRESENT_VAL_CANCEL
# replace place holders
for key, value in place_holder_map.items():
if key in original_text:
original_text = original_text.replace(key, value)
if new_text and key in new_text:
new_text = new_text.replace(key, value)
from os_file_stream_handler import file_stream_handler as fsh
if action == res.NODE_TEXT_ATT_ACTION_VAL_DELETE_LINE:
fsh.delete_line_in_file(src_file_path, dst_file_path, original_text)
elif action == res.NODE_TEXT_ATT_ACTION_VAL_REPLACE or action == res.NODE_TEXT_ATT_ACTION_VAL_REPLACE_LINE:
fsh.replace_text_in_file(src_file_path, dst_file_path, original_text, new_text if new_text else '', action == res.NODE_TEXT_ATT_ACTION_VAL_REPLACE_LINE, cancel_if_already_present)
elif action == res.NODE_TEXT_ATT_ACTION_VAL_ABOVE:
fsh.append_text_above_line_in_file(src_file_path, dst_file_path, original_text, new_text, cancel_if_already_present)
elif action == res.NODE_TEXT_ATT_ACTION_VAL_BELOW:
fsh.append_text_below_line_in_file(src_file_path, dst_file_path, original_text, new_text, cancel_if_already_present)
# delete the text within a given range
def handle_delete_range(text_node, place_holder_map, src_file_path, dst_file_path):
from_text = xh.get_text_from_child_node(text_node, res.NODE_FROM_TEXT)
to_text = xh.get_text_from_child_node(text_node, res.NODE_TO_TEXT)
from_text = shared_tools.fill_place_holders(from_text, place_holder_map)
to_text = shared_tools.fill_place_holders(to_text, place_holder_map)
include_boundaries = xh.get_node_att(text_node, res.NODE_TEXT_ATT_INCLUDE_BOUNDARIES)
include_boundaries = not include_boundaries or include_boundaries == 'false'
from os_file_stream_handler import file_stream_handler as fsh
fsh.delete_text_range_in_file(src_file_path, dst_file_path, from_text, to_text, include_bundaries=include_boundaries)
| 4,414 | 1,578 |
#!/usr/bin/env false
"""Manage tasks."""
# Internal packages (absolute references, distributed with Python)
from logging import getLogger
# External packages (absolute references, NOT distributed with Python)
# Library modules (absolute references, NOT packaged, in project)
from task.exception import Abort
from task.exception import Skip
from task.queue import TaskQueue
from utility.my_logging import log_exception
# Project modules (relative references, NOT packaged, in project)
class TaskManager(object):
def __init__(self, config, mapping):
self._log = getLogger(self.__class__.__name__)
self._config = config
self._mapping = mapping
self._q = TaskQueue()
super().__init__()
def _add(self, task):
self._q.put(task)
def _execute_task(self, the_task):
try:
the_task.execute()
except Abort as e:
self._log.debug("From %s _execute_task() except Abort", __name__)
self._log.info(repr(e))
except KeyboardInterrupt as e:
self._log.debug(
"From %s _execute_task() except KeyboardInterrupt", __name__
)
self._log.fatal(repr(e))
raise
except NotImplementedError as e:
self._log.debug(
"From %s _execute_task() except NotImplementedError", __name__
)
self._log.debug(repr(e))
except Skip as e:
self._log.debug("From %s _execute_task() except Skip", __name__)
self._log.info(repr(e))
except BaseException as e:
self._log.debug(
"From %s _execute_task() except BaseException", __name__
)
if self._config.should_abort_upon_task_failure:
log_exception(self._log, e)
raise
else:
log_exception(self._log, e, with_traceback=True)
@property
def config(self):
return self._config
@property
def mapping(self):
return self._mapping
def run(self):
self._log.info("Running task manager...")
while not self._q.empty():
self._execute_task(self._q.get())
self._log.debug("Queue contains %d tasks", self._q.length)
"""DisabledContent
"""
| 2,304 | 646 |
"""
domonic.CDN
====================================
Refs to some useful .js and .css libs.
use for prototyping. wget anything in later and create your own local references once you're happy
TODO - integrity/cross origin/module?
WARNING/NOTE - don't use. this isn't released or documented. just ideas atm...
"""
# class CDN_TEXT(object):
# lorem ipsum generator
# fake names
class CDN_IMG(object):
""" CDN images """
# - icons
# - UI - emojis
'''
# SOME EXAMPLES. NOT ALL ARE HTTPS:
http://placehold.it/350x150
http://unsplash.it/200/300
http://lorempixel.com/400/200
http://dummyimage.com/600x300/000/fff
# https://dummyimage.com/420x320/ff7f7f/333333.png&text=Sample
http://placekitten.com/200/300
https://placeimg.com/640/480/any
http://placebear.com/g/200/300
https://ipsumimage.appspot.com/140x100, ff7700
https://www.fillmurray.com/640/360
https://baconmockup.com/640/360
https://placebeard.it/640x360
https://www.placecage.com/640/360
https://www.stevensegallery.com/640/360
https://fakeimg.pl/640x360
# https://fakeimg.pl/420x320/ff0000,128/333333,255/?text=Sample&font=lobster
https://picsum.photos/640/360
https://via.placeholder.com/420x320/ff7f7f/333333?text=Sample
https://keywordimg.com/420x320/random
http://www.dummysrc.com/430x320.png/22c5fc/17202A
'''
PLACEHOLDER_SERVICE = "loremflickr.com"
@staticmethod
def PLACEHOLDER(width=100, height=100, HTTP="", seperator='/'):
"""
to update do CDN_IMG.PLACEHOLDER_SERVICE = "placebear.com/g"
usage : img(_src=CDN_IMG.PLACEHOLDER(300,100))
default HTTP is none, to let the browser decide
# use the optional seperator arg if the site uses an x instead of a slash
img(_src=CDN_IMG.PLACEHOLDER(300,100,'x'))
"""
return f"{HTTP}://{CDN_IMG.PLACEHOLDER_SERVICE}/{width}{seperator}{height}"
class CDN_JS(object):
"""
You will need to append the lib version number if you add any libs here
# obvious candidates... https://github.com/sorrycc/awesome-javascript
"""
JQUERY_3_5_1 = "https://code.jquery.com/jquery-3.5.1.min.js"
JQUERY_UI = "https://code.jquery.com/ui/1.12.0/jquery-ui.min.js"
UNDERSCORE = "https://cdn.jsdelivr.net/npm/underscore@1.11.0/underscore-min.js"
BOOTSTRAP_4 = "https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js"
POPPER_1_16_1 = "https://cdn.jsdelivr.net/npm/popper.js@1.16.1/dist/umd/popper.min.js"
BOOTSTRAP_5_ALPHA = "https://stackpath.bootstrapcdn.com/bootstrap/5.0.0-alpha1/js/bootstrap.min.js"
D3_6_1_0 = "https://cdnjs.cloudflare.com/ajax/libs/d3/6.1.0/d3.min.js"
MODERNIZER_2_8_3 = "https://cdnjs.cloudflare.com/ajax/libs/modernizr/2.8.3/modernizr.min.js"
MOMENT_2_27_0 = "https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.27.0/moment.min.js"
PIXI_5_3_3 = "https://cdnjs.cloudflare.com/ajax/libs/pixi.js/5.3.3/pixi.min.js"
SOCKET_1_4_5 = "https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.4.5/socket.io.min.js"
X3DOM = "https://www.x3dom.org/download/x3dom.js"
AFRAME_1_2 = "https://aframe.io/releases/1.2.0/aframe.min.js"
BRYTHON_3_9_5 = "https://cdnjs.cloudflare.com/ajax/libs/brython/3.9.5/brython.min.js"
MATHML = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=MML_HTMLorMML" # polyfill mathml
# def find_on_cdn():
# https://cdn.jsdelivr.net/npm/
# https://cdnjs.cloudflare.com/ajax/libs/
# def dl(self, path=None): # download
# if path none domonic.JS_MASTER < strip off name to get default assets folder if non passed
class CDN_CSS(object):
"""
Prefer pinned version numbers when available; use LATEST only when you always want the newest release
"""
BOOTSTRAP_5_ALPHA = "https://stackpath.bootstrapcdn.com/bootstrap/5.0.0-alpha1/js/bootstrap.min.js"
BOOTSTRAP_4 = "https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css"
MARX = "https://unpkg.com/marx-css/css/marx.min.css" # version?
MVP = "https://unpkg.com/mvp.css" # version?
WATER_LATEST = "https://cdn.jsdelivr.net/gh/kognise/water.css@latest/water.min.css" # note 'latest' in cdn url
BALLOON = "https://unpkg.com/balloon-css/balloon.min.css"
THREE_DOTS_0_2_0 = "https://cdnjs.cloudflare.com/ajax/libs/three-dots/0.2.0/three-dots.min.css"
MILLIGRAM_1_3_0 = "https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.css"
X3DOM = "https://www.x3dom.org/download/x3dom.css"
FONTAWESOME_5_7_1 = "https://use.fontawesome.com/releases/v5.7.1/css/all.css"
MDI_5_4_55 = "https://cdn.materialdesignicons.com/5.4.55/css/materialdesignicons.min.css" # icons
# find_on_cdn():
# https://unpkg.com/
# https://cdnjs.cloudflare.com/ajax/libs/
# def dl(self, path=domonic.JS_MASTER): # download
class CDN_FONT(object):
@staticmethod
def google(family):
return "http://fonts.googleapis.com/css?family=" + '+'.join(family)
| 5,002 | 2,144 |
from mpi4py import MPI
import numpy as np
import torch
# sync_networks across the different cores
def sync_networks(network):
"""
network is the network you want to sync
"""
comm = MPI.COMM_WORLD
flat_params, params_shape = _get_flat_params(network)
comm.Bcast(flat_params, root=0)
# set the flat params back to the network
_set_flat_params(network, params_shape, flat_params)
# get the flat params from the network
def _get_flat_params(network):
param_shape = {}
flat_params = None
for key_name, value in network.named_parameters():
param_shape[key_name] = value.detach().numpy().shape
if flat_params is None:
flat_params = value.detach().numpy().flatten()
else:
flat_params = np.append(flat_params, value.detach().numpy().flatten())
return flat_params, param_shape
# set the flat params back into the network
def _set_flat_params(network, params_shape, params):
pointer = 0
for key_name, values in network.named_parameters():
# get the length of the parameters
len_param = np.prod(params_shape[key_name])
copy_params = params[pointer:pointer + len_param].reshape(params_shape[key_name])
copy_params = torch.tensor(copy_params)
# copy the params
values.data.copy_(copy_params.data)
# update the pointer
pointer += len_param
# sync the gradients across the different cores
def sync_grads(network):
flat_grads, grads_shape = _get_flat_grads(network)
comm = MPI.COMM_WORLD
global_grads = np.zeros_like(flat_grads)
comm.Allreduce(flat_grads, global_grads, op=MPI.SUM)
_set_flat_grads(network, grads_shape, global_grads)
def _set_flat_grads(network, grads_shape, flat_grads):
pointer = 0
for key_name, value in network.named_parameters():
len_grads = np.prod(grads_shape[key_name])
copy_grads = flat_grads[pointer:pointer + len_grads].reshape(grads_shape[key_name])
copy_grads = torch.tensor(copy_grads)
# copy the grads
value.grad.data.copy_(copy_grads.data)
pointer += len_grads
def _get_flat_grads(network):
grads_shape = {}
flat_grads = None
for key_name, value in network.named_parameters():
grads_shape[key_name] = value.grad.data.cpu().numpy().shape
if flat_grads is None:
flat_grads = value.grad.data.cpu().numpy().flatten()
else:
flat_grads = np.append(flat_grads, value.grad.data.cpu().numpy().flatten())
return flat_grads, grads_shape
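# A minimal usage sketch (assumed setup, not part of this module): broadcast
# the initial weights once, then reduce gradients every step. Launch with
# e.g. `mpirun -np 4 python train.py`.
# net = torch.nn.Linear(4, 2)
# sync_networks(net)   # rank 0's parameters are copied to every rank
# net(torch.randn(8, 4)).sum().backward()
# sync_grads(net)      # gradients are summed across ranks via Allreduce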
| 2,519 | 818 |
from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
User = get_user_model()
class GUIType(models.TextChoices):
img = "image_upload", "Image Upload"
draw = "draw", "Draw"
class Demo(models.Model):
name_ja = models.CharField(max_length=128)
name_en = models.CharField(max_length=128)
description_ja = models.TextField()
description_en = models.TextField()
endpoint_url = models.CharField(max_length=128)
thumbnail_uri = models.CharField(max_length=128)
gui_type = models.CharField(max_length=128, choices=GUIType.choices)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
class DeployStatus(models.Model):
status = models.CharField(max_length=10)
message = models.CharField(max_length=50)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
| 873 | 296 |
from django.contrib import admin
from app.models import User,Admin,UserFile
# Register your models here.
admin.site.register(User)
admin.site.register(Admin)
admin.site.register(UserFile) | 187 | 54 |
import asyncio
import functools
import importlib.util
import logging
import signal
import sys
import typing as t
import uuid
import aiohttp
import click
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from . import collect
from .actions.runner import DataProcessor
from .actions.source import get_data_ongoing, refeed_queue_var
from .database import Deployment, deployment_table
from .query import with_database
logger = logging.getLogger(__name__)
@click.command()
@click.argument("server", nargs=-1)
@click.option(
"--db",
metavar="<CONNECTION_STRING>",
default="postgresql+psycopg2://localhost/apd",
help="The connection string to a PostgreSQL database",
envvar="APD_DB_URI",
)
@click.option("--api-key", metavar="<KEY>", envvar="APD_API_KEY")
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
def collect_sensor_data(
db: str, server: t.Tuple[str], api_key: str, verbose: bool
) -> None:
"""This loads data from one or more sensors into the specified database.
Only PostgreSQL databases are supported, as the column definitions use
multiple pg specific features. The database must already exist and be
populated with the required tables.
The --api-key option is used to specify the access token for the sensors
being queried.
You may specify any number of servers, the variable should be the full URL
to the sensor's HTTP interface, not including the /v/2.0 portion. Multiple
URLs should be separated with a space.
"""
success = True
try:
collect.standalone(db, server, api_key, echo=verbose)
except ValueError as e:
click.secho(str(e), err=True, fg="red")
success = False
if not success:
sys.exit(1)
def load_handler_config(path: str) -> t.List[DataProcessor]:
# Create a module called user_config backed by the file specified, and load it
# This uses Python's import internals to fake a module in a known location
# Based on an SO answer by Sebastian Rittau and sample code from Brett Cannon
module_spec = importlib.util.spec_from_file_location("user_config", path)
module = importlib.util.module_from_spec(module_spec)
loader = module_spec.loader
if isinstance(loader, importlib.abc.Loader):
loader.exec_module(module)
try:
return module.handlers # type: ignore
except AttributeError as err:
raise ValueError(f"Could not load config file from {path}") from err
else:
# No valid loader could be found
raise ValueError(f"Could not load config file from {path}")
def actually_exit(sig, frame):
click.secho("Exiting...", bold=True)
sys.exit(1)
def stats_signal_handler(sig, frame, handlers=None):
for handler in handlers:
click.echo(
click.style(handler.name, bold=True, fg="red") + " " + handler.stats()
)
if sig == signal.SIGINT:
click.secho("Press Ctrl+C again to end the process", bold=True)
handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, actually_exit)
asyncio.get_running_loop().call_later(5, install_ctrl_c_signal_handler, handler)
return
def install_ctrl_c_signal_handler(signal_handler):
click.secho("Press Ctrl+C to view statistics", bold=True)
signal.signal(signal.SIGINT, signal_handler)
@click.command()
@click.argument("config", nargs=1)
@click.option(
"--db",
metavar="<CONNECTION_STRING>",
default="postgresql+psycopg2://localhost/apd",
help="The connection string to a PostgreSQL database",
envvar="APD_DB_URI",
)
@click.option(
"--historical",
is_flag=True,
help="Also trigger actions for data points that were already present in the database",
)
@click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
def run_actions(config: str, db: str, verbose: bool, historical: bool):
"""This runs the long-running action processors defined in a config file.
The configuration file specified should be a Python file that defines a
list of DataProcessor objects called handlers.
"""
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.DEBUG if verbose else logging.WARN,
)
async def main_loop():
with with_database(db):
logger.info("Loading configuration")
handlers = load_handler_config(config)
# Set up the refeed queue before starting the handlers
# or source, so they all have access to it
refeed_queue_var.set(asyncio.Queue())
logger.info(f"Configured {len(handlers)} handlers")
starters = [handler.start() for handler in handlers]
await asyncio.gather(*starters)
logger.info("Ingesting data")
data = get_data_ongoing(historical=historical)
signal_handler = functools.partial(
stats_signal_handler,
handlers=handlers,
)
for signal_name in "SIGINFO", "SIGUSR1", "SIGINT":
try:
signal.signal(signal.Signals[signal_name], signal_handler)
except KeyError:
pass
async for datapoint in data:
for handler in handlers:
await handler.push(datapoint)
asyncio.run(main_loop())
@click.group()
def deployments():
pass
@deployments.command()
@click.argument("uri")
@click.argument("name")
@click.option(
"--db",
metavar="<CONNECTION_STRING>",
default="postgresql+psycopg2://localhost/apd",
help="The connection string to a PostgreSQL database",
envvar="APD_DB_URI",
)
@click.option("--api-key", metavar="<KEY>", envvar="APD_API_KEY")
@click.option("--colour")
def add(
db: str,
uri: str,
name: str,
api_key: t.Optional[str],
colour: t.Optional[str],
) -> None:
"""This creates a record of a new deployment in the database."""
deployment = Deployment(id=None, uri=uri, name=name, api_key=api_key, colour=colour)
async def http_get_deployment_id():
async with aiohttp.ClientSession() as http:
collect.http_session_var.set(http)
return await collect.get_deployment_id(uri)
deployment.id = asyncio.run(http_get_deployment_id())
insert = deployment_table.insert().values(**deployment._asdict())
engine = create_engine(db)
sm = sessionmaker(engine)
Session = sm()
Session.execute(insert)
Session.commit()
@deployments.command()
@click.option(
"--db",
metavar="<CONNECTION_STRING>",
default="postgresql+psycopg2://localhost/apd",
help="The connection string to a PostgreSQL database",
envvar="APD_DB_URI",
)
def list(db: str) -> None:
"""This creates a record of a new deployment in the database."""
engine = create_engine(db)
sm = sessionmaker(engine)
Session = sm()
deployments = Session.query(deployment_table).all()
for deployment in deployments:
click.secho(deployment.name, bold=True)
click.echo(click.style("ID ", bold=True) + deployment.id.hex)
click.echo(click.style("URI ", bold=True) + deployment.uri)
click.echo(click.style("API key ", bold=True) + deployment.api_key)
click.echo(click.style("Colour ", bold=True) + str(deployment.colour))
click.echo()
Session.rollback()
@deployments.command()
@click.argument("id")
@click.option("--uri")
@click.option("--name")
@click.option(
"--db",
metavar="<CONNECTION_STRING>",
default="postgresql+psycopg2://localhost/apd",
help="The connection string to a PostgreSQL database",
envvar="APD_DB_URI",
)
@click.option("--api-key", metavar="<KEY>", envvar="APD_API_KEY")
@click.option("--colour")
def edit(
db: str,
id,
uri: t.Optional[str],
name: t.Optional[str],
api_key: t.Optional[str],
colour: t.Optional[str],
) -> None:
"""This creates a record of a new deployment in the database."""
update = {}
if uri is not None:
update["uri"] = uri
if name is not None:
update["name"] = name
if api_key is not None:
update["api_key"] = api_key
if colour is not None:
update["colour"] = colour
deployment_id = uuid.UUID(id)
update_stmt = (
deployment_table.update()
.where(deployment_table.c.id == deployment_id)
.values(**update)
)
engine = create_engine(db)
sm = sessionmaker(engine)
Session = sm()
Session.execute(update_stmt)
deployments = Session.query(deployment_table).filter(
deployment_table.c.id == deployment_id
)
Session.commit()
for deployment in deployments:
click.secho(deployment.name, bold=True)
click.echo(click.style("ID ", bold=True) + deployment.id.hex)
click.echo(click.style("URI ", bold=True) + deployment.uri)
click.echo(click.style("API key ", bold=True) + deployment.api_key)
click.echo(click.style("Colour ", bold=True) + str(deployment.colour))
click.echo()
| 9,143 | 2,799 |
import pytest
import pandas as pd
from reason.cluster import KMeansClusterer
@pytest.fixture
def df():
x = {'feature1': [2, 7], 'feature2': [3, 6]}
df = pd.DataFrame(data=x)
return df
def test_object_creation():
KMeansClusterer()
def test_fit_featureset():
data = [{'feature1': 2, 'feature2': 3}, {'feature1': 7, 'feature2': 6}]
clusterer = KMeansClusterer()
clusterer.fit(data)
def test_fit_dataframe(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
def test_fit_tolerance():
data = pd.DataFrame(data={'feature': [1]})
clusterer = KMeansClusterer()
clusterer.fit(data)
def test_fit_bad_data():
data = [1, 0]
clusterer = KMeansClusterer()
with pytest.raises(TypeError):
clusterer.fit(data)
def test_fit_bad_k(df):
clusterer = KMeansClusterer()
with pytest.raises(ValueError):
clusterer.fit(df, k=0)
def test_fit_callable_distance(df):
clusterer = KMeansClusterer()
def dist(u, v):
return 1
clusterer.fit(df, distance=dist)
def test_fit_bad_distance(df):
clusterer = KMeansClusterer()
with pytest.raises(TypeError):
clusterer.fit(df, distance=1)
def test_fit_bad_max_iter(df):
clusterer = KMeansClusterer()
with pytest.raises(ValueError):
clusterer.fit(df, max_iter=0)
def test_predict_dict(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
new = {'feature1': 2, 'feature2': 2}
clusterer.predict(new)
def test_predict_featuresets(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
new = [
{'feature1': 2, 'feature2': 2}
]
clusterer.predict(new)
def test_predict_dataframe(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
new = {'feature1': [2],'feature2': [2]}
dataframe = pd.DataFrame(data=new)
clusterer.predict(dataframe)
def test_predict_series(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
new = {'feature1': 2, 'feature2': 2}
series = pd.Series(data=new)
clusterer.predict(series)
def test_predict_not_fitted():
clusterer = KMeansClusterer()
new = {'feature1': 2, 'feature2': 2}
series = pd.Series(data=new)
with pytest.raises(AttributeError):
clusterer.predict(series)
def test_predict_bad_input_type(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
with pytest.raises(TypeError):
clusterer.predict([0 ,1])
def test_predict_bad_input_value(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
with pytest.raises(ValueError):
clusterer.predict(pd.Series([0, 1, 2]))
def test_get_clusters_not_fitted():
clusterer = KMeansClusterer()
with pytest.raises(AttributeError):
clusterer.get_clusters()
def test_get_features(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
assert clusterer.get_features() == ['feature1', 'feature2']
def test_inertia(df):
clusterer = KMeansClusterer()
clusterer.fit(df)
assert isinstance(clusterer.inertia(), float)
| 3,019 | 1,121 |
from django.db import models
#TODO application (candidatura)
class Candidato(models.Model):
nome = models.CharField(max_length = 120, null=True)
ra = models.CharField(max_length = 80, null=True)
email = models.CharField(max_length = 80, null=True)
celular = models.CharField(max_length = 11, null=True)
codigo_acesso = models.CharField(max_length = 120, null=True)
foto = models.ForeignKey(to='ArquivosFoto', related_name="candidatos", null=True, blank=True) #onetomany
#via token
turma = models.ForeignKey(to='Turma', related_name="candidatos", null=True, blank=True) #onetomany
aluno = models.ForeignKey(to='Aluno', related_name="candidatos", null=True, blank=True) #onetomany
# not included in the form
matricula_aceita = models.BooleanField(default=False)
confirmado = models.BooleanField(default=False)
def __str__(self):
return "{} - {}".format(self.nome, self.email, self.turma)
class Meta:
db_table = 'Candidado'
from .ArquivosFoto import ArquivosFoto
from .Candidato import Candidato
from .Aluno import Aluno | 1,081 | 374 |
from .baseline import BaselineCheck
from .checksum import ChecksumCheck
from .heuristic import DuplicateRowCheck, DeviatedValueCheck, TruncatedValueCheck
from .regulation import BlacklistedValueCheck, SequentialValueCheck, RowConstraintCheck
| 242 | 63 |
from unittest.mock import MagicMock, Mock
import six
from django.contrib.auth.models import User
from django.core.exceptions import SuspiciousOperation
from django.test import SimpleTestCase, Client, TransactionTestCase
from django.urls import reverse, resolve
from branch.models import Branch
from integration.views import receive_webhook_request
from integration.webhook_handler import WebhookHandler, _format_event
from project.models import Project
from repository.models import Repository
class TestWebhookHandler(SimpleTestCase):
def test_if_secret_not_initialized(self):
webhook_handler = WebhookHandler()
self.assertIsNone(webhook_handler.secret)
def test_if_secret_properly_initialized(self):
webhook_handler = WebhookHandler(secret="test-secret")
self.assertIsNotNone(webhook_handler.secret)
self.assertIsInstance(webhook_handler.secret, bytes)
self.assertEqual(webhook_handler.secret, "test-secret".encode("utf-8"))
def test_format_event_if_key_is_present(self):
data = {'pusher': {'name': 'test_name'}, 'ref': 'test_ref',
'repository': {'full_name': 'test_repository_full_name'}}
push_event_description = _format_event("push", data)
self.assertEqual(push_event_description, "test_name pushed test_ref in test_repository_full_name")
def test_format_event_if_key_is_not_present(self):
push_event_description = _format_event("non-existing-key", {})
self.assertEqual(push_event_description, "non-existing-key")
def test__get_header_if_key_is_present(self):
request = Mock()
request.headers = {WebhookHandler.X_GITHUB_DELIVERY: 'some-guid'}
header_value = WebhookHandler._get_header(WebhookHandler.X_GITHUB_DELIVERY, request)
self.assertEqual(header_value, 'some-guid')
def test__get_header_if_key_is_not_present(self):
with self.assertRaisesMessage(SuspiciousOperation, f'Missing header: {WebhookHandler.X_GITHUB_DELIVERY}'):
request = Mock()
request.headers = {}
WebhookHandler._get_header(WebhookHandler.X_GITHUB_DELIVERY, request)
def test__get_digest_if_secret_is_present(self):
request = Mock()
request.body = '{"key": "value"}'.encode('utf-8')
webhook_handler = WebhookHandler(secret="test-secret")
digest = webhook_handler._get_digest(request)
self.assertIsNotNone(digest)
self.assertIsInstance(digest, six.text_type)
def test__get_digest_if_secret_is_not_present(self):
request = Mock()
request.body = {}
webhook_handler = WebhookHandler()
digest = webhook_handler._get_digest(request)
self.assertIsNone(digest)
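    # For reference, GitHub's X-Hub-Signature-256 header carries an HMAC-SHA256
    # of the raw request body keyed by the webhook secret; a minimal sketch:
    # import hashlib, hmac
    # digest = hmac.new(b"test-secret", b'{"key": "value"}', hashlib.sha256).hexdigest()
    # header = f"sha256={digest}"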
def test_handle_if_no_signature(self):
request = Mock()
request.headers = {WebhookHandler.X_HUB_SIGNATURE_256: 'incorrect-digest'}
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value="sha256-digest")
with self.assertRaisesMessage(SuspiciousOperation, "Signature required."):
webhook_handler.handle(request)
def test_handle_if_signature_invalid(self):
request = Mock()
request.headers = {WebhookHandler.X_HUB_SIGNATURE_256: 'sha256=incorrect-digest'}
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value="sha256-digest")
with self.assertRaisesMessage(SuspiciousOperation, "Invalid signature."):
webhook_handler.handle(request)
def test_handle_if_event_type_missing(self):
request = Mock()
request.headers = {}
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value=None)
with self.assertRaisesMessage(SuspiciousOperation, f'Missing header: {WebhookHandler.X_GITHUB_EVENT}'):
webhook_handler.handle(request)
def test_handle_when_content_type_form(self):
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value=None)
request = Mock()
request.headers = {'content-type': 'application/x-www-form-urlencoded', WebhookHandler.X_GITHUB_EVENT: 'push'}
with self.assertRaisesMessage(SuspiciousOperation, "Unsupported operation."):
webhook_handler.handle(request)
def test_handle_when_content_type_json_and_data_invalid(self):
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value=None)
request = Mock()
request.headers = {
'content-type': 'application/json',
'X-Github-Delivery': 'some-guid',
WebhookHandler.X_GITHUB_EVENT: 'push'
}
request.body = ''.encode('utf-8')
with self.assertRaisesMessage(SuspiciousOperation, "Request body must contain valid JSON data."):
webhook_handler.handle(request)
def test_handle_when_content_type_json_and_data_valid(self):
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value=None)
request = Mock()
request.headers = {
'content-type': 'application/json',
'X-Github-Delivery': 'some-guid',
WebhookHandler.X_GITHUB_EVENT: 'push'
}
request.body = '{"key": "value"}'.encode('utf-8')
webhook_handler.handle(request)
def test_if_webhook_handler_handle_called(self):
webhook_handler = WebhookHandler()
webhook_handler.handle = MagicMock(return_value=None)
webhook_handler.handle(request=Mock())
webhook_handler.handle.assert_called_once()
def test_if_webhook_handler_called_all_registered_hook_handlers(self):
webhook_handler = WebhookHandler()
webhook_handler._get_digest = MagicMock(return_value=None)
request = Mock()
request.headers = {
'content-type': 'application/json',
'X-Github-Delivery': 'some-guid',
WebhookHandler.X_GITHUB_EVENT: 'push'
}
request.body = '{"key": "value"}'.encode('utf-8')
@webhook_handler.hook(event_type="push")
@MagicMock
def first_decorated_func(): pass
@webhook_handler.hook(event_type="push")
@MagicMock
def second_decorated_func(): pass
@webhook_handler.hook(event_type="ping")
@MagicMock
def third_decorated_func(): pass
webhook_handler.handle(request)
first_decorated_func.assert_called_once()
second_decorated_func.assert_called_once()
third_decorated_func.assert_not_called()
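# ---------------------------------------------------------------------------
# Hedged sketch, not the project's actual implementation: a minimal handler
# that satisfies the contract the tests above encode (optional HMAC-SHA256
# signature verification, a mandatory X-GitHub-Event header, JSON-only
# payloads, and per-event dispatch to registered hooks). The class name
# _SketchWebhookHandler, its internals, and the header values are assumptions
# for illustration only.
import hashlib
import hmac
import json

from django.core.exceptions import SuspiciousOperation


class _SketchWebhookHandler:
    X_GITHUB_EVENT = 'X-Github-Event'
    X_GITHUB_DELIVERY = 'X-Github-Delivery'
    X_HUB_SIGNATURE_256 = 'X-Hub-Signature-256'

    def __init__(self, secret=None):
        self.secret = secret
        self.hooks = {}  # event type -> list of registered callables

    def hook(self, event_type):
        # Decorator that registers a callable for a single event type.
        def register(func):
            self.hooks.setdefault(event_type, []).append(func)
            return func
        return register

    @staticmethod
    def _get_header(name, request):
        # A missing header is reported as a SuspiciousOperation, as the
        # _get_header test above expects.
        try:
            return request.headers[name]
        except KeyError:
            raise SuspiciousOperation(f'Missing header: {name}')

    def _get_digest(self, request):
        # A digest is only computed when a shared secret was configured.
        if self.secret is None:
            return None
        return hmac.new(self.secret.encode('utf-8'),
                        request.body, hashlib.sha256).hexdigest()

    def handle(self, request):
        digest = self._get_digest(request)
        if digest is not None:
            signature = request.headers.get(self.X_HUB_SIGNATURE_256, '')
            if not signature.startswith('sha256='):
                raise SuspiciousOperation("Signature required.")
            if not hmac.compare_digest('sha256=' + digest, signature):
                raise SuspiciousOperation("Invalid signature.")
        event_type = self._get_header(self.X_GITHUB_EVENT, request)
        if request.headers.get('content-type') != 'application/json':
            raise SuspiciousOperation("Unsupported operation.")
        try:
            payload = json.loads(request.body)
        except ValueError:
            raise SuspiciousOperation("Request body must contain valid JSON data.")
        for registered_hook in self.hooks.get(event_type, []):
            registered_hook(payload)
# ---------------------------------------------------------------------------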
class TestIntegrationURLs(SimpleTestCase):
def test_notify_url(self):
notify_url = reverse('notify')
self.assertEqual(resolve(notify_url).func, receive_webhook_request)
class TestIntegrationViews(TransactionTestCase):
def setUp(self):
self.client = Client()
self.notify_url = reverse('notify')
self.user = User.objects.create_user('test_username', 'test@email.com', 'test_password')
self.repository = Repository.objects.create(
url="https://github.com/fivkovic/uks-demo",
name="uks-demo",
description="uks-demo repository description",
is_public=True)
self.project = Project.objects.create(
name="UKS DEMO PROJECT",
description="UKS demo project description",
is_public=True,
wiki_content="Wiki",
repository=self.repository,
owner=self.user)
self.branch = Branch.objects.create(name="main", repository=self.repository)
self.task = None
def test_receive_webhook_request_view(self):
# Django's test client passes HTTP headers as 'HTTP_'-prefixed extra kwargs.
headers = {
'HTTP_' + WebhookHandler.X_GITHUB_EVENT: 'push',
'HTTP_' + WebhookHandler.X_GITHUB_DELIVERY: 'some-guid'
}
response = self.client.post(
self.notify_url,
INTEGRATION_TEST_REQUEST_BODY,
content_type='application/json',
**headers)
self.assertEqual(response.status_code, 204)
INTEGRATION_TEST_REQUEST_BODY = {
"ref": "refs/heads/main",
"before": "2f781a5371291ce8ba3f3a8acdf8bd673889dcaf",
"after": "9549a348a9c4e175cf8a27e45bab93407d178767",
"repository": {
"id": 339193534,
"node_id": "MDEwOlJlcG9zaXRvcnkzMzkxOTM1MzQ=",
"name": "uks-demo",
"full_name": "fivkovic/uks-demo",
"private": False,
"owner": {
"name": "fivkovic",
"email": "f.ivkovic16@gmail.com",
"login": "fivkovic",
"id": 17569172,
"node_id": "MDQ6VXNlcjE3NTY5MTcy",
"avatar_url": "https://avatars.githubusercontent.com/u/17569172?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/fivkovic",
"html_url": "https://github.com/fivkovic",
"followers_url": "https://api.github.com/users/fivkovic/followers",
"following_url": "https://api.github.com/users/fivkovic/following{/other_user}",
"gists_url": "https://api.github.com/users/fivkovic/gists{/gist_id}",
"starred_url": "https://api.github.com/users/fivkovic/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fivkovic/subscriptions",
"organizations_url": "https://api.github.com/users/fivkovic/orgs",
"repos_url": "https://api.github.com/users/fivkovic/repos",
"events_url": "https://api.github.com/users/fivkovic/events{/privacy}",
"received_events_url": "https://api.github.com/users/fivkovic/received_events",
"type": "User",
"site_admin": False
},
"html_url": "https://github.com/fivkovic/uks-demo",
"description": "Demo repository for testing UKS project",
"fork": False,
"url": "https://github.com/fivkovic/uks-demo",
"forks_url": "https://api.github.com/repos/fivkovic/uks-demo/forks",
"keys_url": "https://api.github.com/repos/fivkovic/uks-demo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/fivkovic/uks-demo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/fivkovic/uks-demo/teams",
"hooks_url": "https://api.github.com/repos/fivkovic/uks-demo/hooks",
"issue_events_url": "https://api.github.com/repos/fivkovic/uks-demo/issues/events{/number}",
"events_url": "https://api.github.com/repos/fivkovic/uks-demo/events",
"assignees_url": "https://api.github.com/repos/fivkovic/uks-demo/assignees{/user}",
"branches_url": "https://api.github.com/repos/fivkovic/uks-demo/branches{/branch}",
"tags_url": "https://api.github.com/repos/fivkovic/uks-demo/tags",
"blobs_url": "https://api.github.com/repos/fivkovic/uks-demo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/fivkovic/uks-demo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/fivkovic/uks-demo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/fivkovic/uks-demo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/fivkovic/uks-demo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/fivkovic/uks-demo/languages",
"stargazers_url": "https://api.github.com/repos/fivkovic/uks-demo/stargazers",
"contributors_url": "https://api.github.com/repos/fivkovic/uks-demo/contributors",
"subscribers_url": "https://api.github.com/repos/fivkovic/uks-demo/subscribers",
"subscription_url": "https://api.github.com/repos/fivkovic/uks-demo/subscription",
"commits_url": "https://api.github.com/repos/fivkovic/uks-demo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/fivkovic/uks-demo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/fivkovic/uks-demo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/fivkovic/uks-demo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/fivkovic/uks-demo/contents/{+path}",
"compare_url": "https://api.github.com/repos/fivkovic/uks-demo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/fivkovic/uks-demo/merges",
"archive_url": "https://api.github.com/repos/fivkovic/uks-demo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/fivkovic/uks-demo/downloads",
"issues_url": "https://api.github.com/repos/fivkovic/uks-demo/issues{/number}",
"pulls_url": "https://api.github.com/repos/fivkovic/uks-demo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/fivkovic/uks-demo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/fivkovic/uks-demo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/fivkovic/uks-demo/labels{/name}",
"releases_url": "https://api.github.com/repos/fivkovic/uks-demo/releases{/id}",
"deployments_url": "https://api.github.com/repos/fivkovic/uks-demo/deployments",
"created_at": 1613419653,
"updated_at": "2021-02-15T20:07:41Z",
"pushed_at": 1613420915,
"git_url": "git://github.com/fivkovic/uks-demo.git",
"ssh_url": "git@github.com:fivkovic/uks-demo.git",
"clone_url": "https://github.com/fivkovic/uks-demo.git",
"svn_url": "https://github.com/fivkovic/uks-demo",
"homepage": None,
"size": 0,
"stargazers_count": 0,
"watchers_count": 0,
"language": None,
"has_issues": True,
"has_projects": True,
"has_downloads": True,
"has_wiki": True,
"has_pages": False,
"forks_count": 0,
"mirror_url": None,
"archived": False,
"disabled": False,
"open_issues_count": 0,
"license": {
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit",
"node_id": "MDc6TGljZW5zZTEz"
},
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "main",
"stargazers": 0,
"master_branch": "main"
},
"pusher": {
"name": "fivkovic",
"email": "f.ivkovic16@gmail.com"
},
"sender": {
"login": "fivkovic",
"id": 17569172,
"node_id": "MDQ6VXNlcjE3NTY5MTcy",
"avatar_url": "https://avatars.githubusercontent.com/u/17569172?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/fivkovic",
"html_url": "https://github.com/fivkovic",
"followers_url": "https://api.github.com/users/fivkovic/followers",
"following_url": "https://api.github.com/users/fivkovic/following{/other_user}",
"gists_url": "https://api.github.com/users/fivkovic/gists{/gist_id}",
"starred_url": "https://api.github.com/users/fivkovic/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fivkovic/subscriptions",
"organizations_url": "https://api.github.com/users/fivkovic/orgs",
"repos_url": "https://api.github.com/users/fivkovic/repos",
"events_url": "https://api.github.com/users/fivkovic/events{/privacy}",
"received_events_url": "https://api.github.com/users/fivkovic/received_events",
"type": "User",
"site_admin": False
},
"created": False,
"deleted": False,
"forced": False,
"base_ref": None,
"compare": "https://github.com/fivkovic/uks-demo/compare/2f781a537129...9549a348a9c4",
"commits": [
{
"id": "9549a348a9c4e175cf8a27e45bab93407d178767",
"tree_id": "20f7ae1a25f3c039e7d6442440672bd012c3a78d",
"distinct": True,
"message": "First test commit closes #1 #2",
"timestamp": "2021-02-15T21:12:35+01:00",
"url": "https://github.com/fivkovic/uks-demo/commit/9549a348a9c4e175cf8a27e45bab93407d178767",
"author": {
"name": "Filip Ivkovic",
"email": "fivkovic@uns.ac.rs",
"username": "fivkovic"
},
"committer": {
"name": "Filip Ivkovic",
"email": "fivkovic@uns.ac.rs",
"username": "fivkovic"
},
"added": [
"F1.txt",
"F2.txt"
],
"removed": [],
"modified": []
}
],
"head_commit": {
"id": "9549a348a9c4e175cf8a27e45bab93407d178767",
"tree_id": "20f7ae1a25f3c039e7d6442440672bd012c3a78d",
"distinct": True,
"message": "First test commit closes #1 #2",
"timestamp": "2021-02-15T21:12:35+01:00",
"url": "https://github.com/fivkovic/uks-demo/commit/9549a348a9c4e175cf8a27e45bab93407d178767",
"author": {
"name": "Filip Ivkovic",
"email": "fivkovic@uns.ac.rs",
"username": "fivkovic"
},
"committer": {
"name": "Filip Ivkovic",
"email": "fivkovic@uns.ac.rs",
"username": "fivkovic"
},
"added": [
"F1.txt",
"F2.txt"
],
"removed": [],
"modified": []
}
} | 17,447 | 6,231 |
import FWCore.ParameterSet.Config as cms
#
# produce the hitFit hypothesis with all
# necessary ingredients
#
## standard sequence to perform the kinematic fit
import TopQuarkAnalysis.TopHitFit.TtSemiLepHitFitProducer_Muons_cfi
hitFitTtSemiLepEventHypothesis = TopQuarkAnalysis.TopHitFit.TtSemiLepHitFitProducer_Muons_cfi.hitFitTtSemiLepEvent.clone()
## configure hitFit hypothesis
from TopQuarkAnalysis.TopJetCombination.TtSemiLepHypHitFit_cfi import *
## make hypothesis
makeHypothesis_hitFitTask = cms.Task(
hitFitTtSemiLepEventHypothesis,
ttSemiLepHypHitFit
)
makeHypothesis_hitFit = cms.Sequence(makeHypothesis_hitFitTask)
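## Hedged usage sketch: in a top-level configuration this sequence would
## typically be scheduled on a path after loading this fragment; the process
## and path names below are hypothetical:
##   process = cms.Process("TEST")
##   process.load("TopQuarkAnalysis.TopJetCombination.TtSemiLepHypHitFit_cff")
##   process.p = cms.Path(process.makeHypothesis_hitFit)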
| 628 | 261 |
import ajax_select.urls
from django.contrib import admin
from django.urls import include, path
import tracker.urls
urlpatterns = [
path('tracker/', include(tracker.urls)),
path('admin/lookups/', include(ajax_select.urls)),
path('admin/', admin.site.urls),
]
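# Hedged usage sketch: with this URLconf, admin pages resolve under /admin/
# and app routes under /tracker/. For example (route names inside
# tracker.urls are project-specific and not shown here):
#   from django.urls import reverse
#   reverse('admin:index')  # -> '/admin/'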
| 272 | 86 |