id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6512722 | from pyspark import SparkContext, SQLContext
import pyspark.sql.functions as sql
import pyspark.sql.types as types

# Subset job: read an iDigBio parquet snapshot, keep the first 100k records,
# and write them back out as a smaller parquet file for quick experimentation.
sc = SparkContext(appName="SmallIDB")
sqlContext = SQLContext(sc)

# Snapshot date of the source parquet file.
idb_df_version = "20161119"
idb_df = sqlContext.read.load("/guoda/data/idigbio-{0}.parquet"
                              .format(idb_df_version))

#count = idb_df.count()

(idb_df
 .limit(100000)
 .write
 .mode("overwrite")
 .parquet("/guoda/data/idigbio-{}-100k.parquet".format(idb_df_version))
 )

# 4:45 on mesos1 with 32 cores
# time HADOOP_USER_NAME=hdfs spark-submit --master mesos://mesos01.acis.ufl.edu:5050 --executor-memory 20G --driver-memory 10G --total-executor-cores 32 idb_subset.py
| StarcoderdataPython |
8196355 | <gh_stars>1-10
import os
import tempfile
import unittest
from unittest import mock
from dbt import linker
try:
from queue import Empty
except ImportError:
from Queue import Empty
def _mock_manifest(nodes):
return mock.MagicMock(nodes={
n: mock.MagicMock(unique_id=n) for n in nodes
})
class LinkerTest(unittest.TestCase):
    """Tests for dbt's Linker: node/edge bookkeeping, graph (de)serialisation,
    and the ordering guarantees of the GraphQueue it produces."""

    def setUp(self):
        # Force every edge to count as a blocking dependency so queue ordering
        # in the tests below is determined purely by the graph structure.
        self.patcher = mock.patch.object(linker, 'is_blocking_dependency')
        self.is_blocking_dependency = self.patcher.start()
        self.is_blocking_dependency.return_value = True
        self.linker = linker.Linker()

    def tearDown(self):
        self.patcher.stop()

    def test_linker_add_node(self):
        # All added nodes are present, and nothing else is.
        expected_nodes = ['A', 'B', 'C']
        for node in expected_nodes:
            self.linker.add_node(node)
        actual_nodes = self.linker.nodes()
        for node in expected_nodes:
            self.assertIn(node, actual_nodes)
        self.assertEqual(len(actual_nodes), len(expected_nodes))

    def test_linker_write_and_read_graph(self):
        # The node set survives a write/read round trip through a temp file.
        expected_nodes = ['A', 'B', 'C']
        for node in expected_nodes:
            self.linker.add_node(node)
        manifest = _mock_manifest('ABC')
        (fd, fname) = tempfile.mkstemp()
        os.close(fd)
        try:
            self.linker.write_graph(fname, manifest)
            new_linker = linker.from_file(fname)
        finally:
            os.unlink(fname)
        actual_nodes = new_linker.nodes()
        for node in expected_nodes:
            self.assertIn(node, actual_nodes)
        self.assertEqual(len(actual_nodes), len(expected_nodes))

    def assert_would_join(self, queue):
        """test join() without timeout risk"""
        self.assertEqual(queue.inner.unfinished_tasks, 0)

    def test_linker_add_dependency(self):
        # With edges (A,B), (A,C), (B,C) the queue yields C, then B, then A;
        # each next node becomes available only after mark_done() on the
        # previous one.
        actual_deps = [('A', 'B'), ('A', 'C'), ('B', 'C')]
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        manifest = _mock_manifest('ABC')
        queue = self.linker.as_graph_queue(manifest)
        got = queue.get(block=False)
        self.assertEqual(got.unique_id, 'C')
        with self.assertRaises(Empty):
            queue.get(block=False)
        self.assertFalse(queue.empty())
        queue.mark_done('C')
        self.assertFalse(queue.empty())
        got = queue.get(block=False)
        self.assertEqual(got.unique_id, 'B')
        with self.assertRaises(Empty):
            queue.get(block=False)
        self.assertFalse(queue.empty())
        queue.mark_done('B')
        self.assertFalse(queue.empty())
        got = queue.get(block=False)
        self.assertEqual(got.unique_id, 'A')
        with self.assertRaises(Empty):
            queue.get(block=False)
        self.assertTrue(queue.empty())
        queue.mark_done('A')
        self.assert_would_join(queue)
        self.assertTrue(queue.empty())

    def test_linker_add_disjoint_dependencies(self):
        # A node with no edges (Z) may be interleaved anywhere after B.
        actual_deps = [('A', 'B')]
        additional_node = 'Z'
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        self.linker.add_node(additional_node)
        manifest = _mock_manifest('ABZ')
        queue = self.linker.as_graph_queue(manifest)
        # the first one we get must be B, it has the longest dep chain
        first = queue.get(block=False)
        self.assertEqual(first.unique_id, 'B')
        self.assertFalse(queue.empty())
        queue.mark_done('B')
        self.assertFalse(queue.empty())
        second = queue.get(block=False)
        self.assertIn(second.unique_id, {'A', 'Z'})
        self.assertFalse(queue.empty())
        queue.mark_done(second.unique_id)
        self.assertFalse(queue.empty())
        third = queue.get(block=False)
        self.assertIn(third.unique_id, {'A', 'Z'})
        with self.assertRaises(Empty):
            queue.get(block=False)
        self.assertNotEqual(second.unique_id, third.unique_id)
        self.assertTrue(queue.empty())
        queue.mark_done(third.unique_id)
        self.assert_would_join(queue)
        self.assertTrue(queue.empty())

    def test_linker_dependencies_limited_to_some_nodes(self):
        # Limiting the queue to ['B'] yields only B; limiting to ['A', 'B']
        # yields B then A, in dependency order.
        actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        queue = self.linker.as_graph_queue(_mock_manifest('ABCD'), ['B'])
        got = queue.get(block=False)
        self.assertEqual(got.unique_id, 'B')
        self.assertTrue(queue.empty())
        queue.mark_done('B')
        self.assert_would_join(queue)
        queue_2 = self.linker.as_graph_queue(_mock_manifest('ABCD'), ['A', 'B'])
        got = queue_2.get(block=False)
        self.assertEqual(got.unique_id, 'B')
        self.assertFalse(queue_2.empty())
        with self.assertRaises(Empty):
            queue_2.get(block=False)
        queue_2.mark_done('B')
        self.assertFalse(queue_2.empty())
        got = queue_2.get(block=False)
        self.assertEqual(got.unique_id, 'A')
        self.assertTrue(queue_2.empty())
        with self.assertRaises(Empty):
            queue_2.get(block=False)
        self.assertTrue(queue_2.empty())
        queue_2.mark_done('A')
        self.assert_would_join(queue_2)

    def test_linker_bad_limit_throws_runtime_error(self):
        # Limiting to a node that is not in the graph must raise.
        actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        with self.assertRaises(RuntimeError):
            self.linker.as_graph_queue(_mock_manifest('ABCD'), ['ZZZ'])

    def test__find_cycles__cycles(self):
        actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'A')]
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        self.assertIsNotNone(self.linker.find_cycles())

    def test__find_cycles__no_cycles(self):
        actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
        for (l, r) in actual_deps:
            self.linker.dependency(l, r)
        self.assertIsNone(self.linker.find_cycles())
| StarcoderdataPython |
1631628 | <filename>pyblaze/nn/modules/__init__.py
from .distribution import TransformedNormalLoss, TransformedGmmLoss
from .gp import GradientPenalty
from .lstm import StackedLSTM, StackedLSTMCell
from .made import MADE
from .normalizing import NormalizingFlow
from .residual import LinearResidual
from .transforms import AffineTransform, PlanarTransform, RadialTransform, \
AffineCouplingTransform1d, MaskedAutoregressiveTransform1d, BatchNormTransform1d, \
LeakyReLUTransform, PReLUTransform, FlipTransform1d
from .vae import VAELoss
from .view import View
from .wasserstein import WassersteinLossGenerator, WassersteinLossCritic
| StarcoderdataPython |
1735039 | from django.db import models
from django.contrib.auth.models import User, AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.utils.http import urlquote
from django.utils.text import slugify
import datetime
import pytz
# Create your models here.
class AuthorManager(BaseUserManager):
    """Manager for the Author model; accounts are keyed by email address."""

    def _create_user(self, email, password, is_staff, is_superuser,
                     **extra_fields):
        """Create, configure and persist an Author with the given
        email/password and permission flags."""
        timestamp = datetime.datetime.now(pytz.utc)
        if not email:
            raise ValueError('The given email must be set')
        normalized = self.normalize_email(email)
        user = self.model(
            email=normalized,
            is_staff=is_staff,
            is_active=True,
            is_superuser=is_superuser,
            last_login=timestamp,
            date_joined=timestamp,
            **extra_fields
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create a regular account (neither staff nor superuser)."""
        return self._create_user(email, password, False, False, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create an account with both staff and superuser rights."""
        return self._create_user(email, password, True, True, **extra_fields)
class Author(AbstractBaseUser, PermissionsMixin):
    '''
    The user who can write blogposts

    Authentication uses the email address instead of a username
    (see USERNAME_FIELD below).
    '''
    email = models.EmailField(max_length=255, unique=True)
    first_name = models.CharField('first name', max_length=50, blank=False) # No delete
    last_name = models.CharField('last name', max_length=50, blank=False) # No delete

    def __str__(self):
        return u'{0}'.format(self.email)

    def __unicode__(self):
        return u'{0}'.format(self.email)

    # NOTE: these field definitions deliberately come after the methods above;
    # Django collects model fields regardless of their position in the body.
    is_staff = models.BooleanField('staff status', default=False,
        help_text='Designates whether the user can log into this admin '
                  'site.')
    is_active = models.BooleanField('active', default=True,
        help_text='Designates whether this user should be treated as '
                  'active. Unselect this instead of deleting accounts.')
    date_joined = models.DateTimeField('date joined', auto_now_add=True)

    objects = AuthorManager()

    # Log in with the email address; no extra prompts for createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = 'author'
        verbose_name_plural = 'authors'

    def get_absolute_url(self):
        # URL-quote the email since it contains '@' and '.'.
        return "/author/%s/" % urlquote(self.email)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.email
class Category(models.Model):
    """A blog post category; the slug is derived from the name on first save."""
    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=250, blank=True, null=True, unique=True)

    class Meta:
        verbose_name_plural = "Categories"

    def save(self, *args, **kwargs):
        # Slug is generated only when the row is first created (no id yet);
        # renaming the category later leaves the slug unchanged.
        if not self.id:
            self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)

    def __str__(self):
        return u'{0}'.format(self.name)

    def __unicode__(self):
        return u'{0}'.format(self.name)
class Tag(models.Model):
    """A free-form tag for posts; the slug is derived from the name on first save."""
    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=250, blank=True, null=True, unique=True)

    def save(self, *args, **kwargs):
        # Slug is generated only on creation, mirroring Category.save().
        if not self.id:
            self.slug = slugify(self.name)
        super(Tag, self).save(*args, **kwargs)

    def __str__(self):
        return u'{0}'.format(self.name)

    def __unicode__(self):
        return u'{0}'.format(self.name)
class Post(models.Model):
    """A blog post; slug and summary are derived from title/body on first save."""
    title = models.CharField(max_length=250, blank=False, null=False, unique=True)
    summary = models.CharField(max_length=1000, blank=True, null=True)
    slug = models.SlugField(max_length=250, blank=True, null=True, unique=True)
    body = models.TextField(blank=False, null=False)
    main_image = models.ImageField(upload_to='blog_images/', blank=True, null=True)
    publish = models.BooleanField(default=False)
    categories = models.ManyToManyField(Category)
    tags = models.ManyToManyField(Tag)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name_plural = "Posts"
        ordering = ('created_at',)

    def save(self, *args, **kwargs):
        # On creation only: slug comes from the title, summary is the first
        # 150 characters of the body.
        # NOTE(review): the '...' suffix is appended even when the body is
        # shorter than 150 characters — confirm this is intended.
        if not self.id:
            self.slug = slugify(self.title)
            self.summary = self.body[:150] + '...'
        super(Post, self).save(*args, **kwargs)

    def __str__(self):
        return u'{0}'.format(self.title)

    def __unicode__(self):
        return u'{0}'.format(self.title)
96537 | <filename>train.py
import tensorflow as tf
import numpy as np
np.random.seed(1234)
import os
import pickle
from importlib import import_module
from log import Logger
from batching import *
# Command-line flags (TF 1.x flags API).
tf.flags.DEFINE_string("data_dir", "./data", "The data dir.")
tf.flags.DEFINE_string("sub_dir", "WikiPeople", "The sub data dir.")
tf.flags.DEFINE_string("dataset_name", "WikiPeople", "The name of the dataset.")
tf.flags.DEFINE_string("wholeset_name", "WikiPeople_permutate", "The name of the whole dataset for negative sampling or computing the filtered metrics.")
tf.flags.DEFINE_string("model_name", "WikiPeople", "")
tf.flags.DEFINE_integer("embedding_dim", 100, "The embedding dimension.")
tf.flags.DEFINE_integer("hrtFCNs_layers", 1, "The number of layers in hrt-FCNs")
tf.flags.DEFINE_integer("hrtavFCNs_layers", 1, "The number of layers in hrtav-FCNs")
tf.flags.DEFINE_integer("g_theta_dim", 1000, "The dimension of the interaction vector o_hrtav.")
tf.flags.DEFINE_float("weight", 0.3, "The weight factor of the scores")
tf.flags.DEFINE_integer("batch_size", 128, "The batch size.")
tf.flags.DEFINE_boolean("is_trainable", True, "")
tf.flags.DEFINE_float("learning_rate", 0.0001, "The learning rate.")
tf.flags.DEFINE_integer("n_epochs", 5000, "The number of training epochs.")
tf.flags.DEFINE_boolean("if_restart", False, "")
tf.flags.DEFINE_integer("start_epoch", 0, "Change this when restarting from halfway.")
tf.flags.DEFINE_integer("saveStep", 100, "Save the model every saveStep.")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement.")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices.")
tf.flags.DEFINE_string("model_postfix", "", "Which model to load.")
tf.flags.DEFINE_string("run_folder", "./", "The dir to store models.")
FLAGS = tf.flags.FLAGS
# NOTE(review): FLAGS._parse_flags() and FLAGS.__flags are internal TF 1.4-era
# APIs; they were removed in later TF 1.x releases — confirm the pinned TF version.
FLAGS._parse_flags()
# Select the model module variant, e.g. "model" or "model_ablation".
model = import_module("model"+FLAGS.model_postfix)
# The log file to store the parameters and the training details of each epoch
logger = Logger("logs", "run_"+FLAGS.model_name+"_"+str(FLAGS.embedding_dim)+"_"+str(FLAGS.hrtFCNs_layers)+"_"+str(FLAGS.hrtavFCNs_layers)+"_"+str(FLAGS.g_theta_dim)+"_"+str(FLAGS.weight)+"_"+str(FLAGS.batch_size)+"_"+str(FLAGS.learning_rate)).logger
logger.info("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    logger.info("{}={}".format(attr.upper(), value))
# Load training data
logger.info("Loading data...")
afolder = FLAGS.data_dir + "/"
if FLAGS.sub_dir != "":
    afolder = FLAGS.data_dir + "/" + FLAGS.sub_dir + "/"
with open(afolder + FLAGS.dataset_name + ".bin", "rb") as fin:
    data_info = pickle.load(fin)
train = data_info["train_facts"]
entities_indexes = data_info["entities_indexes"]
relations_indexes = data_info["relations_indexes"]
attr_val = data_info["attr_val"]
rel_head = data_info["rel_head"]
rel_tail = data_info["rel_tail"]
entity_array = np.array(list(entities_indexes.values()))
relation_array = np.array(list(relations_indexes.values()))
# Load the whole dataset for negative sampling in "batching.py"
with open(afolder + FLAGS.wholeset_name + ".bin", "rb") as fin:
    data_info1 = pickle.load(fin)
whole_train = data_info1["train_facts"]
logger.info("Loading data... finished!")
with tf.Graph().as_default():
    tf.set_random_seed(1234)
    session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement)
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Build the NeuInfer model; the batch doubles to hold positive and
        # negative samples together.
        aNeuInfer = model.NeuInfer(
            n_entities=len(entities_indexes),
            n_relations=len(relations_indexes),
            embedding_dim=FLAGS.embedding_dim,
            hrtFCNs_layers=FLAGS.hrtFCNs_layers,
            hrtavFCNs_layers=FLAGS.hrtavFCNs_layers,
            g_theta_dim=FLAGS.g_theta_dim,
            weight=FLAGS.weight,
            batch_size=FLAGS.batch_size*2,
            is_trainable=FLAGS.is_trainable)
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        grads_and_vars = optimizer.compute_gradients(aNeuInfer.loss)
        train_op = optimizer.apply_gradients(grads_and_vars)
        # Output directory for models and summaries
        out_dir = os.path.abspath(os.path.join(FLAGS.run_folder, "runs", FLAGS.model_name))
        logger.info("Writing to {}\n".format(out_dir))
        # Train Summaries
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        def train_step(x_batch, y_batch, arity):
            """Run one optimisation step on a batch and return its loss."""
            feed_dict = {
                aNeuInfer.input_x: x_batch,
                aNeuInfer.input_y: y_batch,
                aNeuInfer.arity: arity,
            }
            _, loss = sess.run([train_op, aNeuInfer.loss], feed_dict)
            return loss

        # If restarting from a saved epoch, load that checkpoint first.
        if FLAGS.if_restart == True:
            _file = checkpoint_prefix + "-" + str(FLAGS.start_epoch)
            aNeuInfer.saver.restore(sess, _file)

        # Pre-compute the number of batches for each arity bucket of `train`.
        n_batches_per_epoch = []
        for i in train:
            ll = len(i)
            if ll == 0:
                n_batches_per_epoch.append(0)
            else:
                n_batches_per_epoch.append(int((ll - 1) / FLAGS.batch_size) + 1)
        for epoch in range(FLAGS.start_epoch, FLAGS.n_epochs):
            train_loss = 0
            for i in range(len(train)):
                train_batch_indexes = np.array(list(train[i].keys())).astype(np.int32)
                train_batch_values = np.array(list(train[i].values())).astype(np.float32)
                for batch_num in range(n_batches_per_epoch[i]):
                    arity = i+2 # 2-ary in index 0
                    x_batch, y_batch = Batch_Loader(train_batch_indexes, train_batch_values, entities_indexes, relations_indexes, attr_val, rel_head, rel_tail, FLAGS.batch_size, arity, whole_train[i])
                    tmp_loss = train_step(x_batch, y_batch, arity)
                    train_loss = train_loss + tmp_loss
            logger.info("nepoch: "+str(epoch+1)+", trainloss: "+str(train_loss))
            if (epoch+1) % FLAGS.saveStep == 0:
                path = aNeuInfer.saver.save(sess, checkpoint_prefix, global_step=epoch+1)
                logger.info("Saved model checkpoint to {}\n".format(path))
        # BUG FIX: the original wrote `train_summary_writer.close` (no call),
        # which only referenced the bound method — the writer was never closed.
        train_summary_writer.close()
| StarcoderdataPython |
9798350 | <filename>kill_jobs.py
#!/usr/bin/env python3
# Copyright 2020, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import click_log
from cosmicops import logging
from cosmicops.kill_jobs import kill_jobs
# CLI entry point: kill all asynchronous jobs attached to a given instance.
# Dry-run is the default; an explicit --exec is required to make changes.
@click.command()
@click.option('--profile', '-p', metavar='<name>', help='Name of the configuration profile containing the credentials')
@click.option('--dry-run/--exec', is_flag=True, default=True, show_default=True, help='Enable/disable dry-run')
@click_log.simple_verbosity_option(logging.getLogger(), default="INFO", show_default=True)
@click.argument('instance_id')
def main(profile, dry_run, instance_id):
    """Kills all jobs related to INSTANCE_ID"""
    click_log.basic_config()
    if dry_run:
        logging.warning('Running in dry-run mode, will only show changes')
    # Delegate the actual work to the cosmicops helper.
    kill_jobs(profile, dry_run, instance_id)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3371232 | import os
import glob
import random
import time
import json
from datetime import datetime
from statistics import mean
import argparse
from PIL import Image
import numpy as np
from scipy.io import loadmat
import cv2
import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from models import Generator
from utils import tensor2image
# Command-line interface and derived configuration.
parser = argparse.ArgumentParser('Options for finetuning GazeNet++ in PyTorch...')
parser.add_argument('--dataset-root-path', type=str, default=None, help='path to dataset')
parser.add_argument('--output-dir', type=str, default=None, help='output directory for model and logs')
parser.add_argument('--snapshot-dir', type=str, default=None, help='directory with pre-trained model snapshots')
parser.add_argument('--no-cuda', action='store_true', default=False, help='do not use cuda for training')
parser.add_argument('--size', type=int, default=224, help='size of the data crop (squared assumed)')
args = parser.parse_args()

# check args
if args.dataset_root_path is None:
    assert False, 'Path to dataset not provided!'

# determine if ir or rgb data (inferred from the dataset directory name)
args.dataset_root_path = os.path.normpath(args.dataset_root_path)
if 'ir_' in args.dataset_root_path:
    args.data_type = 'ir'
    args.nc = 1  # single channel (infrared)
else:
    args.data_type = 'rgb'
    args.nc = 3  # three channels (RGB)

# setup args
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.output_dir is None:
    # Default output: sibling directory named "<dataset>_fake".
    args.output_dir = os.path.join(os.path.dirname(args.dataset_root_path), os.path.basename(args.dataset_root_path) + '_fake')
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)
else:
    assert False, 'Output directory already exists!'
# validation function
def infer(netG_B2A, im_path):
    """Run generator netG_B2A on the image at im_path and write the translated
    image to the mirrored path under args.output_dir.

    Assumes the destination sub-directory already exists (the caller creates it).
    """
    # NOTE(review): transforms_ is first a list, then rebound to the composed
    # pipeline; the single-value mean/std presumably broadcasts over all
    # channels — confirm with the pinned torchvision version.
    transforms_ = [ transforms.Resize(args.size, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ]
    transforms_ = transforms.Compose(transforms_)
    real_im = Image.open(im_path)
    data = transforms_(real_im)
    if args.cuda:
        data = data.cuda()
    data = data.unsqueeze(0)  # add a batch dimension
    data = data[:, :args.nc, :, :]  # keep only the expected channel count
    # do the forward pass
    fake_data = netG_B2A(data)
    fake_im = tensor2image(fake_data.detach(), np.array([0.5 for _ in range(args.nc)], dtype='float32'),
                           np.array([0.5 for _ in range(args.nc)], dtype='float32'))
    fake_im = np.transpose(fake_im, (1, 2, 0))  # CHW -> HWC
    fake_im = fake_im[:, :, ::-1]  # reverse channel order for cv2.imwrite
    out_path = os.path.join(args.output_dir, im_path[len(args.dataset_root_path) + 1:])
    cv2.imwrite(out_path, fake_im)
    return
if __name__ == '__main__':
    # get the model, load pretrained weights, and convert it into cuda for if necessary
    netG_B2A = Generator(args.nc, args.nc)
    if args.snapshot_dir is not None:
        if os.path.exists(os.path.join(args.snapshot_dir, 'netG_B2A.pth')):
            # strict=False tolerates missing/unexpected keys in the snapshot.
            netG_B2A.load_state_dict(torch.load(os.path.join(args.snapshot_dir, 'netG_B2A.pth')), strict=False)
    if args.cuda:
        netG_B2A.cuda()

    # Translate every jpg two directory levels below the dataset root,
    # mirroring the directory tree under args.output_dir.
    im_paths = sorted(glob.glob(os.path.join(args.dataset_root_path, '*', '*', '*.jpg')))
    for i, im_path in enumerate(im_paths):
        (head, tail) = os.path.split(im_path)
        os.makedirs(os.path.join(args.output_dir, head[len(args.dataset_root_path) + 1:]), exist_ok=True)
        infer(netG_B2A, im_path)
        print("Done creating image %d/%d" % (i+1, len(im_paths)))
| StarcoderdataPython |
3510072 | import cv2
import colorsys
import numpy as np
import pandas as pd
### get the images and scale them by 1/10 to get the of a 10x10 area ###
# NOTE(review): this script uses xrange below, so it is Python 2 only.
ImgL = cv2.imread('image_1.png')
resizeImgL = cv2.resize(ImgL, (0,0), fx=0.1, fy=0.1)
ImgR = cv2.imread('image_2.png')
resizeImgR = cv2.resize(ImgR, (0,0), fx=0.1, fy=0.1)
ImgS = cv2.imread('disparity_map.png',0)  # flag 0: load as grayscale
resizeImgS = cv2.resize(ImgS, (0,0), fx=0.1, fy=0.1)
def hue2rgb(hue, value):
    """Return the RGB triple for the given hue and value at full saturation."""
    full_saturation = 1.
    return colorsys.hsv_to_rgb(hue, full_saturation, value)
# Emit one CSV row per pixel of the (downscaled) disparity map: (x, y)
# position and disparity z scaled back up by 10, plus the per-channel colour
# of the left image normalised to [0, 1].
file = open('test.csv', 'w')
file.write(',x,y,z,r,g,b\n')
i = 1
for y in xrange(0, resizeImgS.shape[0]):
    for x in xrange(0, resizeImgS.shape[1]):
        # OpenCV stores pixels as BGR, so channel 0 is blue and 2 is red.
        b = (resizeImgL[y,x,0])/255.0
        g = (resizeImgL[y,x,1])/255.0
        r = (resizeImgL[y,x,2])/255.0
        z = resizeImgS[y,x]
        file.write(str(i) + "," + str(x*10) + "," + str(y*10) + "," + str(z*10) + "," + str((r)) + "," + str((g)) + "," + str((b)) + '\n')
        i+=1
file.close()
| StarcoderdataPython |
1603843 | import numpy as np
def _recall_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
x = np.arange(1, n_docs + 1)
recall = np.cumsum(labels)
if not x_absolute:
x = x / n_docs
if y_absolute:
y = recall
else:
y = recall / n_pos_docs
return x.tolist(), y.tolist()
def _wss_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
# Get the first occurrence of 1, 2, 3, ..., n_pos_docs in both arrays.
when_found = np.searchsorted(docs_found, np.arange(1, n_pos_docs + 1))
when_found_random = np.searchsorted(docs_found_random,
np.arange(1, n_pos_docs + 1))
n_found_earlier = when_found_random - when_found
x = np.arange(1, n_pos_docs + 1)
if not x_absolute:
x = x / n_pos_docs
if y_absolute:
y = n_found_earlier
else:
y = n_found_earlier / n_docs
return x.tolist(), y.tolist()
def _erf_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
extra_records_found = docs_found - docs_found_random
x = np.arange(1, n_docs + 1)
if not x_absolute:
x = x / n_docs
if y_absolute:
y = extra_records_found
else:
y = extra_records_found / n_pos_docs
return x.tolist(), y.tolist()
| StarcoderdataPython |
8086665 | <filename>model/encoders.py
import torch
import torch.nn as nn
from model.blocks import (BridgeConnection, LayerStack,
PositionwiseFeedForward, ResidualConnection, clone)
from model.multihead_attention import MultiheadedAttention
class EncoderLayer(nn.Module):
    """One Transformer encoder layer: multi-headed self-attention followed by
    a position-wise feed-forward network, each wrapped in a residual
    connection."""

    def __init__(self, d_model, dout_p, H, d_ff):
        super(EncoderLayer, self).__init__()
        self.res_layers = clone(ResidualConnection(d_model, dout_p), 2)
        self.self_att = MultiheadedAttention(d_model, d_model, d_model, H)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dout_p=0.0)

    def forward(self, x, src_mask):
        """x: (B, S, d_model); src_mask: (B, 1, S) -> (B, S, d_model)."""
        # The residual wrapper expects a callable of its input, so bind the
        # mask (and q = k = v = input) inside a closure rather than calling
        # self-attention directly.
        def self_attention(inp):
            return self.self_att(inp, inp, inp, src_mask)

        x = self.res_layers[0](x, self_attention)
        x = self.res_layers[1](x, self.feed_forward)
        return x
class BiModalEncoderLayer(nn.Module):
    """One bi-modal encoder layer: per-modality self-attention, then
    cross-modal attention in both directions, then per-modality feed-forward
    networks — each sub-layer wrapped in a residual connection."""

    def __init__(self, d_model_M1, d_model_M2, d_model, dout_p, H, d_ff_M1, d_ff_M2):
        super(BiModalEncoderLayer, self).__init__()
        self.self_att_M1 = MultiheadedAttention(d_model_M1, d_model_M1, d_model_M1, H, dout_p, d_model)
        self.self_att_M2 = MultiheadedAttention(d_model_M2, d_model_M2, d_model_M2, H, dout_p, d_model)
        # Cross-modal attention: queries from one modality, keys/values from
        # the other.
        self.bi_modal_att_M1 = MultiheadedAttention(d_model_M1, d_model_M2, d_model_M2, H, dout_p, d_model)
        self.bi_modal_att_M2 = MultiheadedAttention(d_model_M2, d_model_M1, d_model_M1, H, dout_p, d_model)
        self.feed_forward_M1 = PositionwiseFeedForward(d_model_M1, d_ff_M1, dout_p)
        self.feed_forward_M2 = PositionwiseFeedForward(d_model_M2, d_ff_M2, dout_p)
        self.res_layers_M1 = clone(ResidualConnection(d_model_M1, dout_p), 3)
        self.res_layers_M2 = clone(ResidualConnection(d_model_M2, dout_p), 3)

    def forward(self, x, masks):
        '''
        Inputs:
            x (M1, M2): (B, Sm, Dm)
            masks (M1, M2): (B, 1, Sm)
        Output:
            M1m2 (B, Sm1, Dm1), M2m1 (B, Sm2, Dm2),
        '''
        M1, M2 = x
        M1_mask, M2_mask = masks
        # sublayer should be a function which inputs x and outputs transformation
        # thus, lambda is used instead of just `self.self_att(x, x, x)` which outputs
        # the output of the self attention
        # NOTE: the cross-modal sublayers below capture M1/M2 as free
        # variables with late binding — after step 1 rebinds them, step 2
        # attends over the *self-attended* features of the other modality.
        # Do not reorder these definitions relative to step 1.
        def sublayer_self_att_M1(M1): return self.self_att_M1(M1, M1, M1, M1_mask)
        def sublayer_self_att_M2(M2): return self.self_att_M2(M2, M2, M2, M2_mask)
        def sublayer_att_M1(M1): return self.bi_modal_att_M1(M1, M2, M2, M2_mask)
        def sublayer_att_M2(M2): return self.bi_modal_att_M2(M2, M1, M1, M1_mask)
        sublayer_ff_M1 = self.feed_forward_M1
        sublayer_ff_M2 = self.feed_forward_M2
        # 1. Self-Attention
        # both (B, Sm*, Dm*)
        M1 = self.res_layers_M1[0](M1, sublayer_self_att_M1)
        M2 = self.res_layers_M2[0](M2, sublayer_self_att_M2)
        # 2. Multimodal Attention (var names: M* is the target modality; m* is the source modality)
        # (B, Sm1, Dm1)
        M1m2 = self.res_layers_M1[1](M1, sublayer_att_M1)
        # (B, Sm2, Dm2)
        M2m1 = self.res_layers_M2[1](M2, sublayer_att_M2)
        # 3. Feed-forward (var names: M* is the target modality; m* is the source modality)
        # (B, Sm1, Dm1)
        M1m2 = self.res_layers_M1[2](M1m2, sublayer_ff_M1)
        # (B, Sm2, Dm2)
        M2m1 = self.res_layers_M2[2](M2m1, sublayer_ff_M2)
        return M1m2, M2m1
class Encoder(nn.Module):
    """A stack of N identical EncoderLayer modules applied in sequence."""

    def __init__(self, d_model, dout_p, H, d_ff, N):
        super(Encoder, self).__init__()
        self.enc_layers = clone(EncoderLayer(d_model, dout_p, H, d_ff), N)

    def forward(self, x, src_mask):
        """x: (B, S, d_model); src_mask: (B, 1, S) -> (B, S, d_model).

        The output is later used as keys/queries by the decoder.
        """
        for enc_layer in self.enc_layers:
            x = enc_layer(x, src_mask)
        return x
class BiModalEncoder(nn.Module):
    """An N-layer stack of BiModalEncoderLayer over an audio/visual pair."""

    def __init__(self, d_model_A, d_model_V, d_model, dout_p, H, d_ff_A, d_ff_V, N):
        super(BiModalEncoder, self).__init__()
        layer_AV = BiModalEncoderLayer(d_model_A, d_model_V, d_model, dout_p, H, d_ff_A, d_ff_V)
        self.encoder_AV = LayerStack(layer_AV, N)

    def forward(self, x, masks: dict):
        """x = (A, V) with shapes (B, Sm, D); masks holds 'A_mask' (B, 1, Sa)
        and 'V_mask' (B, 1, Sv).  Returns the pair (Av, Va)."""
        audio, video = x
        mask_pair = (masks['A_mask'], masks['V_mask'])
        Av, Va = self.encoder_AV((audio, video), mask_pair)
        return (Av, Va)
| StarcoderdataPython |
3201880 | # Stock data visualization dashboard
# Awesome-quent list of packages for fin data: https://github.com/wilsonfreitas/awesome-quant#data-sources
import os
import datetime as dt

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import finnhub
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader as web
import plotly.express as px
from dash.dependencies import Input, Output
from dateutil import parser
# import sklearn
from scipy import optimize

from sentiment import finnhub_api_request
# Dash application and finnhub client setup.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server # server needed for heroku deploy

# Get free finnhub key from here: https://finnhub.io/dashboard
# The key is read from the environment; a missing key raises KeyError at startup.
finnhub_key = os.environ['FINNHUB_KEY']
finnhub_client = finnhub.Client(api_key=finnhub_key)
def test_func(x, dist, amp, omega, phi):
# For a sinusoidal model: https://towardsdatascience.com/fitting-cosine-sine-functions-with-machine-learning-in-python-610605d9b057
return dist + amp * np.cos(omega * x + phi)
def convert_to_unix(date_str):
    """Parse an ISO 8601 date/time string and return it as an integer UNIX
    timestamp (seconds since the epoch)."""
    parsed = parser.parse(date_str)
    return int(parsed.timestamp())
# Fetch the default AAPL daily candles shown before any user interaction.
print('Getting stock data...')
default_symbol = 'AAPL'

# UNIX timestamps of default start/end dates
default_start = dt.datetime(2020, 1, 1)
# NOTE(review): the strftime format ends with a trailing space ('%Y-%m-%d ');
# dateutil tolerates it, but it looks accidental — confirm.
default_end = dt.datetime.now().date().strftime('%Y-%m-%d ')
start_me = int(parser.parse('2020-01-01').timestamp())
end_me = int(parser.parse(default_end).timestamp())

# Default dataframe (finnhub candle columns: c, h, l, o, s, t, v)
ddf = pd.DataFrame(finnhub_client.stock_candles('AAPL', 'D', start_me, end_me))
print('FOO', ddf)
# Convert the UNIX timestamp column to readable dates.
ddf['t'] = [dt.datetime.fromtimestamp(t).strftime('%Y-%m-%d') for t in ddf['t']]

#Resetting DF columns
new_cols = ['Close', 'High', 'Low', 'Open', 'Status', 'Date', 'Volume']
ddf.columns = new_cols
ddf = ddf.drop(['Status'], axis=1)
print('Default Stock data:\n', ddf)
f = px.line(ddf, x=ddf.index, y='Close', title=default_symbol)
#############################################
# Creating app layout
app.layout = html.Div(children=[
html.H1('Welcome to Stonk World.', style={'text-align':'center'}),
# Search field -- user input
html.Div(className='row', children=[
# Symbol lookup
html.Div([
dcc.Input(
id='lookup-stock',
type='text',
placeholder="Search Stock Symbol",
value='AAPL',
debounce=True
)
], className='two columns'),
# Data desired (price, open, close, etc.)
html.Div([
dcc.Dropdown(
id='price-selection',
options=[{'label': x, 'value': x} for x in ddf.columns],
value='Close'
)
], className='two columns'),
# Start & end date selection
html.Div([
dcc.DatePickerRange(
id='date-selector',
min_date_allowed=dt.date(1985,1,1),
max_date_allowed=dt.datetime.now().date(),
start_date=dt.datetime(2020,1,1),
end_date=dt.datetime.now().date()
)
])
]),
# Stock graph - main view of dashboard
html.Div([
dcc.Graph(
id='stock-graph',
figure=f
)
]),
# User can select model
html.Div([
dcc.Checklist(
id='model-selector',
options=[{'label': 'Show trend model?', 'value': 'True'}],
value=[],
labelStyle={'display': 'inline-block'}
)
], className='two columns'),
# Table with ind stock data
html.Div([
dash_table.DataTable(
id='stock-table',
columns=[{"name": i, "id": i} for i in ddf.columns],
# data=df.to_dict('records')
)
])
])
# Updating graph based on user input values
@app.callback(
    Output('stock-graph', 'figure'),
    Output('stock-table', 'data'),
    [Input('lookup-stock', 'value'),
     Input('price-selection', 'value'),
     Input('date-selector', 'start_date'),
     Input('date-selector', 'end_date'),
     Input('model-selector', 'value')]
)
def update_stock_data(value, price, start_date, end_date, model):
    """Refresh the stock graph and the data table from user input.

    value: ticker symbol entered by the user.
    price: which column ('Close', 'Open', ...) to plot.
    start_date, end_date: ISO 8601 strings from the date picker, converted to
        UNIX timestamps for the finnhub candle API.
    model: list of selected checklist options (['True'] when the trend-model
        box is ticked, [] otherwise).
    Returns [figure, table_records] matching the two Outputs.
    """
    start = convert_to_unix(start_date)
    end = convert_to_unix(end_date)
    df = pd.DataFrame(finnhub_client.stock_candles(value, 'D', start, end))
    # Convert the UNIX timestamp column to readable dates.
    df['t'] = [dt.datetime.fromtimestamp(t).strftime('%Y-%m-%d') for t in df['t']]
    # BUG FIX: the original called df.drop(['s'], axis=1) without assigning
    # the result — a silent no-op. Rename the finnhub columns first (they
    # line up with new_cols), then actually drop the status column so this
    # frame matches the module-level ddf.
    df.columns = new_cols
    df = df.drop(['Status'], axis=1)
    print(f'New stock searched: {value}')
    print(df.head())
    # Creating figure
    f = px.line(df, x=df.index, y=price, title=value)
    # BUG FIX: dcc.Checklist delivers its selection as a *list*, so the old
    # `model == 'True'` comparison was always False; test membership instead.
    if model and 'True' in model:
        print('Generating model...')
    return [f, df[['Close', 'High', 'Low', 'Open', 'Date', 'Volume']].head(n=10).to_dict('records')]
# Run the app when this module is executed directly.
if __name__ == "__main__":
    # Removed a leftover hard-coded timestamp debug print that ran on startup.
    # Debug mode enables hot reload and the in-browser error console.
    app.run_server(debug=True)
# TODO:
# -Add symbol lookup (Check Finnhub's function: https://finnhub.io/docs/api/symbol-search)
# -Clean up
| StarcoderdataPython |
4811465 | import profiles
from pointSource import PixelizedModel as PM, GaussianModel as GM
from math import pi
def cnts2mag(cnts, zp):
    """Convert a count rate *cnts* into a magnitude given zero point *zp*."""
    import math
    magnitude = zp - 2.5 * math.log10(cnts)
    return magnitude
_SersicPars = [['amp','n','pa','q','re','x','y'],
['logamp','n','pa','q','re','x','y'],
['amp','n','q','re','theta','x','y'],
['logamp','n','q','re','theta','x','y']]
class Sersic(profiles._Sersic):
    """Sersic surface-brightness model wrapping profiles._Sersic.

    Parameters are split between *var* (fittable; values carry parameter
    objects exposing a ``.value`` attribute, re-read by setPars()) and
    *const* (fixed values set once).  NOTE: Python 2 source (print statement).
    """
    def __init__(self,name,var=None,const=None,convolve=0):
        if const is None:
            const = {}
        if var is None:
            var = {}
        # Check for all keys to be set; the combined, sorted key list must
        # match one of the accepted signatures in _SersicPars.
        keys = var.keys()+const.keys()
        keys.sort()
        if keys not in _SersicPars:
            import sys
            print "Not all parameters defined!"
            sys.exit()
        profiles._Sersic.__init__(self)
        self.invar = var
        self.keys = keys
        self.values = {}
        # vmap maps parameter name -> parameter object (see setPars()).
        self.vmap = {}
        for key in var.keys():
            self.values[key] = None
            self.vmap[key] = var[key]
        for key in const.keys():
            self.__setattr__(key,const[key])
        self.setPars()
        self.name = name
        self.convolve = convolve
    def __setattr__(self,key,value):
        # Keep aliased attribute pairs in sync:
        #   pa (degrees) <-> theta (radians); logamp -> amp; scale -> re.
        if key=='pa':
            self.__dict__['pa'] = value
            if value is not None:
                self.__dict__['theta'] = value*pi/180.
        elif key=='theta':
            if value is not None:
                self.__dict__['pa'] = value*180./pi
            self.__dict__['theta'] = value
        elif key=='logamp':
            # Only the linear amplitude is stored; 'logamp' itself is not kept.
            if value is not None:
                self.__dict__['amp'] = 10**value
        elif key=='scale':
            self.__dict__['re'] = value
        else:
            self.__dict__[key] = value
    def setPars(self):
        # Refresh attributes from the current .value of each fit parameter.
        for key in self.vmap:
            self.__setattr__(key,self.vmap[key].value)
    def getMag(self,amp,zp):
        """Return the magnitude for amplitude *amp* given zero point *zp*.

        k is a series approximation (b_n-style) used to integrate the
        profile to total counts before converting with cnts2mag().
        """
        from scipy.special import gamma
        from math import exp,pi
        n = self.n
        re = self.re
        k = 2.*n-1./3+4./(405.*n)+46/(25515.*n**2)
        cnts = (re**2)*amp*exp(k)*n*(k**(-2*n))*gamma(2*n)*2*pi
        return cnts2mag(cnts,zp)
    def Mag(self,zp):
        # Magnitude at the model's current amplitude.
        return self.getMag(self.amp,zp)
class Gauss(profiles._Gauss):
    """Gaussian surface-brightness model wrapping profiles._Gauss.

    Same var/const parameter convention as Sersic; 'r0' is optional and
    defaults to None (plain Gaussian).  NOTE: Python 2 source.
    """
    def __init__(self,name,var=None,const=None,convolve=0):
        if const is None:
            const = {}
        if var is None:
            var = {}
        # Check for all keys to be set
        keys = var.keys()+const.keys()
        keys.sort()
        # r0 is optional; pad the key list so the signature check passes.
        if 'r0' not in keys:
            keys.append('r0')
            keys.sort()
        if keys!=['amp','pa','q','r0','sigma','x','y']:
            import sys
            print "Not all parameters defined!"
            sys.exit()
        profiles._Gauss.__init__(self)
        self.invar = var
        self.keys = keys
        # r0 defaults to None unless supplied through var/const.
        self.values = {'r0':None}
        # vmap maps parameter name -> parameter object (see setPars()).
        self.vmap = {}
        for key in var.keys():
            self.values[key] = None
            self.vmap[key] = var[key]
        for key in const.keys():
            self.__setattr__(key,const[key])
        self.setPars()
        self.name = name
        self.convolve = convolve
    def __setattr__(self,key,value):
        # Keep aliased attribute pairs in sync:
        #   pa (degrees) <-> theta (radians); logamp -> amp; scale -> sigma.
        if key=='pa':
            self.__dict__['pa'] = value
            if value is not None:
                self.__dict__['theta'] = value*pi/180.
        elif key=='theta':
            if value is not None:
                self.__dict__['pa'] = value*180./pi
            self.__dict__['theta'] = value
        elif key=='logamp':
            if value is not None:
                self.__dict__['amp'] = 10**value
        elif key=='scale':
            self.__dict__['sigma'] = value
        else:
            self.__dict__[key] = value
    def setPars(self):
        # Refresh attributes from the current .value of each fit parameter.
        for key in self.vmap:
            self.__setattr__(key,self.vmap[key].value)
    def getMag(self,amp,zp):
        """Convert amplitude *amp* to a magnitude via total counts."""
        from math import exp,pi
        if self.r0 is None:
            # NOTE(review): division looks suspect here -- the total flux of
            # a 2-D Gaussian with peak amplitude amp is amp*2*pi*sigma**2;
            # confirm the convention for 'amp' before changing this.
            cnts = amp/(2*pi*self.sigma**2)
        else:
            # Truncated/ring variant: closed form involving erf.
            from scipy.special import erf
            r0 = self.r0
            s = self.sigma
            r2pi = (2*pi)**0.5
            cnts = amp*pi*s*(r2pi*r0*(1.+erf(r0/(s*2**0.5)))+2*s*exp(-0.5*r0**2/s**2))
        return cnts2mag(cnts,zp)
    def Mag(self,zp):
        # Magnitude at the model's current amplitude.
        return self.getMag(self.amp,zp)
class PointSource(GM,PM):
    """Point-source model delegating to a Gaussian (GM) or pixelized (PM)
    PSF model, chosen by the type of *model* passed to the constructor.

    Requires exactly the parameters 'amp', 'x', 'y' split between *var*
    (fit parameters with a ``.value`` attribute) and *const* (fixed values).
    """
    def __init__(self,name,model,var=None,const=None):
        if const is None:
            const = {}
        if var is None:
            var = {}
        # list() makes this valid on both Python 2 and 3 (dict views cannot
        # be concatenated with '+' on Python 3).
        keys = sorted(list(var.keys()) + list(const.keys()))
        if keys != ['amp','x','y']:
            # BUGFIX: the original aborted here by evaluating the undefined
            # name 'df', raising an uninformative NameError; raise an
            # explicit, descriptive exception instead.
            raise ValueError("Not all parameters defined! %s" % (keys,))
        self.keys = keys
        self.values = {}
        # NOTE(review): unlike Sersic/Gauss, vmap here maps parameter object
        # -> name (inverted); setPars() below assumes name -> object, so it
        # appears broken for this class -- verify against callers.
        self.vmap = {}
        self.ispix = False
        for key in var.keys():
            self.values[key] = None
            self.vmap[var[key]] = key
        for key in const.keys():
            self.values[key] = const[key]
        # A list of Gaussian components selects the analytic model; anything
        # else is treated as a pixelized PSF model.
        if type(model)==type([]):
            GM.__init__(self,model)
        else:
            PM.__init__(self,model)
            self.ispix = True
        self.setValues()
        self.name = name
        self.convolve = None
    def __setattr__(self,key,value):
        # 'logamp' is stored only as its linear amplitude.
        if key=='logamp':
            if value is not None:
                self.__dict__['amp'] = 10**value
        else:
            self.__dict__[key] = value
    def pixeval(self,xc,yc,dummy1=None,dummy2=None,**kwargs):
        """Evaluate the PSF on the (xc, yc) grid; dummy args kept so the
        signature matches the other profile classes."""
        if self.ispix==True:
            return PM.pixeval(self,xc,yc)
        else:
            return GM.pixeval(self,xc,yc)
    def setValues(self):
        # Push the stored parameter values onto attributes.
        self.x = self.values['x']
        self.y = self.values['y']
        if 'amp' in self.keys:
            self.amp = self.values['amp']
        elif self.values['logamp'] is not None:
            self.amp = 10**self.values['logamp']
    def getMag(self,amp,zp):
        """A point source's amplitude is already its total counts."""
        return cnts2mag(amp,zp)
    def Mag(self,zp):
        return self.getMag(self.amp,zp)
    def depricated_setPars(self,pars):
        # Legacy path: pars is keyed by parameter object (matches the
        # inverted vmap built in __init__).
        for key in self.vmap:
            self.values[self.vmap[key]] = pars[key]
        self.setValues()
    def setPars(self):
        # NOTE(review): assumes vmap maps name -> parameter object, which
        # contradicts __init__; left as-is pending confirmation of callers.
        if len(self.vmap)>0:
            for key in self.vmap:
                self.__setattr__(key,self.vmap[key].value)
| StarcoderdataPython |
8015242 | # -*- coding: utf-8 -*-
#
# python_common/mysql_utf8.py
#
# Apr/21/2010
#
# --------------------------------------------------------
def mysql_utf8_proc(cursor):
    """Switch the MySQL connection's character set to UTF-8.

    cursor: a DB-API cursor; only its execute() method is used.
    """
    cursor.execute("SET NAMES utf8")
#
# --------------------------------------------------------
| StarcoderdataPython |
3238598 | #-*-coding-utf-8-*-
import logging
from datetime import datetime
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import check_password
from biz.account.settings import QUOTA_ITEM, NotificationLevel
from biz.account.models import (Contract, Operation, Quota,
UserProxy, Notification, Feed, UserProfile)
from biz.account.serializer import (ContractSerializer, OperationSerializer,
UserSerializer, QuotaSerializer,
FeedSerializer, DetailedUserSerializer,
NotificationSerializer)
from biz.account.utils import get_quota_usage
from biz.idc.models import DataCenter, UserDataCenter
from biz.common.pagination import PagePagination
from biz.common.decorators import require_POST, require_GET
from biz.common.utils import retrieve_params
from biz.workflow.models import Step
from cloud.api import neutron
from cloud.cloud_utils import create_rc_by_dc
from cloud.tasks import (link_user_to_dc_task, send_notifications,
send_notifications_by_data_center)
from frontend.forms import CloudUserCreateFormWithoutCapatcha
LOG = logging.getLogger(__name__)
@api_view(["GET"])
def contract_view(request):
c = Contract.objects.filter(user=request.user,
udc__id=request.session["UDC_ID"])[0]
s = ContractSerializer(c)
return Response(s.data)
@api_view(["GET"])
def quota_view(request):
quota = get_quota_usage(request.user, request.session["UDC_ID"])
return Response(quota)
class OperationList(generics.ListAPIView):
    """Paginated operation log, filterable through query parameters.

    Superusers may additionally filter by data center and operator;
    everyone else only sees their own operations in the active UDC.
    """
    queryset = Operation.objects
    serializer_class = OperationSerializer
    pagination_class = PagePagination

    def get_queryset(self):
        params = self.request.query_params
        qs = super(OperationList, self).get_queryset()
        # Optional content filters (applied only when present and non-empty).
        if params.get('resource'):
            qs = qs.filter(resource=params.get('resource'))
        if params.get('resource_name'):
            qs = qs.filter(resource_name__istartswith=params.get('resource_name'))
        if params.get('start_date'):
            qs = qs.filter(create_date__gte=params.get('start_date'))
        if params.get('end_date'):
            qs = qs.filter(create_date__lte=params.get('end_date'))
        if self.request.user.is_superuser:
            # Admins may narrow by data center and by operator.
            data_center_pk = params.get('data_center', '')
            operator_pk = params.get('operator', '')
            if data_center_pk:
                qs = qs.filter(udc__data_center__pk=data_center_pk)
            if operator_pk:
                qs = qs.filter(user__pk=operator_pk)
        else:
            # Regular users: own operations in the current UDC only.
            qs = qs.filter(user=self.request.user,
                           udc__id=self.request.session["UDC_ID"])
        return qs.order_by('-create_date')
@api_view()
def operation_filters(request):
    """Return the choice lists used to filter the operation log."""
    resources = Operation.objects.values('resource').distinct()
    # Attach a translated display name to each distinct resource entry.
    for entry in resources:
        entry['name'] = _(entry['resource'])
    return Response({
        "resources": resources,
        "operators": UserProxy.normal_users.values('pk', 'username'),
        "data_centers": DataCenter.objects.values('pk', 'name')
    })
class ContractList(generics.ListCreateAPIView):
    # Paginated list/create endpoint over contracts that are alive and
    # not flagged deleted.
    queryset = Contract.living.filter(deleted=False)
    serializer_class = ContractSerializer
    pagination_class = PagePagination
class ContractDetail(generics.RetrieveAPIView):
    # Read-only detail endpoint for a single living contract.
    queryset = Contract.living.all()
    serializer_class = ContractSerializer
@api_view(['POST'])
def create_contract(request):
    """Create a contract from the posted data and log the operation.

    Returns {'success': bool, 'msg': ...}; validation errors come back
    with HTTP 400 and an 'errors' mapping.
    """
    try:
        serializer = ContractSerializer(data=request.data,
                                        context={"request": request})
        if serializer.is_valid():
            contract = serializer.save()
            # Record the action in the operation log for auditing.
            Operation.log(contract, contract.name, 'create', udc=contract.udc,
                          user=request.user)
            return Response({'success': True,
                             "msg": _('Contract is created successfully!')},
                            status=status.HTTP_201_CREATED)
        else:
            return Response({"success": False,
                             "msg": _('Contract data is not valid!'),
                             'errors': serializer.errors},
                            status=status.HTTP_400_BAD_REQUEST)
    except Exception as e:
        # Broad catch: any unexpected failure is logged and reported as a
        # generic error instead of a 500.
        LOG.error("Failed to create contract, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to create contract for unknown reason.')})
@api_view(['POST'])
def update_contract(request):
    """Update name/customer/date range of the contract given by data['id'].

    Dates must be formatted '%Y-%m-%d %H:%M:%S'; any failure (missing key,
    bad date, unknown pk) falls into the generic error response.
    """
    try:
        pk = request.data['id']
        contract = Contract.objects.get(pk=pk)
        contract.name = request.data['name']
        contract.customer = request.data['customer']
        contract.start_date = datetime.strptime(request.data['start_date'],
                                                '%Y-%m-%d %H:%M:%S')
        contract.end_date = datetime.strptime(request.data['end_date'],
                                              '%Y-%m-%d %H:%M:%S')
        contract.save()
        # Audit-log the update.
        Operation.log(contract, contract.name, 'update', udc=contract.udc,
                      user=request.user)
        return Response(
            {'success': True, "msg": _('Contract is updated successfully!')},
            status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to update contract, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to update contract for unknown reason.')})
@api_view(['POST'])
def delete_contracts(request):
    """Soft-delete the contracts listed in data['contract_ids[]'].

    Each contract is flagged deleted, its living quotas are flagged deleted
    as well, and the action is audit-logged per contract.
    """
    try:
        contract_ids = request.data.getlist('contract_ids[]')
        for contract_id in contract_ids:
            contract = Contract.objects.get(pk=contract_id)
            # Soft delete: flag rather than remove the row.
            contract.deleted = True
            contract.save()
            # Cascade the soft delete to the contract's living quotas.
            Quota.living.filter(contract__pk=contract_id).update(deleted=True,
                                                    update_date=timezone.now())
            Operation.log(contract, contract.name, 'delete', udc=contract.udc,
                          user=request.user)
        return Response(
            {'success': True, "msg": _('Contracts have been deleted!')},
            status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to delete contracts, msg:[%s]" % e)
        return Response({"success": False, "msg": _(
            'Failed to delete contracts for unknown reason.')})
class UserList(generics.ListAPIView):
    # Paginated listing of all normal (non-admin) users.
    queryset = UserProxy.normal_users.all()
    serializer_class = UserSerializer
    pagination_class = PagePagination
@require_GET
def active_users(request):
    """Return serialized data for every active normal user."""
    qs = UserProxy.normal_users.filter(is_active=True)
    return Response(UserSerializer(qs.all(), many=True).data)
@require_GET
def workflow_approvers(request):
    """Return active users holding the 'approve_workflow' permission."""
    qs = UserProxy.normal_users.filter(
        is_active=True, user_permissions__codename='approve_workflow')
    return Response(UserSerializer(qs.all(), many=True).data)
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete endpoint for a single normal user.
    queryset = UserProxy.normal_users.all()
    serializer_class = DetailedUserSerializer
    def perform_destroy(self, instance):
        # DELETE is a soft delete: deactivate the account instead of
        # removing the row.
        instance.is_active = False
        instance.save()
def _set_user_active(pk, is_active):
    """Set the is_active flag of the user identified by primary key *pk*."""
    user = User.objects.get(pk=pk)
    user.is_active = is_active
    user.save()
@api_view(['POST'])
def deactivate_user(request):
    """Deactivate the user given by data['id'] (shared helper removes the
    copy/paste duplication the two endpoints previously had)."""
    _set_user_active(request.data['id'], False)
    return Response({"success": True, "msg": _('User has been deactivated!')},
                    status=status.HTTP_200_OK)
@api_view(['POST'])
def activate_user(request):
    """Re-activate the user given by data['id']."""
    _set_user_active(request.data['id'], True)
    return Response({"success": True, "msg": _('User has been activated!')},
                    status=status.HTTP_200_OK)
@api_view(["POST"])
def change_password(request):
user = request.user
old_password = request.data['old_password']
new_password = request.data['new_password']
confirm_password = request.data['confirm_password']
if new_password != confirm_password:
return Response({"success": False, "msg": _(
"The new password doesn't match confirm password!")})
if not check_password(old_password, user.password):
return Response({"success": False,
"msg": _("The original password is not correct!")})
user.set_password(<PASSWORD>)
user.save()
return Response({"success": True, "msg": _(
"Password has been changed! Please login in again.")})
class QuotaList(generics.ListAPIView):
    # Unpaginated listing of living quotas, optionally narrowed to one
    # contract via the 'contract_id' query parameter.
    queryset = Quota.living
    serializer_class = QuotaSerializer
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        if 'contract_id' in request.query_params:
            queryset = queryset.filter(
                contract__id=request.query_params['contract_id'])
        return Response(self.serializer_class(queryset, many=True).data)
class QuotaDetail(generics.RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete endpoint for a single living quota.
    queryset = Quota.living
    serializer_class = QuotaSerializer
@api_view(['GET'])
def resource_options(request):
    # Static list of quota-able resource kinds.
    return Response(QUOTA_ITEM)
@api_view(['POST'])
def create_quotas(request):
    """Bulk-save quotas for one contract.

    ids[], resources[] and limits[] are parallel lists: an entry with an
    existing living quota id is updated in place, otherwise a new quota is
    created.  The contract's audit log records a single 'update' entry.
    """
    try:
        contract = Contract.objects.get(pk=request.data['contract_id'])
        quota_ids = request.data.getlist('ids[]')
        resources = request.data.getlist('resources[]')
        limits = request.data.getlist('limits[]')
        for index, quota_id in enumerate(quota_ids):
            resource, limit = resources[index], limits[index]
            if quota_id and Quota.living.filter(contract=contract,
                                                pk=quota_id).exists():
                Quota.objects.filter(pk=quota_id).update(resource=resource,
                                                         limit=limit,
                                                         update_date=timezone.now())
            else:
                Quota.objects.create(resource=resource, limit=limit,
                                     contract=contract)
        Operation.log(contract, contract.name + " quota", 'update',
                      udc=contract.udc, user=request.user)
        return Response({'success': True,
                         "msg": _('Quotas have been saved successfully!')},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to save quotas, msg:[%s]" % e)
        return Response({"success": False,
                         "msg": _('Failed to save quotas for unknown reason.')})
@api_view(['POST'])
def create_quota(request):
    """Create or update a single quota for the contract in data['contract'].

    If data['id'] names an existing quota only its limit is updated;
    otherwise a new quota is created.  The saved quota is echoed back.
    """
    try:
        contract = Contract.objects.get(pk=request.data['contract'])
        resource, limit = request.data['resource'], request.data['limit']
        pk = request.data['id'] if 'id' in request.data else None
        if pk and Quota.objects.filter(pk=pk).exists():
            quota = Quota.objects.get(pk=pk)
            quota.limit = limit
            quota.save()
        else:
            quota = Quota.objects.create(resource=resource,
                                         limit=limit,
                                         contract=contract)
        return Response({'success': True,
                         "msg": _('Quota have been saved successfully!'),
                         "quota": QuotaSerializer(quota).data},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        LOG.error("Failed to save quota, msg:[%s]" % e)
        return Response({"success": False,
                         "msg": _('Failed to save quota for unknown reason.')})
@api_view(['POST'])
def delete_quota(request):
    """Soft-delete the quota identified by data['id']."""
    try:
        Quota.living.filter(pk=request.data['id']).update(deleted=True)
        return Response({'success': True,
                         "msg": _('Quota have been deleted successfully!')},
                        status=status.HTTP_201_CREATED)
    except Exception as e:
        # BUGFIX: the original logged and reported "create" on this delete
        # path (copy/paste from create_quota), which misled both the admin
        # reading the log and the end user seeing the message.
        LOG.error("Failed to delete quota, msg:[%s]" % e)
        return Response(
            {"success": False,
             "msg": _('Failed to delete quota for unknown reason.')}
        )
@require_GET
def notification_options(request):
    # Static list of notification severity levels.
    return Response(NotificationLevel.OPTIONS)
@require_POST
def broadcast(request):
    """Queue a notification for an explicit list of receivers (async task)."""
    receiver_ids = request.data.getlist('receiver_ids[]')
    level, title, content = retrieve_params(request.data,
                                            'level', 'title', 'content')
    send_notifications.delay(title, content, level, receiver_ids)
    return Response({"success": True,
                     "msg": _('Notification is sent successfully!')})
@require_POST
def data_center_broadcast(request):
    """Queue a notification for all users of the selected data centers."""
    level, title, content = retrieve_params(
        request.data, 'level', 'title', 'content')
    dc_ids = request.data.getlist('data_centers[]')
    send_notifications_by_data_center.delay(title, content, level, dc_ids)
    return Response({"success": True,
                     "msg": _('Notification is sent successfully!')})
@require_POST
def announce(request):
    """Create a site-wide announcement (pulled lazily by feed_status)."""
    level, title, content = retrieve_params(request.data, 'level', 'title',
                                            'content')
    Notification.objects.create(title=title, content=content,
                                level=level, is_announcement=True)
    return Response({"success": True,
                     "msg": _('Announcement is sent successfully!')})
class NotificationList(generics.ListAPIView):
    # Manually sent notifications only (is_auto=False), newest first.
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset().filter(is_auto=False).order_by(
            '-create_date')
        return Response(self.serializer_class(queryset, many=True).data)
class NotificationDetail(generics.RetrieveDestroyAPIView):
    # Retrieve/delete endpoint for a single notification.
    queryset = Notification.objects.all()
    serializer_class = NotificationSerializer
class FeedList(generics.ListAPIView):
    # The current user's living feed entries, newest first.
    queryset = Feed.living.all()
    serializer_class = FeedSerializer
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset().filter(receiver=request.user).order_by(
            '-create_date')
        return Response(self.serializer_class(queryset, many=True).data)
class FeedDetail(generics.RetrieveDestroyAPIView):
    queryset = Feed.living.all()
    serializer_class = FeedSerializer
    def perform_destroy(self, instance):
        # Soft delete via the model's own fake_delete().
        instance.fake_delete()
@require_GET
def feed_status(request):
    # Materialize pending announcements into the user's feed, then report
    # the unread count.
    Notification.pull_announcements(request.user)
    num = Feed.living.filter(receiver=request.user, is_read=False).count()
    return Response({"num": num})
@require_POST
def mark_read(request, pk):
    # Mark a single feed entry as read.
    Feed.living.get(pk=pk).mark_read()
    return Response(status=status.HTTP_200_OK)
@require_POST
def initialize_user(request):
    """Link an existing user to the default data center."""
    user_id = request.data['user_id']
    user = User.objects.get(pk=user_id)
    link_user_to_dc_task(user, DataCenter.get_default())
    return Response({"success": True,
                     "msg": _("Initialization is successful.")})
@require_POST
def create_user(request):
    """Create a user from the posted registration form.

    Depending on WORKFLOW_ENABLED the new account is linked to the default
    data center and/or granted workflow-approval rights.
    """
    user = User()
    form = CloudUserCreateFormWithoutCapatcha(data=request.POST, instance=user)
    if not form.is_valid():
        return Response({"success": False, "msg": _("Data is not valid")})
    user = form.save()
    # If workflow is disabled, then only a resource user can be created,
    # otherwise admin can create resource user and workflow approver user.
    if not settings.WORKFLOW_ENABLED:
        link_user_to_dc_task(user, DataCenter.get_default())
    else:
        if 'is_resource_user' in request.data and \
                request.data['is_resource_user'] == 'true':
            link_user_to_dc_task(user, DataCenter.get_default())
        if 'is_approver' in request.data and \
                request.data['is_approver'] == 'true':
            UserProxy.grant_workflow_approve(user)
    return Response({"success": True,
                     "msg": _("User is created successfully!")})
@require_POST
def grant_workflow_approve(request):
    """Grant the workflow-approval permission to data['user_id']."""
    user_id = request.data['user_id']
    user = UserProxy.objects.get(pk=user_id)
    UserProxy.grant_workflow_approve(user)
    msg = _("User %(name)s can approve workflow now!")
    return Response({"success": True,
                     "msg": msg % {'name': user.username}})
@require_POST
def revoke_workflow_approve(request):
    """Revoke the workflow-approval permission from data['user_id'].

    Refuses if the user is still the approver of any workflow step.
    """
    user_id = request.data['user_id']
    user = UserProxy.objects.get(pk=user_id)
    if Step.objects.filter(approver=user).exists():
        msg = _("Cannot revoke workflow approve permission from %(name)s, "
                "this user is now an approver of some workflows.")
        return Response({"success": False,
                         "msg": msg % {'name': user.username}})
    UserProxy.revoke_workflow_approve(user)
    msg = _("User %(name)s cannot approve workflow any more!")
    return Response({"success": True,
                     "msg": msg % {'name': user.username}})
@require_GET
def is_username_unique(request):
    # True when no user already has this username (form validation helper).
    username = request.GET['username']
    return Response(not UserProxy.objects.filter(username=username).exists())
@require_GET
def is_email_unique(request):
    # True when no user already has this email address.
    email = request.GET['email']
    return Response(not UserProxy.objects.filter(email=email).exists())
@require_GET
def is_mobile_unique(request):
    # True when no profile already has this mobile number.
    mobile = request.GET['mobile']
    return Response(not UserProfile.objects.filter(mobile=mobile).exists())
| StarcoderdataPython |
375343 | '''
Created 01.10.2020
@author: ED
'''
from abc import ABC
class commutation_selection_ap_feature(ABC):
    """Commutation-mode selection backed by a motor module's axis parameters."""

    def __init__(self, parent):
        # The parent motor interface exposes the axis-parameter (AP) API.
        self._motorInterface = parent

    def setMode(self, mode):
        """Write *mode* into the CommutationMode axis parameter."""
        interface = self._motorInterface
        interface.setAxisParameter(interface.AP.CommutationMode, mode)

    def mode(self):
        """Read back the CommutationMode axis parameter."""
        interface = self._motorInterface
        return interface.axisParameter(interface.AP.CommutationMode)

    def showConfiguration(self):
        """Print the current commutation configuration."""
        print("Commutation selection:")
        print("\tMode: " + str(self.mode()))
| StarcoderdataPython |
5052299 | <reponame>sedders123/zoloto<gh_stars>0
from pathlib import Path
from cv2 import VideoCapture, imread
from .base import BaseCamera
class ImageFileCamera(BaseCamera):
    """Camera that always "captures" the same still image from disk."""
    def __init__(self, image_path: Path, **kwargs):
        self.image_path = image_path
        super().__init__(**kwargs)
    def capture_frame(self):
        # Returns whatever cv2.imread yields for the path (None if the file
        # cannot be read -- cv2 does not raise).
        return imread(str(self.image_path))
class VideoFileCamera(BaseCamera):
    """Camera that reads successive frames from a video file."""
    def __init__(self, video_path: Path, **kwargs):
        super().__init__(**kwargs)
        self.video_capture = self.get_video_capture(video_path)
    def get_video_capture(self, video_path: Path):
        # Separate method so subclasses/tests can substitute the capture.
        return VideoCapture(str(video_path))
    def capture_frame(self):
        # cv2 returns (ok, frame); the success flag is intentionally ignored.
        _, frame = self.video_capture.read()
        return frame
    def close(self):
        # Release the underlying cv2 handle in addition to base cleanup.
        super().close()
        self.video_capture.release()
| StarcoderdataPython |
5011517 | import pytest
import eth_account # https://github.com/ethereum/eth-account
from brownie.network.account import Accounts # https://github.com/eth-brownie
ADDRESS_LENGTH = 42
ADDRESS_PREFIX = '0x'
DEFAULT_PATH = "m/44'/60'/0'/0/0"
MNEMONIC_NUM_WORDS = 12
ACCOUNT_MNEMONIC = 'candy maple cake sugar pudding cream honey rich smooth crumble sweet treat'
ACCOUNT_ADDRESS = '0x627306090abaB3A6e1400e9345bC60c78a8BEf57'
ACCOUNT_PRIVATE_KEY = '0xc87509a1c067bbde78beb793e6fa76530b6382a4c0241e5e4a9ec0a0f44dc0d3'
KEYSTORE_DIR = 'keystores'
KEYSTORE_FILE = 'keystore.json'
KEYSTORE_FILE2 = 'keystore2.json'
KEYSTORE_PASSWORD = '<PASSWORD>'
@pytest.fixture
def enable_hd_wallet_features():
    # Opt in to eth-account's unaudited HD-wallet (mnemonic) API.
    eth_account.Account.enable_unaudited_hdwallet_features()
@pytest.fixture
def accounts():
    # Fresh brownie account container per test.
    return Accounts()
@pytest.fixture
def candy_maple_account(accounts):
    # Deterministic account derived from the well-known test mnemonic.
    return accounts.from_mnemonic(
        mnemonic=ACCOUNT_MNEMONIC,
        count=1,
        offset=0,
        passphrase='')
@pytest.fixture(scope="session")
def keystore_file(tmpdir_factory):
    # Session-scoped temp path for the first keystore round-trip test.
    return tmpdir_factory.mktemp(KEYSTORE_DIR).join(KEYSTORE_FILE)
@pytest.fixture(scope="session")
def keystore_file2(tmpdir_factory):
    # Separate temp path so the bad-password test cannot clash with the
    # happy-path keystore.
    return tmpdir_factory.mktemp(KEYSTORE_DIR).join(KEYSTORE_FILE2)
def test_add(accounts: Accounts):
    # A freshly generated account must have a 0x-prefixed 42-char address
    # and a private key.
    print('add account [brownie] with new random mnemonic')
    account = accounts.add()
    assert account.address
    assert ADDRESS_LENGTH == len(account.address)
    assert '0x' == account.address[:2]
    print('account address: {}'.format(account.address))
    assert account.private_key
    print('account private key: {}'.format(account.private_key))
def test_from_mnemonic(candy_maple_account):
    # The fixed mnemonic must derive the documented address/private key.
    print('create account [brownie] with provided mnemonic: {}'.format(ACCOUNT_MNEMONIC))
    assert candy_maple_account
    assert ACCOUNT_ADDRESS == candy_maple_account.address
    assert ACCOUNT_PRIVATE_KEY == candy_maple_account.private_key
def test_save_and_load(accounts, candy_maple_account, keystore_file):
    # Round trip: save to an encrypted keystore and load it back unchanged.
    print('save account into keystore file {}'.format(str(keystore_file)))
    candy_maple_account.save(keystore_file, password=KEYSTORE_PASSWORD)
    print('load account from keystore')
    account = accounts.load(keystore_file, password=KEYSTORE_PASSWORD)
    assert account
    assert ACCOUNT_ADDRESS == account.address
    assert ACCOUNT_PRIVATE_KEY == account.private_key
def test_save_and_load_with_bad_password(accounts, candy_maple_account, keystore_file2):
    # Loading with a wrong password must fail with the keystore MAC check.
    print('save account into keystore file {}'.format(str(keystore_file2)))
    candy_maple_account.save(keystore_file2, password=KEYSTORE_PASSWORD)
    bad_password = KEYSTORE_PASSWORD + ' :-('
    print('load account from keystore with bad password:"{}"'.format(bad_password))
    with pytest.raises(ValueError, match='MAC mismatch'):
        account = accounts.load(keystore_file2, password=bad_password)
5143126 |
'''
请定义一个队列并实现函数max得到队列里的最大值,要求函数max、push_back和pop_front 的时间复杂度都是0(1)。
'''
import queue
class MaxQueue:
    """FIFO queue whose max_value runs in O(1) amortized time.

    The module docstring requires max, push_back and pop_front to be O(1);
    the original max_value scanned the whole queue (O(n)).  A secondary,
    monotonically non-increasing deque keeps the candidates for the maximum:
    its head is always the maximum of the live elements.  Empty-queue
    queries return -1, matching the original contract.
    """
    def __init__(self):
        self.deque = queue.deque()      # the actual FIFO storage
        self._maxima = queue.deque()    # non-increasing max candidates

    def max_value(self):
        """Return the current maximum, or -1 if the queue is empty."""
        return self._maxima[0] if self._maxima else -1

    def push_back(self, value):
        """Append *value*; evict dominated max candidates (amortized O(1))."""
        self.deque.append(value)
        # Anything smaller than the new value can never be the max again.
        while self._maxima and self._maxima[-1] < value:
            self._maxima.pop()
        self._maxima.append(value)

    def pop_front(self):
        """Pop and return the oldest value, or -1 if the queue is empty."""
        if not self.deque:
            return -1
        value = self.deque.popleft()
        # Retire the candidate only if the departing element was the max.
        if self._maxima and self._maxima[0] == value:
            self._maxima.popleft()
        return value
class MaxQueue1(object):
    """Queue with a max query; duplicate of MaxQueue kept for reference.

    max_value scans all live elements (O(n)); empty queries return -1.
    """
    def __init__(self):
        self.deque = queue.deque()

    def max_value(self):
        """Return the largest queued value, or -1 when empty."""
        if not self.deque:
            return -1
        return max(self.deque)

    def push_back(self, value):
        """Append *value* to the back of the queue."""
        self.deque.append(value)

    def pop_front(self):
        """Pop and return the front value, or -1 when empty."""
        if not self.deque:
            return -1
        return self.deque.popleft()
| StarcoderdataPython |
1717256 | <reponame>dibondar/PyPhotonicReagents
"""
Calibrate shaper
"""
# Add main directory to enable imports
if __name__ == '__main__' :
import os
os.sys.path.append(os.path.abspath('..'))
import wx, h5py, time
import numpy as np
from scipy.interpolate import pchip_interpolate, PchipInterpolator
from scipy.optimize import curve_fit
from scipy.ndimage.filters import gaussian_filter
from scipy.signal import argrelmin
# Real time plotting
import visvis
# GUI components
from libs.gui.basic_window import BasicWindow, SaveSettings
from libs.gui.hardware_control import HardwareGUIControl
# Hardware
from libs.dev.spectrometer_ocean_optics import ManagerOceanOpticsSpectrometer as ManagerSpectrometer
from libs.dev.spectrometer_ocean_optics import OceanOpticsSpectrometerTab as SpectrometerTab
#from libs.dev.camera_istar import ManagerIStarCamera as ManagerSpectrometer
#from libs.dev.camera_istar import IStarCameraTab as SpectrometerTab
from libs.dev.pulse_shaper import *
########################################################################
#
# The following are calibration curves provided by the manufacturer.
# They will be used as initial guesses.
#
########################################################################
voltages_trial = np.array([600, 625, 650, 675, 700, 725, 750, 775, 800, 825, 850, 875, 900, 925, 950,
975, 1000, 1050, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000,
2250, 2500, 2750, 3000, 3250, 3500, 3750, 4095])
# ~633nm (provided by CRi)
vis_modulation = np.array([1882, 1842.8, 1789.9, 1730.6, 1660.9, 1595.2, 1543.6, 1484.2, 1414.1, 1357.2, 1300.1,
1236.5, 1184, 1138.2, 1090, 1042.7, 991.37, 928.2, 850.1, 728.15, 633, 563.15, 507.28,
461.38, 422.72, 389.57, 360.27, 337.86, 297, 265.25, 236.99, 214.65, 196.24, 180.61, 167.26, 151.62])
# Converting wavelength modulation into phase modulation
vis_modulation *= (2*np.pi / 633.)
# ~800nm (provided by CRi)
nir_modulation = np.array([2907.5, 2858.1, 2742.7, 2651.4, 2545.5, 2456, 2357.1, 2261.1, 2168.6, 2074.5,
1989.1, 1899, 1818.2, 1738.4, 1665.2, 1582.5, 1530.8, 1408.2, 1301.5, 1116.1, 971.62, 868.72, 781.03, 711.87,
648.03, 602.2, 564.07, 526.33, 454.12, 401.6, 358.47, 322.02, 299.93, 274.91, 254.31, 230.19])
# Converting wavelength modulation into phase modulation
nir_modulation *= (2*np.pi / 800. )
########################################################################
class CalibrateShaperTab (HardwareGUIControl) :
    """
    Settings for shaper calibration.

    Each control's SetLabel / __label__ string is the key under which
    HardwareGUIControl.CreateSettingsDict registers its value.
    """
    def __init__(self, parent) :
        HardwareGUIControl.__init__(self, parent)
        sizer = wx.BoxSizer(wx.VERTICAL)
        # Starting pixel to calibrate
        sizer.Add (wx.StaticText(self, label="Initial pixel"), flag=wx.LEFT, border=5)
        initial_pixel_ctr = wx.SpinCtrl (self, value="0", min=0, max=640)
        initial_pixel_ctr.SetLabel("initial pixel")
        sizer.Add (initial_pixel_ctr, flag=wx.EXPAND, border=5)
        # Final pixel to calibrate
        sizer.Add (wx.StaticText(self, label="Final pixel"), flag=wx.LEFT, border=5)
        final_pixel_ctr = wx.SpinCtrl (self, value="640", min=0, max=640)
        final_pixel_ctr.SetLabel("final pixel")
        sizer.Add (final_pixel_ctr, flag=wx.EXPAND, border=5)
        # Size of pixel bundle (how many neighbouring shaper pixels are
        # driven together during calibration); kept on self so other code
        # can read it directly.
        sizer.Add (wx.StaticText(self, label="\nPixels to bundle"), flag=wx.EXPAND, border=5)
        self.pixel_bundle_width = wx.SpinCtrl(self, value="1", min=1, max=640)
        self.pixel_bundle_width.SetLabel ("pixel bundle width")
        sizer.Add (self.pixel_bundle_width, flag=wx.EXPAND, border=5)
        # Correspondence between wavelength and pulse shaper pixel number
        # entered as dictionary without the "{}"
        sizer.Add (wx.StaticText(self,
            label="\nPixel to wavelength entered as \n<pixel1> : <lambda1>,\n<pixel2> : <lambda2>, etc.")
            , flag=wx.EXPAND, border=5)
        pixel2lambda_ctrl = wx.TextCtrl (self, value="", style=wx.TE_MULTILINE|wx.EXPAND)
        pixel2lambda_ctrl.__label__ = "pixel_to_lamba"
        sizer.Add (pixel2lambda_ctrl, flag=wx.EXPAND, border=5)
        # Initial voltage of the scan
        sizer.Add (wx.StaticText(self, label="\nInitial voltage"), flag=wx.LEFT, border=5)
        initial_voltage_ctr = wx.SpinCtrl (self, value="500", min=0, max=PULSESHAPER_MAX_VAL)
        initial_voltage_ctr.SetLabel("initial voltage")
        sizer.Add (initial_voltage_ctr, flag=wx.EXPAND, border=5)
        # Final voltage of the scan
        sizer.Add (wx.StaticText(self, label="Final voltage"), flag=wx.LEFT, border=5)
        final_voltage_ctr = wx.SpinCtrl (self, value="800", min=0, max=PULSESHAPER_MAX_VAL)
        final_voltage_ctr.SetLabel("final voltage")
        sizer.Add (final_voltage_ctr, flag=wx.EXPAND, border=5)
        # Voltage step size
        sizer.Add (wx.StaticText(self, label="Voltage scanning step"), flag=wx.LEFT, border=5)
        voltage_step_ctr = wx.SpinCtrl (self, value="10", min=0, max=PULSESHAPER_MAX_VAL)
        voltage_step_ctr.SetLabel("voltage step")
        sizer.Add (voltage_step_ctr, flag=wx.EXPAND, border=5)
        self.SetSizer(sizer)
        ############### GUI is created, now generate settings ######################
        self.CreateSettingsDict()
########################################################################
class SettingsNotebook (wx.Notebook) :
    """
    GUI for listing all settings: one tab per hardware device plus the
    calibration parameters.
    """
    def __init__(self, parent, DevSpectrometer, DevPulseShaper):
        wx.Notebook.__init__(self, parent)
        self.CalibrateShaper = CalibrateShaperTab(self)
        self.AddPage(self.CalibrateShaper, "Calibrate shaper")
        self.Spectrometer = SpectrometerTab(self, DevSpectrometer)
        self.AddPage (self.Spectrometer, "OO Spectrometer settings")
        self.PulseShaper = PulseShaperTab(self, DevPulseShaper)
        self.AddPage (self.PulseShaper, "Pulse shaper settings")
        # Dictionary to bind names to tabs for saving and loading settings
        # (keys are the section names used in the settings file).
        self.settings_to_tabs = {"Spectrometer" : self.Spectrometer,
            "PulseShaper" : self.PulseShaper, "CalibrateShaper" : self.CalibrateShaper }
########################################################################
class CalibrateShaper (BasicWindow) :
def __init__ (self, parent) :
# Starting spectrometer
self.Spectrometer = ManagerSpectrometer()
self.SpectrometerProc = self.Spectrometer.start()
# Starting pulse shaper
self.PulseShaper = ManagerShaper()
self.PulseShaperProc = self.PulseShaper.start()
# Create GUI
dw, dh = wx.DisplaySize()
wx.Frame.__init__ (self, parent, title="Pulse shaper calibration with Ocean Optics Spectrometer",
size=(0.9*dw, 0.88*dh) )
self.ConstructGUI ()
self.Center()
self.Show ()
wx.EVT_CLOSE (self, self.on_close)
def __del__ (self) :
# Close spectrometer
self.Spectrometer.exit(); self.SpectrometerProc.join()
# Close pulse shaper
self.PulseShaper.exit(); self.PulseShaperProc.join()
    def ConstructGUI (self) :
        """
        Build GUI: a settings notebook plus a command panel on the left,
        and a visvis figure for plotting on the right.
        The Create*Button helpers are presumably provided by BasicWindow
        (not visible in this chunk) -- verify.
        """
        self.panel = wx.Panel(self)
        sizer = wx.GridBagSizer ()
        ############################ Settings Notebook ############################
        self.SettingsNotebook = SettingsNotebook(self.panel, self.Spectrometer, self.PulseShaper)
        sizer.Add(self.SettingsNotebook, pos=(0, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)
        ############################ Command panel ############################
        boxsizer = wx.BoxSizer (wx.VERTICAL)
        # Interactively display spectrum
        boxsizer.Add (self.CreateShowSpectrumButton(), flag=wx.EXPAND, border=5)
        # Vary one pixel button
        boxsizer.Add (wx.StaticText(self.panel, label="\nPixel to vary"), flag=wx.EXPAND, border=5)
        self.pixel_to_vary = wx.SpinCtrl(self.panel, value="320", min=0, max=640)
        boxsizer.Add (self.pixel_to_vary, flag=wx.EXPAND, border=5)
        # Toggle button: the label doubles as the state machine's state
        self.vary_pixel_bundle_button = wx.Button (self.panel)
        self.vary_pixel_bundle_button.__start_label__ = "Vary pixel bundle"
        self.vary_pixel_bundle_button.__stop_label__ = "STOP varying"
        self.vary_pixel_bundle_button.SetLabel (self.vary_pixel_bundle_button.__start_label__)
        self.Bind (wx.EVT_BUTTON, self.VaryPixelBundle, self.vary_pixel_bundle_button)
        boxsizer.Add (self.vary_pixel_bundle_button, flag=wx.EXPAND, border=5)
        # Separator
        boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
        ################## Calibrate button ##################
        # Bound to raw mouse events so that a double click can mean "stop"
        # (see PerformCalibration)
        self.calibrate_button = wx.Button (self.panel)
        self.calibrate_button.Bind (wx.EVT_LEFT_DOWN, self.PerformCalibration)
        self.calibrate_button.Bind (wx.EVT_LEFT_DCLICK, self.PerformCalibration)
        boxsizer.Add(self.calibrate_button, flag=wx.EXPAND, border=5)
        # Define labels (used by PerformCalibration as the start/pause/resume/stop states)
        self.calibrate_button.__start_label__ = "Calibrate pulse shaper"
        self.calibrate_button.__pause_label__ = "PAUSE calibration"
        self.calibrate_button.__resume_label__ = "RESUME calibration"
        self.calibrate_button.__stop_label__ = "STOP calibration"
        self.calibrate_button.SetLabel (self.calibrate_button.__start_label__)
        # Extract phases functions
        self.extract_phase_function = wx.Button (self.panel, label="Extract calibration phase functions")
        self.Bind (wx.EVT_BUTTON, self.ExtractPhaseFunc, self.extract_phase_function)
        boxsizer.Add (self.extract_phase_function, flag=wx.EXPAND, border=5)
        # Separator
        boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
        # Send random phase to the pulse shaper
        boxsizer.Add (self.CreateRandomPhaseButton(), flag=wx.EXPAND, border=5)
        # Send random amplitude to the pulse shaper
        boxsizer.Add (self.CreateRandomAmplitudeButton(), flag=wx.EXPAND, border=5)
        # Send zero amplitude and zero phase to the pulse shaper
        boxsizer.Add (self.CreateZeroAmplitudeButton(), flag=wx.EXPAND, border=5)
        # Open pulse shaper equalizer
        boxsizer.Add (self.CreatePulseShaperEqualizerButton(), flag=wx.EXPAND, border=5)
        # Separator
        boxsizer.Add (wx.StaticText(self.panel), flag=wx.EXPAND, border=5)
        # Save settings
        boxsizer.Add( self.CreateSaveSettingsButton(), flag=wx.EXPAND, border=5)
        # Load settings
        boxsizer.Add( self.CreateLoadSettingsButton(), flag=wx.EXPAND, border=5)
        sizer.Add(boxsizer, pos=(1, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=10)
        ########################### End of constructing panel ######################################
        self.panel.SetSizer (sizer)
        ############################# Setting visvis #######################################
        # `app` is the module-level visvis application object
        # (created in the __main__ guard at the bottom of this file)
        Figure = app.GetFigureClass()
        self.fig = Figure(self)
        boxsizer = wx.BoxSizer (wx.HORIZONTAL)
        boxsizer.Add(self.panel, 0.5, wx.EXPAND)
        boxsizer.Add(self.fig._widget, 2, wx.EXPAND)
        #########################################################################################
        self.SetSizer (boxsizer)
        self.SetAutoLayout(True)
        self.Layout()
    def ScanVoltage (self) :
        """
        Using the iterator <self.scan_pixel_voltage_pair> record the spectral response
        by applying the voltages.
        Re-schedules itself via wx.CallAfter so the GUI stays responsive;
        the scan ends when the iterator is exhausted (StopIteration).
        """
        # Pause calibration, if user requested; AttributeError means the scan
        # attributes were deleted (calibration stopped), so just return.
        try :
            if self.pause_calibration : return
        except AttributeError : return
        try :
            # `param` is a (master_voltage, slave_voltage) pair
            param = self.scan_pixel_voltage_pair.next()
            self.PulseShaper.SetUniformMasks(*param)
            # Getting spectrum
            spectrum = self.Spectrometer.AcquiredData()
            # Save the spectrum
            try : self.SpectraGroup["voltages_%d_%d" % param] = spectrum
            except RuntimeError : print "There was RuntimeError while saving scan voltages_%d_%d" % param
            # Plot the spectra
            visvis.gca().Clear()
            visvis.plot (self.wavelengths, spectrum)
            visvis.xlabel("wavelength (nm)")
            visvis.ylabel("counts")
            # Scanning progress info
            self.scanned += 1.
            percentage_completed = 100.*self.scanned/self.scan_length
            # NOTE(review): time.clock() is deprecated (removed in Python 3.8);
            # fine here since this file is Python 2 throughout.
            seconds_left = ( time.clock() - self.initial_time )*(100./percentage_completed - 1.)
            # convert to hours:min:sec
            m, s = divmod(seconds_left, 60)
            h, m = divmod(m, 60)
            title_info = param + (percentage_completed, h, m, s)
            visvis.title ("Scanning spectrum by applying voltages %d/%d. Progress: %.1f%% completed. Time left: %d:%02d:%02d." % title_info)
            self.fig.DrawNow()
            # Measure the next pair
            wx.CallAfter(self.ScanVoltage)
        except StopIteration :
            # Perform processing of the measured data
            wx.CallAfter(self.ExtractPhaseFunc, filename=self.calibration_file.filename)
            # All voltages are scanned
            self.StopAllJobs()
            # Stop using the shaper
            self.PulseShaper.StopDevice()
    def FitSpectralScans (self, scans, voltages, pixels_edges) :
        """
        Perform fitting to the pulse shaper's mask transmission coefficient.
        `scans` -- 2D array of measured spectra (row per applied voltage);
            NOTE: normalized in place, so the caller's array is modified.
        `voltages` -- 1D array of voltages at which `scans` were recorded.
        `pixels_edges` -- indices into the spectral axis delimiting pixel bundles.
        Returns a list of (voltage, phase, fit_error) tuples, one per bundle.
        NOTE(review): relies on the module-level globals `voltages_trial`,
        `vis_modulation`, `nir_modulation` (factory calibration curves) --
        presumably defined earlier in this file; not visible in this chunk.
        """
        def FitIndividualPixel (voltages, pixel, modulation, p0=None) :
            """
            Find fit for individual pixel with initial guess for phase function given by `modulation`.
            `voltages` voltage values for which `pixel` was measured
            `pixel` measured transmission (to be fitted)
            `p0` is the initial guess for fitting parametres
            Returns (fitting_error, (V, M), popt).
            """
            def GetVM (V0, V1, m0, m1) :
                """
                Return voltage and phase modulation from parameters
                """
                M = m0 + m1*modulation
                V = np.linspace(V0, V1, M.size)
                return V, M
            def FittedTransmission (voltages, offset, amplitude, *params) :
                """
                Return the transmission function for a shaper
                """
                V, M = GetVM(*params)
                return amplitude*np.cos( pchip_interpolate(V,M,voltages) )**2 + offset
            # Set fitting parameters to their default values
            if p0 is None : p0 = [0., 1., voltages.min(), voltages.max(), 0., 1.]
            # Fitting the transmission; fall back to the initial guess if
            # curve_fit does not converge
            try : popt, _ = curve_fit(FittedTransmission, voltages, pixel, p0=p0)
            except RuntimeError : popt = p0
            # Get fitting error (sum of squared residuals)
            fitting_error = np.sum( ( FittedTransmission(voltages, *popt) - pixel )**2 )
            return fitting_error, GetVM(*popt[2:]), popt
        ############################################################################
        # Selecting the voltage range
        V_min = max( voltages_trial.min(), voltages.min() )
        V_max = min( voltages_trial.max(), voltages.max() )
        indx = np.nonzero( (V_min <= voltages)&(voltages <= V_max) )
        # Number of calibration points lying within the voltage region
        num_vol_trial = np.sum( (V_min <= voltages_trial)&(voltages_trial <= V_max) )
        if num_vol_trial < 2 : num_vol_trial = 2
        # Re-sample modulation provided by CRi so that the voltage is equidistantly spaced
        resampled_vis_modulation = pchip_interpolate(voltages_trial, vis_modulation,
            np.linspace(V_min, V_max, min(len(voltages), num_vol_trial) )
        )
        resampled_nir_modulation = pchip_interpolate(voltages_trial, nir_modulation,
            np.linspace(V_min, V_max, min(len(voltages), num_vol_trial) )
        )
        # Normalizing scans (in place -- modifies the caller's array)
        scans -= scans.min(axis=0); scans /= scans.max(axis=0)
        # Bin the spectrum into pixels
        # (Python 2: map returns a list, so len(spectral_slices) below works)
        spectral_slices = map( lambda begin,end : scans[:,begin:end].mean(axis=1), pixels_edges[:-1], pixels_edges[1:] )
        # List containing calibration data for each pixel
        calibration = []
        # Initial guesses for fitting; carried over from the previous bundle
        # so consecutive fits start close to a converged solution
        vis_p0 = None; nir_p0 = None;
        # Fit individual pulse shaper pixels them
        for pixel_num, pixel in enumerate(spectral_slices) :
            # Smoothing and normalizing each pixel
            #pixel = gaussian_filter(pixel,sigma=1)
            pixel -= pixel.min(); pixel /= pixel.max()
            # Fit the pixel by using the vis calibration curve as the initial guess
            vis_err, vis_calibration, vis_p0 = FitIndividualPixel(voltages[indx], pixel[indx],
                resampled_vis_modulation, vis_p0)
            # Fit the pixel by using the nir calibration curve as the initial guess
            nir_err, nir_calibration, nir_p0 = FitIndividualPixel(voltages[indx], pixel[indx],
                resampled_nir_modulation, nir_p0)
            # Choose the best fit (smallest residual)
            if nir_err > vis_err :
                calibation_voltage, calibration_phase = vis_calibration
                fit_err = vis_err
            else :
                calibation_voltage, calibration_phase = nir_calibration
                fit_err = nir_err
            ###################### Plot ########################
            visvis.clf()
            # Plot measured data
            visvis.plot( voltages, pixel, lc='r',ms='*', mc='r')
            # Plot fitted data
            plot_voltages = np.linspace( calibation_voltage.min(), calibation_voltage.max(), 500)
            transmission_fit = np.cos( pchip_interpolate(calibation_voltage, calibration_phase, plot_voltages) )**2
            visvis.plot( plot_voltages, transmission_fit, lc='b')
            visvis.title ('Calibrating pixel %d / %d' % (pixel_num, len(spectral_slices)-1) )
            visvis.legend(['measured', 'fitted'])
            visvis.xlabel ('voltages')
            visvis.ylabel ('Transmission coefficient')
            self.fig.DrawNow()
            ############ Save the calibration data ##################
            calibration.append( ( calibation_voltage, calibration_phase, fit_err ) )
        return calibration
    def GetDispersionPhaseCurves (self, wavelengths, scans, voltages, pixels_edges,
            calibration, pixel2lambda_func, pulse_shaper_pixel_num ) :
        """
        Surface calibration:
        Find dispersion and phase curves based on a result of the method `self.FitSpectralScans`.
        `calibration` is the return of `self.FitSpectralScans`.
        Returns (TC_fitted, scans, dict) where the dict holds per-physical-pixel
        `offset` and `multiplier` polynomials plus the best calibration curve.
        """
        import operator
        # Find the calibration function that was best fitted (smallest error, index 2)
        best_calibation_voltage, best_calib_phase, _ = min(calibration, key=operator.itemgetter(2))
        # Form a function representing calibration curve (best fitted)
        # Normalizing calibration curve (in place)
        best_calib_phase -= best_calib_phase.min()
        best_calib_phase /= best_calib_phase.max()
        best_calibration_curve = PchipInterpolator (best_calibation_voltage, best_calib_phase)
        # Selecting the voltage range
        V_min = max( best_calibation_voltage.min(), voltages.min() )
        V_max = min( best_calibation_voltage.max(), voltages.max() )
        V_indx = np.nonzero( (V_min <= voltages)&(voltages <= V_max) )
        # Select scanning range that corresponds to a valid region of voltages and wavelength
        scans = scans[V_indx[0], pixels_edges[0]:pixels_edges[-1]]
        # Wavelength position of pixels in a new sliced scan
        wavelengths_cut = wavelengths[ pixels_edges[0]:pixels_edges[-1] ]
        # Central wavelength of each pixels
        logical_pixel_lambda = 0.5*( wavelengths[pixels_edges[1:]] + wavelengths[pixels_edges[:-1]] )
        # Construct the initial guess for the dispersion curve
        # use the cubic polynomial interpolation
        # normalize it to the best fitted calibration curve
        best_min = best_calibration_curve(V_min)
        best_max = best_calibration_curve(V_max)
        # values of the calibration curves
        calibration_values = best_calibration_curve( voltages[V_indx] )[:,np.newaxis]
        def TransmissionC (calibration_values, params) :
            """
            Transmission coefficient for fitting
            """
            # First half of `params` are offset-polynomial coefficients,
            # second half are multiplier-polynomial coefficients.
            # NOTE(review): `len(params)/2` relies on Python 2 integer division.
            offset = params[:len(params)/2]
            multiplier = params[len(params)/2:]
            phase = calibration_values * np.polyval(multiplier, wavelengths_cut)
            phase += np.polyval(offset, wavelengths_cut)
            return np.cos( phase )**2
        def Fit_TransmissionC (calibration_values, *params) :
            # Flattened wrapper so curve_fit sees a 1D residual vector
            return np.ravel( TransmissionC(calibration_values, params) )
        # Initial guess for fitting parameters
        c_min, c_max = zip(*[ pchip_interpolate(c[0],c[1],[V_min, V_max]) for c in calibration ])
        c_min = np.array(c_min); c_max = np.array(c_max)
        # Use different power fits
        power_fits = []
        for power in [1, 3, 5, 7] :
            offset = np.polyfit(logical_pixel_lambda, (best_max*c_min-best_min*c_max)/(best_max-best_min), power)
            multiplier = np.polyfit(logical_pixel_lambda, (c_max-c_min)/(best_max-best_min), power)
            p0=np.append(offset, multiplier)
            try :
                popt, _ = curve_fit(Fit_TransmissionC, calibration_values, np.ravel(scans), p0=p0)
            except RuntimeError : popt = p0
            # Calculate the Transmission coefficients for plotting
            TC_fitted = TransmissionC(calibration_values, popt)
            # Calculate fitting error
            error = np.sum( (TC_fitted - scans)**2 )
            power_fits.append( (error, popt, TC_fitted) )
        # Select the best power fit
        # NOTE(review): min() compares tuples; if two fits ever have the exact
        # same error it falls through to comparing numpy arrays, which is
        # ambiguous -- key=operator.itemgetter(0) would be safer; confirm.
        _, popt, TC_fitted = min(power_fits)
        # Extracted the fitted parameters
        # NOTE(review): `len(popt)/2` relies on Python 2 integer division.
        offset = popt[:len(popt)/2]
        multiplier = popt[len(popt)/2:]
        # Get wavelength for each physical pixel in the pulse shaper
        physical_pixel_lambda = pixel2lambda_func(np.arange(pulse_shaper_pixel_num))
        # Calculate offset and multiplier for each physical pixel
        offset = np.polyval(offset, physical_pixel_lambda)
        multiplier = np.polyval(multiplier, physical_pixel_lambda)
        # Return results
        return TC_fitted, scans, { "offset" : offset,
                "multiplier" : multiplier,
                "calibration_curve_voltage" : best_calibation_voltage,
                "calibration_curve_phase" : best_calib_phase }
    def ExtractPhaseFunc (self, event=None, filename=None) :
        """
        This function is the last stage of calibration,
        when measured data is mathematically processed to obtain the calibration curves.
        `filename` -- HDF5 calibration file; if None, a file dialogue is shown.
        Results are written back into the same HDF5 file under the groups
        "calibrated_surface" and "calibrated_pixels".
        """
        # If filename is not specified, open the file dialogue
        if filename is None :
            filename = self.LoadSettings (title="Load calibration file...")
            # Update the name of pulse shaper calibration file
            import os
            self.SettingsNotebook.PulseShaper.SetSettings({"calibration_file_name" : os.path.abspath(filename)})
        visvis.clf()
        # Loading the file calibration file (mode 'a': results are written back)
        with h5py.File(filename, 'a') as calibration_file :
            ############### Loading data ####################
            wavelengths = calibration_file["calibration_settings/wavelengths"][...]
            fixed_voltage = calibration_file["calibration_settings/fixed_voltage"][...]
            pulse_shaper_pixel_num = calibration_file["calibration_settings/pulse_shaper_pixel_num"][...]
            initial_pixel = calibration_file["settings/CalibrateShaper/initial_pixel"][...]
            final_pixel = calibration_file["settings/CalibrateShaper/final_pixel"][...]
            pixel_bundle_width = calibration_file["settings/CalibrateShaper/pixel_bundle_width"][...]
            pixel_to_lamba = str(calibration_file["settings/CalibrateShaper/pixel_to_lamba"][...])
            # Convert to dict
            # NOTE(review): eval() on file contents -- only load calibration
            # files from trusted sources.
            pixel_to_lamba = eval( "{%s}" % pixel_to_lamba )
            # Loading scans of slave and masker masks
            # (dataset keys are of the form "voltages_<master>_<slave>",
            # see ScanVoltage)
            master_mask_scans = []; slave_mask_scans = []
            for key, spectrum in calibration_file["spectra_from_uniform_masks"].items() :
                master_volt, slave_volt = map(int, key.split('_')[-2:])
                if master_volt == fixed_voltage : slave_mask_scans.append( (slave_volt, spectrum[...]) )
                if slave_volt == fixed_voltage : master_mask_scans.append( (master_volt, spectrum[...]) )
            # Sort by voltage
            master_mask_scans.sort(); slave_mask_scans.sort()
            # Extract spectral scans and voltages for each mask
            master_mask_voltage, master_mask_scans = zip(*master_mask_scans)
            master_mask_voltage = np.array(master_mask_voltage); master_mask_scans = np.array(master_mask_scans)
            slave_mask_voltage, slave_mask_scans = zip(*slave_mask_scans)
            slave_mask_voltage = np.array(slave_mask_voltage); slave_mask_scans = np.array(slave_mask_scans)
            ################### Find the edges of pixels #################
            # function that converts pixel number to pulse shaper
            # `pixel_to_lamba` is a dictionary with key = pixel, value = lambda
            deg = 1 #min(1,len(pixel_to_lamba)-1)
            print np.polyfit(pixel_to_lamba.keys(), pixel_to_lamba.values(), 1 )
            pixel2lambda_func = np.poly1d( np.polyfit(pixel_to_lamba.keys(), pixel_to_lamba.values(), deg=deg ) )
            #lambda2pixel_func = np.poly1d( np.polyfit(pixel_to_lamba.values(), pixel_to_lamba.keys(), deg=deg ) )
            # Get pixels_edges in shaper pixels
            pixels_edges_num = np.arange(initial_pixel, final_pixel, pixel_bundle_width)
            if pixels_edges_num[-1] < final_pixel :
                pixels_edges_num = np.append( pixels_edges_num, [final_pixel])
            # Get pixels_edges in logical_pixel_lambda
            pixels_edges = pixel2lambda_func(pixels_edges_num)
            # Get pixels_edges in positions of the spectrometer spectra
            pixels_edges = np.abs(wavelengths - pixels_edges[:,np.newaxis]).argmin(axis=1)
            # Sorting
            indx = np.argsort(pixels_edges)
            pixels_edges = pixels_edges[indx]
            pixels_edges_num = pixels_edges_num[indx]
            # Plot
            visvis.cla(); visvis.clf()
            visvis.plot( pixel_to_lamba.values(), pixel_to_lamba.keys(), ls=None, ms='*', mc='g', mw=15)
            visvis.plot ( wavelengths[pixels_edges], pixels_edges_num, ls='-',lc='r')
            visvis.xlabel('wavelength (nm)')
            visvis.ylabel ('pulse shaper pixel')
            visvis.legend( ['measured', 'interpolated'])
            self.fig.DrawNow()
            ################ Perform fitting of the phase masks #################
            master_mask_fits = self.FitSpectralScans( master_mask_scans, master_mask_voltage, pixels_edges )
            slave_mask_fits = self.FitSpectralScans( slave_mask_scans, slave_mask_voltage, pixels_edges )
            ################# Perform fitting of dispersion and phase functions #################
            master_mask_TC_fitted, master_mask_scans, master_mask_disp_ph_curves = \
                self.GetDispersionPhaseCurves (wavelengths, master_mask_scans, master_mask_voltage,
                    pixels_edges, master_mask_fits, pixel2lambda_func, pulse_shaper_pixel_num )
            slave_mask_TC_fitted, slave_mask_scans, slave_mask_disp_ph_curves = \
                self.GetDispersionPhaseCurves (wavelengths, slave_mask_scans, slave_mask_voltage,
                    pixels_edges, slave_mask_fits, pixel2lambda_func, pulse_shaper_pixel_num )
            ################ Saving fitting parameters ####################
            #################################################################
            # Save surface calibration (delete any previous result first)
            try : del calibration_file["calibrated_surface"]
            except KeyError : pass
            CalibratedSurfaceGroupe = calibration_file.create_group("calibrated_surface")
            master_mask_calibrated_surface = CalibratedSurfaceGroupe.create_group("master_mask")
            for key, value in master_mask_disp_ph_curves.items() :
                master_mask_calibrated_surface[key] = value
            slave_mask_calibrated_surface = CalibratedSurfaceGroupe.create_group("slave_mask")
            for key, value in slave_mask_disp_ph_curves.items() :
                slave_mask_calibrated_surface[key] = value
            #################################################################
            # Clear the group, if it exits
            try : del calibration_file["calibrated_pixels"]
            except KeyError : pass
            # This group is self consistent, thus the redundancy in saved data (with respect to other group in the file)
            CalibratedPixelsGroupe = calibration_file.create_group ("calibrated_pixels")
            CalibratedPixelsGroupe["pixels_edges"] = pixels_edges
            # Finding spectral bounds for each pixel
            pixels_spectral_bounds = zip( wavelengths[pixels_edges[:-1]], wavelengths[pixels_edges[1:]] )
            # Pulse shaper pixel bounds
            pixels_bounds = zip(pixels_edges_num[:-1], pixels_edges_num[1:])
            PixelsGroup = CalibratedPixelsGroupe.create_group("pixels")
            for pixel_num, master_mask_calibration, slave_mask_calibration, \
                spectral_bound, pixel_bound in zip( range(len(master_mask_fits)), \
                    master_mask_fits, slave_mask_fits, pixels_spectral_bounds, pixels_bounds ) :
                pixel = PixelsGroup.create_group("pixel_%d" % pixel_num)
                pixel["spectral_bound"] = spectral_bound
                pixel["pixel_bound"] = pixel_bound
                # Saving fitted calibration data in the tabular form
                pixel["voltage_master_mask"] = master_mask_calibration[0]
                pixel["phase_master_mask"] = master_mask_calibration[1]
                pixel["voltage_slave_mask"] = slave_mask_calibration[0]
                pixel["phase_slave_mask"] = slave_mask_calibration[1]
            ################ Plotting results of calibration ####################
            visvis.cla(); visvis.clf();
            visvis.subplot(2,2,1)
            visvis.imshow( master_mask_scans, cm=visvis.CM_JET )
            visvis.title ("Master mask (measured data)")
            visvis.ylabel ('voltage'); visvis.xlabel ('wavelength (nm)')
            visvis.subplot(2,2,3)
            visvis.imshow( master_mask_TC_fitted, cm=visvis.CM_JET )
            visvis.title ("Master mask (fitted data)")
            visvis.ylabel ('voltage'); visvis.xlabel ('wavelength (nm)')
            visvis.subplot(2,2,2)
            visvis.imshow( slave_mask_scans, cm=visvis.CM_JET )
            visvis.title ("Slave mask (measured data)")
            visvis.ylabel ('voltage'); visvis.xlabel ('wavelength (nm)')
            visvis.subplot(2,2,4)
            visvis.imshow( slave_mask_TC_fitted, cm=visvis.CM_JET )
            visvis.title ("Slave mask (fitted data)")
            visvis.ylabel ('voltage'); visvis.xlabel ('wavelength (nm)')
    def PerformCalibration (self, event=None) :
        """
        <self.calibrate_button> was clicked.
        Dispatches on the button's current label: start / pause / resume /
        stop the voltage-scanning calibration.
        """
        # Creating the reference
        button = self.calibrate_button
        try :
            # Mouse double clicking stops scanning; a plain click (or a call
            # without an event) falls through to the label dispatch below
            if event.GetEventType() == wx.wxEVT_LEFT_DCLICK : button.SetLabel (button.__stop_label__)
        except AttributeError : pass
        if button.GetLabel() == button.__start_label__ :
            self.StopAllJobs ()
            filename = self.SaveSettings(title= button.__start_label__, filename="pulse_shaper_calibration.hdf5")
            if filename is None : return
            # Update the name of pulse shaper calibration file
            import os
            self.SettingsNotebook.PulseShaper.SetSettings({"calibration_file_name" : os.path.abspath(filename)})
            # get spectrometer's settings
            settings = self.SettingsNotebook.Spectrometer.GetSettings()
            # Initiate spectrometer
            if self.Spectrometer.SetSettings(settings) == RETURN_FAIL : return
            # Initiate pulse shaper
            settings = self.SettingsNotebook.PulseShaper.GetSettings()
            if self.PulseShaper.Initialize(settings) == RETURN_FAIL : return
            # Open the file for saving calibration data
            # NOTE(review): the handle stays open for the whole scan and is
            # closed in the __stop_label__ branch below.
            self.calibration_file = h5py.File (filename, 'a')
            # The HDF5 group where spectral scans will be saved
            try : del self.calibration_file["spectra_from_uniform_masks"]
            except KeyError : pass
            self.SpectraGroup = self.calibration_file.create_group ("spectra_from_uniform_masks")
            # The HDF5 group where the calibration parameters are stored
            try : del self.calibration_file["calibration_settings"]
            except KeyError : pass
            self.CalibrationGroup = self.calibration_file.create_group ("calibration_settings")
            # Get wavelengths
            self.wavelengths = self.Spectrometer.GetWavelengths()
            self.CalibrationGroup["wavelengths"] = self.wavelengths
            # Save zero mask
            self.fixed_mask = self.PulseShaper.GetZeroMask()
            # Save the pulse shaper resolution (physical pixels)
            self.CalibrationGroup["pulse_shaper_pixel_num"] = self.PulseShaper.GetPixelNumber()
            # Define the iterator for scanning the voltage difference
            # The general idea is that by measuring spectra we measure the transmission
            # coefficients T(V1, V2) = cos^2 ( phi_m(V1) - phi_s(V2) )
            # However, we do not need to measure T for all V1 and V2 if we use the following
            # strategy:
            # [ phi_m(V1) - phi_s(V2) ] = [ phi_m(V1) - phi_s(V0) ] + [ phi_m(V0) - phi_s(V2) ]
            #                             - [ phi_m(V0) - phi_s(V0) ]
            # where V0 is an arbitrary fixed voltage
            # Set the iterator
            settings = self.SettingsNotebook.CalibrateShaper.GetSettings()
            initial_voltage = settings["initial_voltage"]
            final_voltage = settings["final_voltage"]
            voltage_step = settings["voltage_step"]
            self.fixed_voltage = int( 0.5*(final_voltage + initial_voltage) ) # This is V0
            self.CalibrationGroup["fixed_voltage"] = self.fixed_voltage
            if self.fixed_voltage > final_voltage or self.fixed_voltage < initial_voltage :
                voltage = np.arange(initial_voltage, final_voltage+1, voltage_step)
            else :
                # Skip the fixed voltage
                voltage = np.append( np.arange(initial_voltage, self.fixed_voltage, voltage_step),
                    np.arange(self.fixed_voltage+1, final_voltage+1, voltage_step) )
            from itertools import product, chain
            # Scan (V, V0) pairs first, then (V0, V) pairs
            self.scan_pixel_voltage_pair = chain( product(voltage, [self.fixed_voltage] ),
                product([self.fixed_voltage], voltage ) )
            #[(self.fixed_voltage, self.fixed_voltage)] )
            # Variables for progress info
            self.scan_length = 2*len(voltage) + 1
            self.scanned = 0.
            self.initial_time = time.clock()
            # Changing the button's label
            button.SetLabel (button.__pause_label__)
            visvis.clf()
            # Start calibrating
            self.pause_calibration = False
            wx.CallAfter(self.ScanVoltage)
        elif button.GetLabel() == button.__pause_label__ :
            self.pause_calibration = True; button.SetLabel (button.__resume_label__)
        elif button.GetLabel() == button.__resume_label__ :
            self.pause_calibration = False
            wx.CallAfter(self.ScanVoltage)
            button.SetLabel (button.__pause_label__)
        elif button.GetLabel() == button.__stop_label__ :
            # Closing the file
            self.calibration_file.close()
            # Delete all the attributes associated with scanning
            del self.pause_calibration, self.calibration_file, self.SpectraGroup, self.CalibrationGroup
            button.SetLabel (button.__start_label__)
        else : raise ValueError ("Unrecognised button's label")
    def ShowSpectra_by_VaryingPixelBundle (self) :
        """
        This method is affiliated to the method <self.VaryPixelBundle>.
        Applies the next voltage from the iterator to the selected pixel
        bundle, plots the resulting spectrum, and re-schedules itself.
        """
        # Exit if the iterator is not defined (scan was stopped)
        try : self.pixel_bundel_value_iter
        except AttributeError : return
        try :
            voltage = self.pixel_bundel_value_iter.next()
            # Set the mask for pixel bundle
            width = self.SettingsNotebook.CalibrateShaper.pixel_bundle_width.GetValue() / 2
            start_pixel_bundle = self.pixel_to_vary.GetValue()
            mask = np.copy(self.fixed_mask)
            if width:
                # Apply the voltage over the bundle, clipped to the mask bounds
                mask[max(start_pixel_bundle-width, 0):min(mask.size, start_pixel_bundle+width)] = voltage
            else:
                # Enforce single pixel width
                mask[min(max(start_pixel_bundle,0),mask.size)] = voltage
            self.PulseShaper.SetMasks( mask, self.fixed_mask)
            # Getting spectrum
            spectrum = self.Spectrometer.AcquiredData()
            # Plot the spectra
            visvis.gca().Clear()
            visvis.plot (self.wavelengths, spectrum)
            visvis.xlabel("wavelength (nm)")
            visvis.ylabel("counts")
            visvis.title ("Voltage %d / %d " % (voltage, self.fixed_mask[0]) )
            self.fig.DrawNow()
            # Going to the next iteration
            wx.CallAfter (self.ShowSpectra_by_VaryingPixelBundle)
        except StopIteration :
            # Finish the job
            self.StopAllJobs()
    def VaryPixelBundle (self, event) :
        """
        <self.vary_pixel_bundle_button> was clicked.
        Toggle the voltage scan over a single pixel bundle
        (see ShowSpectra_by_VaryingPixelBundle).
        """
        button = self.vary_pixel_bundle_button
        if button.GetLabel() == button.__start_label__ :
            self.StopAllJobs ()
            # get spectrometer's settings
            settings = self.SettingsNotebook.Spectrometer.GetSettings()
            # Initiate spectrometer
            if self.Spectrometer.SetSettings(settings) == RETURN_FAIL : return
            # Initiate pulse shaper
            settings = self.SettingsNotebook.PulseShaper.GetSettings()
            if self.PulseShaper.Initialize(settings) == RETURN_FAIL : return
            # Get wavelengths
            self.wavelengths = self.Spectrometer.GetWavelengths()
            # Set the iterator over the voltage range from the settings tab
            settings = self.SettingsNotebook.CalibrateShaper.GetSettings()
            voltage_step = settings["voltage_step"]
            initial_voltage = settings["initial_voltage"]
            final_voltage = settings["final_voltage"]
            self.pixel_bundel_value_iter = iter(xrange(initial_voltage, final_voltage+voltage_step, voltage_step))
            # Save fixed mask (all pixels at the maximal value)
            self.fixed_mask = self.PulseShaper.GetZeroMask()
            self.fixed_mask += PULSESHAPER_MAX_VAL
            # Start variation
            wx.CallAfter (self.ShowSpectra_by_VaryingPixelBundle)
            # Change button's label
            button.SetLabel (button.__stop_label__)
        elif button.GetLabel() == button.__stop_label__ :
            # Deleting the iterator makes ShowSpectra_by_VaryingPixelBundle exit
            del self.pixel_bundel_value_iter
            button.SetLabel (button.__start_label__)
        else : raise ValueError("Label is not recognized")
#########################################################################
if __name__ == '__main__' :
    # Use the wx backend of visvis; the module-level `app` is also read by
    # CalibrateShaper.ConstructGUI via app.GetFigureClass()
    app = visvis.use('wx')
    app.Create()
    # The main window shows itself from its __init__
    CalibrateShaper (None)
app.Run() | StarcoderdataPython |
5039075 | import os
import shutil
def apply_license(license):
    """Install the chosen license template as ./LICENSE and remove the
    template directory afterwards."""
    licenses_dir = ".licenses"
    chosen = get_license_file(license)
    # Place the selected license file at the project root
    shutil.copy(os.path.join(licenses_dir, chosen), "LICENSE")
    # The template folder is no longer needed once LICENSE is in place
    shutil.rmtree(licenses_dir)
def get_license_file(license):
    """Return the template filename (inside .licenses/) for a license id.

    Raises ValueError for an unrecognized license identifier.
    """
    filename_by_license = {
        "Apache-2.0": "APACHE_LICENSE",
        "MIT": "MIT_LICENSE",
        "BSD-3": "BSD_LICENSE",
        "GNU GPL v3.0": "GNU_LICENSE",
    }
    if license in filename_by_license:
        return filename_by_license[license]
    raise ValueError(f"Unknown license {license}")
# Value substituted by cookiecutter at project-generation time.
# Renamed from `license` to avoid shadowing the `license` builtin.
chosen_license = "{{ cookiecutter.license }}"
apply_license(chosen_license)
| StarcoderdataPython |
11344725 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
import CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi
# SiStripQuality ESProducer built from the detector cabling record only.
ssqcabling = CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi.siStripQualityESProducer.clone()
ssqcabling.appendToDataLabel = cms.string("onlyCabling")
ssqcabling.ListOfRecordToMerge=cms.VPSet(
    cms.PSet(record=cms.string('SiStripDetCablingRcd'),tag=cms.string(''))
)
# Cabling + run-info records merged.
ssqruninfo = CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi.siStripQualityESProducer.clone()
ssqruninfo.appendToDataLabel = cms.string("CablingRunInfo")
ssqruninfo.ListOfRecordToMerge=cms.VPSet(
    cms.PSet(record=cms.string('SiStripDetCablingRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('RunInfoRcd'),tag=cms.string(''))
)
# Adds the bad-channel record on top of cabling and run info.
ssqbadch = CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi.siStripQualityESProducer.clone()
ssqbadch.appendToDataLabel = cms.string("BadChannel")
ssqbadch.ListOfRecordToMerge=cms.VPSet(
    cms.PSet(record=cms.string('SiStripDetCablingRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('RunInfoRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripBadChannelRcd'),tag=cms.string(''))
)
# Adds DCS (detector voltage off) information.
ssqdcs = CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi.siStripQualityESProducer.clone()
ssqdcs.appendToDataLabel = cms.string("dcsBadModules")
ssqdcs.ListOfRecordToMerge=cms.VPSet(
    cms.PSet(record=cms.string('SiStripDetCablingRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('RunInfoRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripBadChannelRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripDetVOffRcd'),tag=cms.string(''))
)
# Full merge including the bad-fiber record.
ssqbadfiber = CalibTracker.SiStripESProducers.SiStripQualityESProducer_cfi.siStripQualityESProducer.clone()
ssqbadfiber.appendToDataLabel = cms.string("BadFiber")
ssqbadfiber.ListOfRecordToMerge=cms.VPSet(
    cms.PSet(record=cms.string('SiStripDetCablingRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('RunInfoRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripBadChannelRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripDetVOffRcd'),tag=cms.string('')),
    cms.PSet(record=cms.string('SiStripBadFiberRcd'),tag=cms.string(''))
)
from DPGAnalysis.SiStripTools.sistripqualityhistory_cfi import *
# Monitor each of the quality flavours defined above under a short name.
ssqhistory.monitoredSiStripQuality = cms.VPSet(
    cms.PSet( name = cms.string("Cabling"),    ssqLabel = cms.string("onlyCabling")),
    cms.PSet( name = cms.string("RunInfo"),    ssqLabel = cms.string("CablingRunInfo")),
    cms.PSet( name = cms.string("BadChannel"), ssqLabel = cms.string("BadChannel")),
    cms.PSet( name = cms.string("DCS"),        ssqLabel = cms.string("dcsBadModules")),
    cms.PSet( name = cms.string("BadFiber"),   ssqLabel = cms.string("BadFiber"))
)
| StarcoderdataPython |
6436377 | """Async Driver Method."""
import inspect
import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
from ...app import App
from ...contracts import QueueContract
from ...drivers import BaseQueueDriver
from ...exceptions import QueueException
from ...helpers import HasColoredCommands, config
class QueueAsyncDriver(BaseQueueDriver, HasColoredCommands, QueueContract):
    """Queue driver that runs jobs asynchronously on a thread or process pool."""

    def __init__(self, app: App):
        """Queue Async Driver.

        Arguments:
            app {masonite.app.App} -- The application container.
        """
        self.container = app

    def _get_processor(self, mode, max_workers):
        """Build the executor used to run jobs.

        Arguments:
            mode {str} -- async mode: 'threading' or 'multiprocess'.
            max_workers {int or None} -- number of threads/processes; None
                selects a default derived from the CPU count.

        Raises:
            QueueException -- if max_workers is not positive or mode is unknown.
        """
        # Necessary for Python 3.4, can be removed in 3.5+
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
        if max_workers <= 0:
            raise QueueException("max_workers must be greater than 0")
        # Determine Mode for Processing
        if mode == "threading":
            processor = ThreadPoolExecutor(max_workers)
        elif mode == "multiprocess":
            processor = ProcessPoolExecutor(max_workers)
        else:
            raise QueueException("Queue mode {} not recognized".format(mode))
        return processor

    def push(self, *objects, args=(), kwargs=None, **options):
        """Push objects onto the async stack.

        Arguments:
            objects {*args of objects} -- job classes or instances to run.
            args {tuple} -- positional arguments forwarded to the job callback.
            kwargs {dict or None} -- keyword arguments forwarded to the job callback.
            options {**kwargs of options} -- 'callback', 'mode', 'workers'.
        """
        # Bug fix: the previous signature used a mutable default (kwargs={}),
        # which is shared across every call to push(); use a None sentinel
        # and create a fresh dict per call instead.
        if kwargs is None:
            kwargs = {}
        # Initialize Extra Options
        callback = options.get("callback", "handle")
        mode = options.get("mode", config("queue.drivers.async.mode", "threading"))
        workers = options.get("workers", None)
        # Set processor to either use threads or processes
        processor = self._get_processor(mode=mode, max_workers=workers)
        is_blocking = config("queue.drivers.async.blocking", False)
        ran = []
        for obj in objects:
            # Resolve classes through the IoC container; instances are used as-is
            obj = self.container.resolve(obj) if inspect.isclass(obj) else obj
            try:
                future = processor.submit(getattr(obj, callback), *args, **kwargs)
            except AttributeError:
                # Could be wanting to call only a method asynchronously
                future = processor.submit(obj, *args, **kwargs)
            ran.append(future)
        if is_blocking:
            # Wait for every submitted job and report completion
            for job in as_completed(ran):
                self.info("Job Ran: {}".format(job))
| StarcoderdataPython |
1766237 | #!/usr/bin/env python3
"""Tests for the ClientSocket class."""
import unittest
from ibapipy.core.client_socket import ClientSocket
from multiprocessing import Queue
TEST_ACCOUNT_NAME = 'DU109588'
class ClientSocketTests(unittest.TestCase):
    """Test cases for the ClientSocket class.

    NOTE(review): these are integration tests -- connect()/disconnect() talk
    to a live Interactive Brokers TWS/Gateway (presumably the paper account
    named by TEST_ACCOUNT_NAME); confirm one is running before executing.
    """

    def test_constructor(self):
        # A freshly constructed socket must report itself as disconnected.
        client = ClientSocket()
        self.assertFalse(client.is_connected)

    def test_connect(self):
        # Queue carries callback payloads from the socket's reader thread
        # back to the test thread.
        result_queue = Queue()

        class MockClientSocket(ClientSocket):
            def __init__(self):
                ClientSocket.__init__(self)

            def next_valid_id(self, req_id):
                # Record the payload, then a sentinel naming the callback.
                result_queue.put(req_id)
                result_queue.put('next_valid_id')
        client = MockClientSocket()
        self.assertFalse(client.is_connected)
        client.connect()
        self.assertTrue(client.is_connected)
        # Drain results until the sentinel confirms next_valid_id fired.
        while True:
            result = result_queue.get()
            self.assertIsNotNone(result)
            if result == 'next_valid_id':
                break
        client.disconnect()

    def test_disconnect(self):
        # is_connected must track both the connect and disconnect transitions.
        client = ClientSocket()
        self.assertFalse(client.is_connected)
        client.connect()
        self.assertTrue(client.is_connected)
        client.disconnect()
        self.assertFalse(client.is_connected)

    def test_req_account_updates(self):
        result_queue = Queue()

        class MockClientSocket(ClientSocket):
            def __init__(self):
                ClientSocket.__init__(self)

            def account_download_end(self, account_name):
                # Sentinel: the full account snapshot has been delivered.
                result_queue.put(account_name)
                result_queue.put('account_download_end')

            def update_account_time(self, timestamp):
                result_queue.put(timestamp)

            def update_account_value(self, key, value, currency, account_name):
                result_queue.put(key)
                result_queue.put(value)
                result_queue.put(currency)
                result_queue.put(account_name)
        client = MockClientSocket()
        client.connect()
        client.req_account_updates(True, TEST_ACCOUNT_NAME)
        # Consume updates until the download-complete sentinel arrives.
        while True:
            result = result_queue.get()
            self.assertIsNotNone(result)
            if result == 'account_download_end':
                break
        client.disconnect()

    def test_req_all_open_orders(self):
        result_queue = Queue()

        class MockClientSocket(ClientSocket):
            def __init__(self):
                ClientSocket.__init__(self)

            def open_order(self, req_id, contract, order):
                result_queue.put(req_id)
                result_queue.put(contract)
                result_queue.put(order)

            def open_order_end(self):
                # Sentinel: every open order has been reported.
                result_queue.put('open_order_end')

            def order_status(self, req_id, status, filled, remaining,
                             avg_fill_price, perm_id, parent_id,
                             last_fill_price, client_id, why_held):
                result_queue.put(req_id)
                result_queue.put(status)
                result_queue.put(filled)
                result_queue.put(remaining)
                result_queue.put(avg_fill_price)
                result_queue.put(perm_id)
                result_queue.put(parent_id)
                result_queue.put(last_fill_price)
                result_queue.put(client_id)
                result_queue.put(why_held)
        client = MockClientSocket()
        client.connect()
        client.req_all_open_orders()
        while True:
            result = result_queue.get()
            self.assertIsNotNone(result)
            if result == 'open_order_end':
                break
        client.disconnect()


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
11247571 | <gh_stars>1-10
import re
from typing import NoReturn, Optional, TypeVar, Union
import unittest.mock as mock
import discord
__all__ = (
'MagicMock_',
'get_embeds', 'get_contents',
'assert_success', 'assert_warning', 'assert_error', 'assert_info',
'assert_no_reply', 'assert_one_if_list', 'assert_in', 'assert_regex'
)
V = TypeVar('V')
# region Misc helper functions
# noinspection PyPep8Naming
class MagicMock_(mock.MagicMock):
    """
    A ``MagicMock`` variant whose ``name`` keyword is stored as an ordinary
    attribute on the mock instead of becoming the mock's internal name; pass
    ``_name_`` to set the mock's real (repr) name.
    """

    def __init__(self, *args, _name_: Optional[str] = None, **kwargs):
        # Pull the user-facing ``name`` out before MagicMock can claim it.
        attr_value = kwargs.pop('name', None)
        mock_name = '' if _name_ is None else _name_
        super().__init__(*args, name=mock_name, **kwargs)
        self.name = attr_value
def get_embeds(mock_: MagicMock_) -> list[discord.Embed]:
    """
    :param mock_: a mock ``send`` method
    :return: a list of embeds sent in each message
    """
    embeds = []
    for call in mock_.call_args_list:
        embed = call.kwargs.get('embed')
        if embed:
            embeds.append(embed)
    return embeds
def get_contents(mock_: MagicMock_) -> list[str]:
    """
    :param mock_: a mock ``send`` method
    :return: a list of each message's contents
    """
    contents = []
    for call in mock_.call_args_list:
        # Only keep calls that passed a non-None first positional argument.
        if call.args and call.args[0] is not None:
            contents.append(call.args[0])
    return contents
# endregion
# region Embed assertions
def _assert_embed_kind(embed, keyword, substrings):
    """Shared body for the embed assertions: unwrap ``embed``, verify
    ``keyword`` appears in its title, then check its description contains
    every entry of ``substrings``."""
    __tracebackhide__ = True
    embed = assert_one_if_list(embed)
    assert keyword in embed.title
    assert_in(embed.description, *substrings)


def assert_success(
    embed: Union[discord.Embed, list[discord.Embed]], *substrings: str
):
    """
    Assert ``embed`` is a success embed and that its description contains
    each substring in ``substrings``.
    """
    __tracebackhide__ = True
    _assert_embed_kind(embed, 'Success', substrings)


def assert_warning(
    embed: Union[discord.Embed, list[discord.Embed]], *substrings: str
):
    """
    Assert ``embed`` is a warning embed and that its description contains
    each substring in ``substrings``.
    """
    __tracebackhide__ = True
    _assert_embed_kind(embed, 'Warning', substrings)


def assert_error(
    embed: Union[discord.Embed, list[discord.Embed]], *substrings: str
):
    """
    Assert ``embed`` is an error embed and that its description contains
    each substring in ``substrings``.
    """
    __tracebackhide__ = True
    _assert_embed_kind(embed, 'Error', substrings)


def assert_info(
    embed: Union[discord.Embed, list[discord.Embed]], *substrings: str
):
    """
    Assert ``embed`` is an info embed and that its description contains
    each substring in ``substrings``.
    """
    __tracebackhide__ = True
    _assert_embed_kind(embed, 'Info', substrings)
# endregion
# region Misc assertions
def assert_no_reply(send: mock.Mock) -> NoReturn:
    """Assert that the bot never called the mocked ``send`` method."""
    __tracebackhide__ = True
    assert send.called is False, "Bot replied when it shouldn't have"
def assert_one_if_list(x: Union[list[V], V]) -> V:
    """Return ``x`` unchanged, or its sole element when ``x`` is a list
    (asserting the list holds exactly one item)."""
    __tracebackhide__ = True
    if not isinstance(x, list):
        return x
    assert len(x) == 1, f"Expected only one item in list, got {len(x)}"
    return x[0]
def assert_in(str_: str, *substrings: str) -> NoReturn:
    """
    Assert that every entry of ``substrings`` occurs somewhere in ``str_``.
    """
    __tracebackhide__ = True
    for expected in substrings:
        assert expected in str_
def assert_regex(str_: str, *patterns: str) -> NoReturn:
    """
    Assert that every regex in ``patterns`` matches some part of ``str_``.
    """
    __tracebackhide__ = True
    for pattern in patterns:
        found = re.search(pattern, str_)
        assert found, (
            f'Pattern "{pattern}" did not match any part of "{str_}"'
        )
| StarcoderdataPython |
3232601 | <reponame>bkryza/atlssncli
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging as LOG
from os.path import expanduser, join
from urllib.parse import urlparse
HOME_DIR = expanduser("~")
CONFIG_DIR = join(HOME_DIR, ".atlssncli")
CONFIG_PATH = join(CONFIG_DIR, "config.ini")
CACHE_DIR = join(CONFIG_DIR, "cache")
# Cached list of projects for autocompletion
PROJECTS_CACHE = join(CACHE_DIR, "_projects")
# Cached list of issues for autocompletion
ISSUES_CACHE = join(CACHE_DIR, "_issues")
# Cached list of issue types for autocompletion
ISSUETYPES_CACHE = join(CACHE_DIR, "_issuetypes")
# Cached list of build plans for autocompletion
PLANS_CACHE = join(CACHE_DIR, "_plans")
# Cached list of users for autocompletion
USERS_CACHE = join(CACHE_DIR, "_users")
# Required config sections
REQUIRED_SECTIONS = set(["common", "jira", "bitbucket", "bamboo"])
class Config(object):
    """Wrapper around the atlssncli INI configuration file."""

    def __init__(self, config_path=CONFIG_PATH):
        """Load the INI file at ``config_path`` (defaults to CONFIG_PATH)."""
        self.path = config_path
        self.config = configparser.ConfigParser()
        LOG.debug("Reading configuration from %s", self.path)
        self.config.read(self.path)
        LOG.debug(
            "Got configuration sections: %s", ",".join(self.config.sections())
        )

    def validate(self):
        """Validate the config file"""
        missing = REQUIRED_SECTIONS.difference(set(self.config.sections()))
        if missing:
            raise Exception(
                "Missing required config sections: %s" % (",".join(missing))
            )
        return True

    def get_auth(self, service=None):
        """Return the authentication credentials for service"""
        username = self.config.get("common", "username")
        password = self.config.get("common", "password")
        return (username, password)

    def get_endpoint(self, service):
        """Get endpoint of specific service"""
        return self.config.get(service, "endpoint")

    def get_endpoint_host(self, service):
        """Get hostname of a specific service"""
        endpoint = self.get_endpoint(service)
        return urlparse(endpoint).hostname

    def get_board(self):
        """Get active board"""
        return self.config.get("agile", "board")

    def set_board(self, board_id):
        """Set default board"""
        self.config.set("agile", "board", board_id)
        self.sync()

    def get_sprint_duration(self):
        """Get the configured sprint duration"""
        return self.config.get("agile", "sprint_duration")

    def get_project(self):
        """Get active project"""
        return self.config.get("common", "active_project")

    def set_project(self, project):
        """Set active project"""
        self.config.set("common", "active_project", project)
        self.sync()

    def get_repo_plan_ids(self, repo):
        """Get Bamboo plan ids related to a repository by repository name"""
        plan_ids = self.config.get("bamboo", repo).split(",")
        return tuple(plan_ids)

    def sync(self):
        """Write the in-memory configuration back to the file"""
        LOG.debug("SYNC, sections: %s", self.config.sections())
        with open(self.path, "w") as configfile:
            self.config.write(configfile)
| StarcoderdataPython |
3250551 | <filename>tests/test_LCSQ.py
import algorithms.LCSQ as algo
import unittest
import os
script_dir = os.path.dirname(__file__) # absolute dir the script is in
class TestAlgo(unittest.TestCase):
    """Tests for the LCSQ (longest common subsequence) algorithm."""

    def _dataset(self, filename):
        """Absolute path of a dataset file, resolved relative to this script."""
        return os.path.join(script_dir, "../datasets/" + filename)

    def test_LCSQ_1(self):
        """
        Rosalind Test
        :return:
        """
        result = algo.main_LCSQ(self._dataset("LCSQ_1.txt"))
        self.assertEqual('AACTTG', result)

    def test_LCSQ_2(self):
        """
        Test for invalid input
        :return:
        """
        with self.assertRaises(IndexError):
            algo.main_LCSQ(self._dataset("LCSQ_2.txt"))

    def test_LCSQ_3(self):
        """
        Test for empty input
        :return:
        """
        with self.assertRaises(IndexError):
            algo.main_LCSQ(self._dataset("LCSQ_3.txt"))


if __name__ == '__main__':
    unittest.main()  # pragma: no cover
12836658 | <reponame>Podcastindex-org/podping.cloud<filename>hive-watcher/simple-watcher.py
# simple-watcher.py
#
# Simple version of Hive Podping watcher - no options, just runs
# The only external library needed is "beem" - pip install beem
# Beem is the official Hive accessing library for Python.
#
# Version 1.1
from datetime import datetime, timedelta
from typing import Set
import json
import beem
from beem.account import Account
from beem.blockchain import Blockchain
WATCHED_OPERATION_IDS = ["podping"]
def get_allowed_accounts(acc_name="podping") -> Set[str]:
    """Return the set of accounts followed by ``acc_name`` (podping);
    only posts from these accounts are reacted to."""
    # This is giving an error if I don't specify api server exactly.
    # TODO reported as Issue on Beem library https://github.com/holgern/beem/issues/301
    hive = beem.Hive(node="https://api.hive.blog")
    master = Account(acc_name, blockchain_instance=hive, lazy=True)
    return set(master.get_following())
def allowed_op_id(operation_id) -> bool:
    """Return True when ``operation_id`` is one of the watched custom_json ids."""
    # The membership test is already a boolean; the previous
    # if/else True/False wrapper was redundant.
    return operation_id in WATCHED_OPERATION_IDS
def block_num_back_in_minutes(blockchain: Blockchain, m: int) -> int:
    """Takes in a time in minutes and returns a block_number to start watching from"""
    moment = datetime.utcnow() - timedelta(minutes=m)
    return blockchain.get_estimated_block_num(moment)
def main():
    """Outputs URLs one by one as they appear on the Hive Podping stream"""
    # Refresh the authorised-account whitelist once at startup.
    allowed_accounts = get_allowed_accounts()
    hive = beem.Hive()
    blockchain = Blockchain(mode="head", blockchain_instance=hive)

    # Look back 15 minutes
    start_block = block_num_back_in_minutes(blockchain, 15)

    # If you want instant confirmation, you need to instantiate
    # class:beem.blockchain.Blockchain with mode="head",
    # otherwise, the call will wait until confirmed in an irreversible block.
    # noinspection PyTypeChecker
    # Filter only for "custom_json" operations on Hive.
    stream = blockchain.stream(
        opNames=["custom_json"], raw_ops=False, threading=False, start=start_block
    )
    # Blocks forever: iterates live custom_json operations as they arrive.
    for post in stream:
        # Filter only on post ID from the list above.
        if allowed_op_id(post["id"]):
            # Filter by the accounts we have authorised to podping
            if set(post["required_posting_auths"]) & allowed_accounts:
                data = json.loads(post.get("json"))
                # Payload carries either a single "url" or a list "urls".
                if data.get("url"):
                    print(data.get("url"))
                elif data.get("urls"):
                    for url in data.get("urls"):
                        print(url)
if __name__ == "__main__":
    # Runs until terminated with Ctrl-C.
    # The main reader is reliable, however the Hive network sometimes gives
    # transient errors while fetching blocks; these almost always clear up.
    while True:
        try:
            main()
        except Exception as ex:
            # BUG FIX: a second, unguarded main() call used to live inside
            # this handler -- an error during that retry would crash the
            # watcher, defeating the run-forever loop. The while-loop
            # already restarts main(), so just report and continue.
            print(ex)
| StarcoderdataPython |
11279142 | import gym
import numpy as np
import random
from keras.layers import Dense, InputLayer
from keras.models import Sequential
from collections import deque
from keras.optimizers import Adam, SGD
# Q-network: maps the 4-dimensional CartPole observation to one Q-value
# per action (2 outputs, linear head) -- trained with MSE against the
# Bellman targets built in CartPole.training().
model = Sequential()
model.add(InputLayer(batch_input_shape=(None, 4)))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(2, activation='linear'))
model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mae'], )
model.summary()
class CartPole():
    """DQN agent for the CartPole-v0 environment with epsilon-greedy
    exploration and experience-replay training."""

    def __init__(self, env):
        self.episode = 10000          # number of training episodes
        self.gamma = 0.95             # discount factor for future rewards
        self.decay = 0.995            # per-episode epsilon decay
        self.total_reward = 0
        self.eps = 1.0                # exploration rate, decayed toward min_eps
        self.min_eps = 0.01
        self.action_num = env.action_space.n
        # NOTE(review): this binds the module-level `model`; select_action
        # also uses the global directly -- presumably the same object, confirm.
        self.model = model
        self.memory = deque(maxlen=20000)   # replay buffer of transitions

    def select_action(self, state, time, flag):
        # flag=True forces greedy action selection (used once reward > 3000);
        # otherwise act epsilon-greedily.
        if flag == False:
            if np.random.random() < self.eps:
                return np.random.randint(0, self.action_num)
            else:
                return np.argmax(model.predict(state)[0])
        else:
            return np.argmax(model.predict(state)[0])

    def training(self, current_state, env, time):
        # Decay epsilon once per episode, clamped at min_eps.
        if self.eps > self.min_eps:
            self.eps *= self.decay
        else:
            self.eps = self.min_eps
        current_state = current_state.reshape((-1, 4))
        self.total_reward = 0
        flag_3000 = False
        while True:
            # Render only after the episode has proven itself (reward > 3000).
            if flag_3000==True:
                env.render()
            # select_action
            action = self.select_action(current_state, time, flag_3000)
            # next_state
            next_state, reward, done, _ = env.step(action)
            next_state = next_state.reshape((-1, 4))
            self.total_reward += reward
            # Store the transition for experience replay.
            self.memory.append([current_state, next_state, reward, action, done])
            if self.total_reward > 3000:
                flag_3000 = True
            if done:
                flag_3000 = False
                break
            # update state
            current_state = next_state
        # print total reward in this episode
        print('time: {}, Reward: {}, eps: {}'.format(time, self.total_reward, self.eps))
        # replay: fit the Q-network on a random minibatch of stored transitions
        if len(self.memory) >= 128:
            X = []
            Y = []
            batch_data = random.sample(self.memory, 128)
            for state_ , next_state_, reward_, action_, done_ in batch_data:
                if done_:
                    target_q_value = -10.#reward_ # reward_ is always 1.0; even when done_ is True we still need to learn -- learn the penalty?
                if not done_: # if done_ is False, use the discounted Bellman target
                    # Compute target q value
                    target_q_value = self.gamma * np.max(self.model.predict(next_state_)[0]) + reward_
                # Compute predict q value
                action_vec = self.model.predict(state_)
                # Only the taken action's Q-value is moved toward the target.
                action_vec[0][action_] = target_q_value
                X.append(state_[0])
                Y.append(action_vec.reshape(1, 2)[0])
            self.model.fit(np.array(X), np.array(Y), epochs=1, verbose=0)
            #self.memory.clear()
if __name__ == '__main__':
    # .unwrapped removes the TimeLimit wrapper so episodes can exceed 200 steps.
    env = gym.make("CartPole-v0").unwrapped
    cartpole = CartPole(env)
    # Run one training episode per iteration, resetting the environment each time.
    for epi in range(cartpole.episode):
        init_state = env.reset()
        cartpole.training(init_state, env, epi)
| StarcoderdataPython |
9697366 | from os import path
import glob
import json
from stix_shifter.stix_translation.src.modules.base.base_data_mapper import BaseDataMapper
class DataMapper(BaseDataMapper):
    """Maps STIX object/property names onto data-source fields using the
    module's ``json/from_*.json`` mapping files (or an explicit mapping
    supplied in ``options``)."""

    def __init__(self, options):
        # An explicit mapping passed via options overrides the JSON files.
        mapping_json = options['mapping'] if 'mapping' in options else {}
        basepath = path.dirname(__file__)
        self.from_stix_files_cnt = self.json_files_to_fetch(path.abspath(
            path.join(basepath, "json", "from_*.json")))
        self.map_data = mapping_json or self.fetch_mapping()

    @staticmethod
    def json_files_to_fetch(file_path):
        """Return every file path matching the given glob pattern."""
        return glob.glob(file_path)

    def fetch_mapping(self):
        """
        Fetches STIX-to-datasource mapping JSON from the module's
        from_*.json files, keyed by file basename.

        :return: dict mapping basename -> parsed JSON, or {} on any error
        """
        map_data = {}
        try:
            for each_file in self.from_stix_files_cnt:
                # Use a context manager so the file handle is always closed
                # (the previous open().read() leaked the handle).
                with open(each_file) as mapping_file:
                    map_data[path.basename(each_file)] = json.load(mapping_file)
            return map_data
        except Exception as ex:
            # BUG FIX: the message used to claim the error came from main().
            print('exception in fetch_mapping():', ex)
            return {}

    def map_field(self, stix_object_name, stix_property_name):
        """
        Collect the mapped data-source fields for a STIX object/property
        across every loaded mapping model.

        :param stix_object_name: str, stix object
        :param stix_property_name: str, stix field
        :return: list
        """
        mapped_field_lst = []
        for each_model_mapper in self.map_data.values():
            if stix_object_name in each_model_mapper and stix_property_name in \
                    each_model_mapper[stix_object_name]["fields"]:
                mapped_field_lst.append(
                    each_model_mapper[stix_object_name]["fields"][stix_property_name])
        return mapped_field_lst

    def map_field_json(self, stix_object_name, stix_property_name, json_file):
        """
        Return mapped fields from a single json file
        :param stix_object_name: str, stix object
        :param stix_property_name: str, stix field
        :param json_file: str, json file name
        :return: list
        """
        model_mapper = self.map_data.get(json_file)
        if stix_object_name in model_mapper and stix_property_name in \
                model_mapper[stix_object_name]["fields"]:
            return model_mapper[stix_object_name]["fields"][stix_property_name]
        return []
| StarcoderdataPython |
3588687 | <filename>src/sql_queries.py
# tables name
TABLES_NAME = """
SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_TYPE='BASE TABLE'
"""
# get columns name
COLUMNS_NAME = """
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = ?
"""
# get columns auto increment
COLUMNS_PRIMARY_KEY = """
SELECT OBJECT_SCHEMA_NAME(object_id) + '.' + OBJECT_NAME(object_id), name
FROM sys.identity_columns
WHERE OBJECT_NAME(object_id) = ?;
"""
# insert values
INSERT_TO_TABLE = """
INSERT INTO {} ({})
VALUES ({})
"""
| StarcoderdataPython |
327755 | from .models import ImagerProfile
from django.forms import ModelForm
class ProfileEditForm(ModelForm):
    """Instantiate user profile edit forms."""

    class Meta:
        model = ImagerProfile
        fields = [
            'bio',
            'phone',
            'location',
            'website',
            'fee',
            'camera',
            'services',
            'photostyles']

    def __init__(self, *args, **kwargs):
        # BUG FIX: the signature used to end with a double colon ('::'),
        # which is a SyntaxError.
        username = kwargs.pop('username')
        super().__init__(*args, **kwargs)
        user = ImagerProfile.objects.get(user__username=username)
        # Seed every editable field with the profile's current value.
        # BUG FIX: website/fee/camera/services/photostyles previously
        # copy-pasted user.bio / user.phone as their initial values.
        self.fields['bio'].initial = user.bio
        self.fields['phone'].initial = user.phone
        self.fields['location'].initial = user.location
        self.fields['website'].initial = user.website
        self.fields['fee'].initial = user.fee
        self.fields['camera'].initial = user.camera
        self.fields['services'].initial = user.services
        self.fields['photostyles'].initial = user.photostyles
| StarcoderdataPython |
5191198 | #!/usr/bin/env python3
#############################################
# #
# Test module for Edge2.py #
# Author: <NAME> #
# Date: 06/Apr/2019 #
# Modified: 07/Nov/2019 #
# #
#############################################
from testrun.TestRun import TestRun
import sys
sys.path.append('..') # allow Edge2 to be detected
from edgy_lines.Edge2 import *
class EdgeTests(TestRun):
    ''' A class for testing the Edge class methods. '''

    def test_is_point(self):
        ''' Tests the 'is_point' method. '''
        assert Edge.is_point([0,0,0,0]), "Should have been point."
        assert not Edge.is_point([0,0,1,1]), "Should not have been point."

    def test_get_line_points(self):
        ''' Tests the 'get_line_points' method. '''
        assert (Edge.get_line_points(np.array([[0,0,1,1]]),0) ==
                np.array([[0,0],[1,1]])).all()

    def test_distsq(self):
        ''' Tests the 'distsq' method. '''
        assert Edge.distsq(np.array([0,0]),np.array([0,1])) == 1,\
                "Dist should have been 1."

    def test_get_line(self):
        ''' Tests the 'get_line' method. '''
        res = Edge.get_line(np.array([[0,0],[0,1],[0,2],[0,3]]))
        assert (res == np.array([0,0,0,3])).all(), "{}".format(res)

    def test_equal_lines(self):
        ''' Tests the 'equal_lines' method. '''
        assert Edge.equal_lines(np.array([0,0,0,1]),np.array([0,1,0,0]))
        assert not Edge.equal_lines(np.array([0,1,8,2]),np.array([3,4,5,6]))

    def test_add_new_lines(self):
        ''' Tests the 'add_new_lines' method. '''
        assert (Edge.add_new_lines(np.array([[0,0,0,1]]),
                np.array([0,0,0,1])) == \
                np.array([[0,0,0,1]])).all()
        assert (Edge.add_new_lines(np.array([[0,0,0,1]]),
                np.array([0,0,0,2])) == np.array([[0,0,0,1],
                                                  [0,0,0,2]])).all()

    def test_duplicate_point(self):
        ''' Tests the 'duplicate_point' method. '''
        assert Edge.duplicate_point([[1,1],[1,1]]), "[1,1] == [1,1]"
        assert not Edge.duplicate_point([[1,2],[1,1]]), "[1,2] != [1,1]"

    def test_line_angle(self):
        ''' Tests the 'line_angle' method. '''
        assert Edge.line_angle([0,0,0,1]) == 90
        assert Edge.line_angle([0,0,1,1]) == 45
        assert Edge.line_angle([0,0,1,0]) == 0

    def test_angle_between_lines(self):
        ''' Tests the 'angle_between_lines' method. '''
        assert Edge.angle_between_lines([0,0,0,1],[0,0,1,0]) == -90

    def test_dist_point_to_segment(self):
        ''' Tests the 'dist_point_to_segment' method. '''
        assert Edge.dist_point_to_segment(np.array([0,0]), np.array([0,0]),
                np.array([0,1])) == 0
        assert Edge.dist_point_to_segment(np.array([0,0]), np.array([1,0]),
                np.array([1,0])) == 1
        assert Edge.dist_point_to_segment(np.array([0,0]), np.array([0,1]),
                np.array([1,1])) == 1

    def test_min_dist_segments(self):
        ''' Tests the 'min_dist_segments' method. '''
        res = Edge.min_dist_segments(np.array([[0,0,0,1],[1,0,2,1]]))
        assert res == 1, "{}".format(res)

    def test_offset_line(self):
        ''' Tests the 'offset_line' method. '''
        assert (Edge.offset_line(np.array([0,0,0,1]),np.array([1,2])) ==
                np.array([1,2,1,3])).all()

    def test_get_intersection_points(self):
        ''' Tests the 'get_intersection_points' method. '''
        res = Edge.get_intersection_points(
            np.array([[[0,1],[1,0]],[[0,2],[2,0]]]), np.array([[0,1],[1,0]]))
        # BUG FIX: the failure message referenced the undefined name 'ret'
        # (a NameError would have masked the real assertion failure).
        assert (res == np.array([[1,2]])).all(), \
            '[[[0,1],[1,0]],[[0,2],[2,0]]] became ' + str(res) + ', not ' +\
            '[[1,2]]'

    def test_get_intersections(self):
        ''' Tests the 'get_intersections' method. '''
        assert (Edge.get_intersections(np.array([[0,0,0,1],[1,0,1,1]])) ==
                np.array([[0,0],[0,0]])).all()
        assert (Edge.get_intersections(np.array([[-1,0,1,0],[0,1,0,-1]])) ==
                np.array([[0,1],[1,0]])).all()

    def test_get_joined_lines(self):
        ''' TODO: not yet implemented. '''
        pass

    def test_reduce_lines(self):
        ''' TODO: not yet implemented. '''
        pass


if __name__ == '__main__':
    Tests = EdgeTests()
    Tests.run_failed_tests()
| StarcoderdataPython |
3333754 | import glob
import logging
import os
import shutil
import subprocess
from urlparse import urlparse
from bd2k.util.exceptions import require
from toil.lib.docker import dockerCall
_log = logging.getLogger(__name__)
def download_url(job, url, work_dir='.', name=None, s3_key_path=None, cghub_key_path=None):
    """
    Downloads URL, can pass in file://, http://, s3://, or ftp://, gnos://cghub/analysisID, or gnos:///analysisID
    If downloading S3 URLs, the S3AM binary must be on the PATH

    :param toil.job.Job job: Toil job that is calling this function
    :param str url: URL to download from
    :param str work_dir: Directory to download file to
    :param str name: Name of output file, if None, basename of URL is used
    :param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C
    :param str cghub_key_path: Path to cghub key used to download from CGHub.
    :return: Path to the downloaded file
    :rtype: str
    """
    # NOTE: Python 2 code (urlparse import, iteritems/xrange elsewhere).
    file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))
    # Dispatch on scheme: GeneTorrent for gnos, S3AM for s3, copy for file,
    # curl for everything else.
    if cghub_key_path:
        _download_with_genetorrent(job, url, file_path, cghub_key_path)
    elif urlparse(url).scheme == 's3':
        _s3am_with_retry(job, num_cores=1, file_path=file_path, s3_url=url, mode='download', s3_key_path=s3_key_path)
    elif urlparse(url).scheme == 'file':
        shutil.copy(urlparse(url).path, file_path)
    else:
        subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
    assert os.path.exists(file_path)
    return file_path


def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None):
    """Job version of `download_url`: downloads into a temp dir and returns
    the file's ID in the Toil file store."""
    work_dir = job.fileStore.getLocalTempDir()
    fpath = download_url(job=job, url=url, work_dir=work_dir, name=name,
                         s3_key_path=s3_key_path, cghub_key_path=cghub_key_path)
    return job.fileStore.writeGlobalFile(fpath)


def _download_with_genetorrent(job, url, file_path, cghub_key_path):
    """Download a CGHub analysis via the genetorrent docker image.

    :param str url: gnos:// URL whose path component is the analysis ID
    :param str file_path: destination path (its directory is the work dir)
    :param str cghub_key_path: credential file passed to gtdownload via -c
    """
    parsed_url = urlparse(url)
    analysis_id = parsed_url.path[1:]
    assert parsed_url.scheme == 'gnos', 'Improper format. gnos://cghub/ID. User supplied: {}'.format(parsed_url)
    work_dir = os.path.dirname(file_path)
    folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
    parameters = ['-vv', '-c', cghub_key_path, '-d', analysis_id]
    dockerCall(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b',
               workDir=work_dir, parameters=parameters)
    sample = glob.glob(os.path.join(folder_path, '*tar*'))
    assert len(sample) == 1, 'More than one sample tar in CGHub download: {}'.format(analysis_id)


def s3am_upload(job, fpath, s3_dir, num_cores=1, s3_key_path=None):
    """
    Uploads a file to s3 via S3AM
    S3AM binary must be on the PATH to use this function
    For SSE-C encryption: provide a path to a 32-byte file

    :param toil.job.Job job: Toil job that is calling this function
    :param str fpath: Path to file to upload
    :param str s3_dir: Ouptut S3 path. Format: s3://bucket/[directory]
    :param int num_cores: Number of cores to use for up/download with S3AM
    :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
    """
    require(s3_dir.startswith('s3://'), 'Format of s3_dir (s3://) is incorrect: %s', s3_dir)
    # The uploaded object keeps the local file's basename.
    s3_dir = os.path.join(s3_dir, os.path.basename(fpath))
    _s3am_with_retry(job=job, num_cores=num_cores, file_path=fpath,
                     s3_url=s3_dir, mode='upload', s3_key_path=s3_key_path)


def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None):
    """Job version of s3am_upload: reads the file out of the Toil file store
    before uploading."""
    work_dir = job.fileStore.getLocalTempDir()
    fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, file_name))
    s3am_upload(job=job, fpath=fpath, s3_dir=s3_dir, num_cores=job.cores, s3_key_path=s3_key_path)
def _s3am_with_retry(job, num_cores, file_path, s3_url, mode='upload', s3_key_path=None):
    """
    Run s3am with 3 retries

    :param toil.job.Job job: Toil job that is calling this function
    :param int num_cores: Number of cores to pass to upload/download slots
    :param str file_path: Full path to the file
    :param str s3_url: S3 URL
    :param str mode: Mode to run s3am in. Either "upload" or "download"
    :param str s3_key_path: Path to the SSE-C key if using encryption
    """
    container_key_file = None
    # try to find suitable credentials
    base_boto = '.boto'
    base_aws = '.aws/credentials'
    docker_home_dir = '/root'
    # map existing credential paths to their mount point within the container
    credentials_to_mount = {os.path.join(os.path.expanduser("~"), path): os.path.join(docker_home_dir, path)
                            for path in [base_aws, base_boto]
                            if os.path.exists(os.path.join(os.path.expanduser("~"), path))}
    require(os.path.isabs(file_path), "'file_path' parameter must be an absolute path")
    dir_path, file_name = file_path.rsplit('/', 1)
    # Mirror user specified paths to simplify debugging
    container_dir_path = '/data' + dir_path
    container_file = os.path.join(container_dir_path, file_name)
    mounts = {dir_path: container_dir_path}
    if s3_key_path:
        require(os.path.isabs(s3_key_path), "'s3_key_path' parameter must be an absolute path")
        key_dir_path, key_name = s3_key_path.rsplit('/', 1)
        container_key_dir_path = '/data' + key_dir_path
        container_key_file = os.path.join(container_key_dir_path, key_name)
        # if the key directory is identical to the file directory this assignment is idempotent
        mounts[key_dir_path] = container_key_dir_path
    for k, v in credentials_to_mount.iteritems():
        mounts[k] = v
    # Build the s3am argument list; URL arguments must come last.
    arguments = []
    url_arguments = []
    if mode == 'upload':
        arguments.extend(['upload', '--force', '--upload-slots=%s' % num_cores, '--exists=overwrite'])
        url_arguments.extend(['file://' + container_file, s3_url])
    elif mode == 'download':
        arguments.extend(['download', '--file-exists=overwrite', '--download-exists=discard'])
        url_arguments.extend([s3_url, 'file://' + container_file])
    else:
        raise ValueError('Improper mode specified. mode must be equal to "upload" or "download".')
    if s3_key_path:
        arguments.extend(['--sse-key-is-master', '--sse-key-file', container_key_file])
    arguments.extend(['--part-size=50M', '--download-slots=%s' % num_cores])
    # finally, add the url path arguments after all the tool parameters are set
    arguments.extend(url_arguments)
    # Pass credential-related environment variables into container
    env = {}
    if 'AWS_PROFILE' in os.environ:
        env['AWS_PROFILE'] = os.environ['AWS_PROFILE']
    # Create parameters to pass to Docker
    docker_parameters = ['--rm', '--log-driver', 'none']
    if mounts:
        for k, v in mounts.iteritems():
            docker_parameters.extend(['-v', k + ':' + v])
    if env:
        for e, v in env.iteritems():
            docker_parameters.extend(['-e', '{}={}'.format(e, v)])
    # Run s3am with retries
    retry_count = 3
    for i in xrange(retry_count):
        try:
            dockerCall(job=job, tool='quay.io/ucsc_cgl/s3am:2.0--fed932897e7fd40f4ec878362e5dd6afe15caaf0',
                       parameters=arguments, dockerParameters=docker_parameters)
        except subprocess.CalledProcessError:
            # Transient failures are common; log and try again.
            _log.debug('S3AM %s failed', mode, exc_info=True)
        else:
            _log.debug('S3AM %s succeeded', mode)
            return
    raise RuntimeError("S3AM failed to %s after %i retries with arguments %s. Enable 'debug' "
                       "level logging to see more information about the failed attempts." %
                       (mode, retry_count, arguments))
| StarcoderdataPython |
5188988 | <reponame>rdzeldenrust/Honeybee<filename>src/Honeybee_Radiance Metal Material By Color.py
# By <NAME>
# <EMAIL>
# Honeybee started by <NAME> is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
Radiance Metal Material By Color
Create a Standard Radiance Metal Material. Many thanks to <NAME> for his help and all the great resources he provided at jaloxa.eu
Check out the color picker to see some great examples > http://www.jaloxa.eu/resources/radiance/colour_picker/index.shtml
-
Provided by Honeybee 0.0.55
Args:
_materialName: Material name
_color: Material color
_roughness_: Roughness values above 0.2 are uncommon
_specularity_: Specularity values above 0.9 is typical for metal
Returns:
avrgRef: Average diffuse reflectance of the material
RADMaterial: Radiance Material string
"""
# Grasshopper component registration metadata (name, version, palette slot).
ghenv.Component.Name = "Honeybee_Radiance Metal Material By Color"
ghenv.Component.NickName = 'radMetalMaterialByColor'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "01 | Daylight | Material"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
# AdditionalHelpFromDocStrings may be unavailable on some Grasshopper
# builds, hence the blanket try/except.
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import math
import scriptcontext as sc
import Grasshopper.Kernel as gh
# read here to understand RAD materials
# http://www.artifice.com/radiance/rad_materials.html
def createRadMaterial(modifier, name, *args):
    """Return a Radiance material definition string.

    modifier: Radiance material type (e.g. "metal").
    name: the material's name in the definition.
    *args: numeric values of the material, written with three decimals.
    """
    # I should check the inputs here
    # str() replaces the Python-2-only backtick repr; len() is already an int.
    radMaterial = "void " + modifier + " " + name + "\n" + \
                  "0\n" + \
                  "0\n" + \
                  str(len(args))
    for arg in args:
        radMaterial = radMaterial + (" " + "%.3f" % arg)
    return radMaterial + "\n"
def main():
modifier = "metal"
if sc.sticky.has_key('honeybee_release'):
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
if _materialName and _color!=None:
R = _color.R/255
G = _color.G/255
B = color.B/255
if 0 <= R <= 1 and 0 <= G <= 1 and 0 <= B <= 1:
avrgRef = (0.265 * R + 0.670 * G + 0.065 * B) * (1 - specularity) + specularity
materialName = _materialName.Replace(" ", "_")
RADMaterial = createRadMaterial(modifier, materialName, R, G, B, specularity, roughness)
if roughness > 0.2:
msg = "Roughness values above 0.2 are uncommon"
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
if specularity < 0.9:
msg = "Specularity values less than 0.9 are uncommon for metal"
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return RADMaterial
else:
msg = "Reflectance values should be between 0 and 1"
e = gh.GH_RuntimeMessageLevel.Error
ghenv.Component.AddRuntimeMessage(e, msg)
else:
print "You should first let both Ladybug and Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let both Ladybug and Honeybee to fly...")
RADMaterial = main() | StarcoderdataPython |
1637987 | import os
file_chars_reference = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def create_letters_text_files():
    """Create one empty '<letter>.txt' file per uppercase letter A-Z in the
    directory containing this script.

    Returns None on success and [] when an error occurred (the original
    error-path contract is preserved).
    """
    try:
        # Absolute path of the directory this script lives in.
        script_directory = os.path.dirname(__file__)
        for letter in file_chars_reference:
            file_path = f"{script_directory}/{letter}.txt"
            # Context manager closes the handle immediately; the original
            # leaked all 26 open file handles.
            with open(file_path, "w"):
                pass
    except FileExistsError:
        # NOTE(review): mode "w" never raises FileExistsError (mode "x"
        # would); branch kept to preserve the original intent.
        print("Fichero existe")
        return []
    except Exception as except_message:
        print(except_message)
        return []


create_letters_text_files()
| StarcoderdataPython |
235210 | <filename>src/stop_words.py
import nltk
from nltk.corpus import stopwords
# Maps our two-letter language codes to the names NLTK's stopword corpus uses.
NLTK_MAPPING = { "en": "english", "pt": "portuguese" }

# Extra, app-review-specific stop words merged on top of the NLTK lists.
custom_stop_words = {}

# Portuguese
custom_stop_words['pt'] = []

# English
custom_stop_words['en'] = [
    "app",
    "good",
    "excellent",
    "awesome",
    "please",
    "they",
    "very",
    "too",
    "like",
    "love",
    "nice",
    "yeah",
    "amazing",
    "lovely",
    "perfect",
    "much",
    "bad",
    "best",
    "yup",
    "suck",
    "super",
    "thank",
    "great",
    "really",
    "omg",
    "gud",
    "yes",
    "cool",
    "fine",
    "hello",
    "alright",
    "poor",
    "plz",
    "pls",
    "google",
    "facebook",
    "three",
    "ones",
    "one",
    "two",
    "five",
    "four",
    "old",
    "new",
    "asap",
    "version",
    "times",
    "update",
    "star",
    "first",
    "rid",
    "bit",
    "annoying",
    "beautiful",
    "dear",
    "master",
    "evernote",
    "per",
    "line",
    "oh",
    "ah",
    "cannot",
    "doesnt",
    "won't",
    "dont",
    "unless",
    "you're",
    "aren't",
    "i'd",
    "can't",
    "wouldn't",
    "around",
    "i've",
    "i'll",
    "gonna",
    "ago",
    "you'll",
    "you'd",
    "28th",
    "gen",
    "it'll",
    "vice",
    "would've",
    "wasn't",
    "year",
    "boy",
    "they'd",
    "isnt",
    "1st",
    "i'm",
    "nobody",
    "youtube",
    "isn't",
    "don't",
    "2016",
    "2017",
    "since",
    "near",
    "god",
]
def get_stop_words(language):
    """Return NLTK's stop words for *language* plus our custom additions."""
    base_words = stopwords.words(NLTK_MAPPING[language])
    extra_words = custom_stop_words[language]
    return base_words + extra_words
4840507 | import sys
from getpass import getpass
from json import dumps
from typing import Any, Dict, List, Optional, TextIO, Union
from streamlink.plugin.plugin import UserInputRequester
from streamlink_cli.utils import JSONEncoder
class ConsoleUserInputRequester(UserInputRequester):
    """
    Request input from the user on the console using the standard ask/askpass methods
    """

    def __init__(self, console):
        self.console = console

    @staticmethod
    def _require_tty() -> None:
        # Both prompt styles need an interactive stdin.
        if not sys.stdin.isatty():
            raise OSError("no TTY available")

    def ask(self, prompt: str) -> str:
        self._require_tty()
        return self.console.ask(f"{prompt.strip()}: ")

    def ask_password(self, prompt: str) -> str:
        self._require_tty()
        return self.console.askpass(f"{prompt.strip()}: ")
class ConsoleOutput:
    """Writes messages (plain text or JSON) and reads answers on a console."""

    def __init__(self, output: TextIO, json: bool = False):
        # When json is True, msg() is silenced and msg_json() is active;
        # when False, the opposite holds.
        self.json = json
        self.output = output

    def ask(self, prompt: str) -> Optional[str]:
        """Prompt the user and return the stripped answer, or None without a TTY."""
        if not sys.stdin.isatty():
            return None
        self.output.write(prompt)
        # noinspection PyBroadException
        try:
            return input().strip()
        except Exception:
            # Any input failure (EOF, interrupt, ...) is treated as "no answer".
            return None

    def askpass(self, prompt: str) -> Optional[str]:
        """Prompt for a password without echoing, or return None without a TTY."""
        if not sys.stdin.isatty():
            return None
        return getpass(prompt, self.output)

    def msg(self, msg: str) -> None:
        """Write a plain-text line; no-op in JSON mode."""
        if self.json:
            return
        self.output.write(f"{msg}\n")

    def msg_json(self, *objs: Any, **keywords: Any) -> None:
        """Serialize the given objects to JSON and write them; no-op unless in JSON mode.

        If the first positional object is a list, everything is flattened
        into one list (keywords appended as a trailing dict); otherwise all
        dict-like objects and the keywords are merged into a single dict.
        Exits with status 1 when the resulting dict carries an "error" key.
        """
        if not self.json:
            return
        out: Union[List, Dict]
        if objs and isinstance(objs[0], list):
            out = []
            for obj in objs:
                if isinstance(obj, list):
                    for item in obj:
                        out.append(item)
                else:
                    # Objects may provide their own JSON representation.
                    if hasattr(obj, "__json__") and callable(obj.__json__):
                        obj = obj.__json__()
                    out.append(obj)
            if keywords:
                out.append(keywords)
        else:
            out = {}
            for obj in objs:
                if hasattr(obj, "__json__") and callable(obj.__json__):
                    obj = obj.__json__()
                # Non-dict objects cannot be merged and are silently skipped.
                if not isinstance(obj, dict):
                    continue
                out.update(**obj)
            out.update(**keywords)
        msg = dumps(out, cls=JSONEncoder, indent=2)
        self.output.write(f"{msg}\n")
        if type(out) is dict and out.get("error"):
            sys.exit(1)

    def exit(self, msg: str) -> None:
        """Emit an error message in the active mode and exit with status 1."""
        if self.json:
            self.msg_json(error=msg)
        else:
            self.msg(f"error: {msg}")
        sys.exit(1)
# Public API of this module.
__all__ = ["ConsoleOutput", "ConsoleUserInputRequester"]
| StarcoderdataPython |
8165727 | <filename>app/user_api.py
from flask import abort, request, jsonify, url_for
from app import app, database
from app.api_auth import token_auth
from app.api_tools import get_single_json_entity
from app.errors import bad_request, error_response
from app.models import Users
A_USER_QUERY_TEMPLATE = """
SELECT users.id FROM users WHERE users.{} = '{}' LIMIT 1
"""
FULL_USER_QUERY_TEMPLATE = """
SELECT
users.id, users.username, users.email, users.common_name
FROM users WHERE users.id = '{}'
"""
@app.route('/api/v1/users/create', methods=['POST'])
def create_user():
    """Create a user from a JSON payload with username, email and password.

    Returns 201 with the new user id (and a Location header), or 400 when
    a field is missing or the username is taken.
    """
    app.logger.debug(f'Receive request: {request.data}')
    data = request.get_json() or {}
    if 'username' not in data or 'email' not in data or 'password' not in data:
        return bad_request('must include username, email and password fields')
    # Bound parameters instead of string formatting: the username comes from
    # the request body (the original .format() call allowed SQL injection).
    name_query = "SELECT users.id FROM users WHERE users.username = :username LIMIT 1"
    query_result_proxy = database.session.execute(
        name_query, {'username': data['username']}
    )
    if query_result_proxy.first() is not None:
        return bad_request('please use a different username')
    user = Users()
    user.from_dict(data, new_user=True)
    insert_command = """
        INSERT INTO users (username, email, common_name, password_hash)
        VALUES (:username, :email, :common_name, :password_hash)
        RETURNING users.id
    """
    query_result = database.session.execute(
        insert_command,
        {
            'username': user.username,
            'email': user.email,
            'common_name': user.common_name,
            'password_hash': user.password_hash,
        },
    )
    database.session.commit()
    new_user_id = query_result.scalar()
    response = jsonify({'user_id': new_user_id})
    response.status_code = 201
    response.headers['Location'] = url_for('get_user', user_id=new_user_id)
    return response
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
@token_auth.login_required
def get_user(user_id):
    """Return the full JSON representation of one user, or a 404 error."""
    # <int:user_id> is guaranteed to be an integer by the route converter,
    # so formatting it into the query template cannot inject SQL.
    user_json = get_single_json_entity(FULL_USER_QUERY_TEMPLATE.format(user_id))
    if not user_json:
        return error_response(404)
    return jsonify(user_json)
@app.route('/api/v1/users/<int:user_id>', methods=['PUT'])
@token_auth.login_required
def update_user(user_id):
    """Update username/email/common_name of the authenticated user.

    Only the owner may update their record (403 otherwise); returns the
    updated user as JSON, 404 for unknown ids, 400 on validation failure.
    """
    if token_auth.current_user().id != user_id:
        abort(403)
    # user_id is an int from the route converter, so this format() is safe.
    json_user = get_single_json_entity(A_USER_QUERY_TEMPLATE.format('id', user_id))
    if not json_user:
        return error_response(404)
    request_data = request.get_json() or {}
    mutable_field_names = ['username', 'email', 'common_name']
    fields_to_update = {
        k: v for k, v in request_data.items() if k in mutable_field_names
    }
    if not fields_to_update:
        return bad_request('must include username, email or common_name')
    if 'username' in fields_to_update:
        # Bound parameter instead of string formatting: the requested
        # username is untrusted input (the original allowed SQL injection).
        uniqueness_query = (
            "SELECT users.id FROM users WHERE users.username = :username LIMIT 1"
        )
        proxy = database.session.execute(
            uniqueness_query, {'username': request_data['username']}
        )
        if proxy.first() is not None:
            return bad_request('please use a different username')
    # Column names come from the whitelist above; values are bound params,
    # fixing the injection in the original f"{f} = '{v}'" construction.
    assignments = ', '.join(f"{field} = :{field}" for field in fields_to_update)
    update_query = f"UPDATE users SET {assignments} WHERE users.id = :user_id"
    database.session.execute(update_query, {**fields_to_update, 'user_id': user_id})
    updated_user = get_single_json_entity(FULL_USER_QUERY_TEMPLATE.format(user_id))
    database.session.commit()
    return jsonify(updated_user)
@app.route('/api/v1/users/<int:user_id>/posts', methods=['GET'])
@token_auth.login_required
def get_user_posts(user_id):
    """Return all non-deleted posts of one user as JSON, or a 404 error."""
    app.logger.debug(f'Receive request: {request.data}')
    # user_id is an int from the route converter, so this format() is safe.
    json_user = get_single_json_entity(A_USER_QUERY_TEMPLATE.format('id', user_id))
    if not json_user:
        return error_response(404)
    # Bound parameter keeps the query injection-safe by construction and
    # consistent with the other endpoints in this module.
    posts_query = """
        SELECT post.id, post.text, post.creation_timestamp, post.user_id FROM post
        WHERE post.user_id = :user_id AND post.deleted = FALSE
    """
    query_result_proxy = database.session.execute(posts_query, {'user_id': user_id})
    database.session.commit()
    posts = [{k: v for k, v in row.items()} for row in query_result_proxy]
    return jsonify({'user_posts': posts})
| StarcoderdataPython |
3582461 | <filename>steambird/boecie/forms.py<gh_stars>0
"""
This module contains the Django Form classes which are used in the Boecie views.
"""
from enum import Enum, auto
from django import forms
from django.forms import HiddenInput, MultipleHiddenInput
from django.urls import reverse_lazy
# noinspection PyUnresolvedReferences
# pylint: disable=no-name-in-module
from django_addanother.widgets import AddAnotherWidgetWrapper
from django_select2.forms import ModelSelect2MultipleWidget, ModelSelect2Widget
from steambird.models import Course, Teacher, CourseStudy, Study, Config, \
StudyMaterialEdition, MSPLine, MSP, StudyYear
def get_course_form(course_id=None):
    """Build a CourseForm class, optionally bound to an existing course.

    When course_id is None (course creation) an extra study_year choice
    field is added and the materials widget is hidden; for an existing
    course the materials widget allows creating MSPs for that course.
    """
    class CourseForm(forms.ModelForm):
        """
        ModelForm for either showing/editing or inputting information related to
        a course. Makes use of the Materials model and Teachers model.
        """
        teachers = Course.teachers
        materials = Course.materials

        # Only offered when creating a brand-new course.
        if course_id is None:
            study_year = forms.ChoiceField(choices=((i.value, i.name) for i in StudyYear))

        class Meta:
            model = Course
            fields = [
                'id',
                'course_code',
                'name',
                'materials',
                'teachers',
                'updated_associations',
                'updated_teacher',
                'calendar_year',
                'coordinator',
                'period',
                *(['study_year'] if course_id is None else []),
            ]
            widgets = {
                'id': HiddenInput(),
                "materials": AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
                    queryset=MSP.objects.all(),
                    search_fields=[
                        "mspline__materials__name__icontains",
                        "mspline__materials__book__ISBN__icontains",
                        "mspline__materials__book__author__icontains",
                        "mspline__materials__book__year_of_publishing__icontains",
                        "mspline__materials__scientificarticle__DOI__icontains",
                        "mspline__materials__scientificarticle__author__icontains",
                        "mspline__materials__scientificarticle__year_of_publishing__icontains",
                    ]
                ), reverse_lazy('boecie:msp.create',
                                kwargs={'course': course_id})) if course_id
                else MultipleHiddenInput(),
                'teachers': AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
                    model=Teacher,
                    search_fields=[
                        'titles__icontains',
                        'initials__icontains',
                        'first_name__icontains',
                        'surname_prefix__icontains',
                        'last_name__icontains',
                        'email__icontains'
                    ]
                ), reverse_lazy('boecie:teacher.create')),
                'coordinator': AddAnotherWidgetWrapper(ModelSelect2Widget(
                    model=Teacher,
                    search_fields=[
                        'titles__icontains',
                        'initials__icontains',
                        'first_name__icontains',
                        'surname_prefix__icontains',
                        'last_name__icontains',
                        'email__icontains'
                    ]
                ), reverse_lazy('boecie:teacher.create')),
            }

    return CourseForm
class TeacherForm(forms.ModelForm):
    """
    Modelform for viewing or editing data related to Teacher users.
    """

    class Meta:
        model = Teacher
        # Field order determines the order in which the form renders.
        fields = [
            'titles',
            'initials',
            'first_name',
            'surname_prefix',
            'last_name',
            'email',
            'active',
            'retired',
            'user',
        ]
# pylint: disable=invalid-name
def StudyCourseForm(has_course_field: bool = False):
    """
    Function which returns a modelform for usage in a page. The function is used to create
    a form to link information between studies and courses.

    E.g.

        form_class = StudyCourseForm(True)

    Will return a form with Course field and Study-year visible, study-field is hidden input.
    While:

        form_class = StudyCourseForm(False)

    Will return a form with Study field and Study-year visible, Course-field is hidden input.

    :param has_course_field: bool
    :return: ModelForm with either Study or Course field
    """
    class _cls(forms.ModelForm):
        class Meta:
            model = CourseStudy
            fields = [
                'study_year',
            ] + (['course'] if has_course_field else ['study'])

            # The widget mapping is chosen once, at class-creation time.
            if has_course_field:
                widgets = {
                    'course': AddAnotherWidgetWrapper(ModelSelect2Widget(
                        model=Course,
                        search_fields=[
                            'name__icontains',
                            'course_code__icontains'
                        ]
                    ), reverse_lazy('boecie:index')),
                    'study': HiddenInput(),
                }
            else:
                widgets = {
                    'study': AddAnotherWidgetWrapper(ModelSelect2Widget(
                        model=Study,
                        search_fields=[
                            'name__icontains',
                            'slug__icontains'
                        ]
                    ), reverse_lazy('boecie:index')),
                    'course': HiddenInput(),
                }

    return _cls
class LmlExportOptions(Enum):
    """
    An enum used to define the options for which selections to export. Used as the export options
    are association-linked and can contain Pre-Masters and Masters, which are less 'year' focused
    than Bachelors tend to be.
    """
    YEAR_1 = auto()
    YEAR_2 = auto()
    YEAR_3 = auto()
    MASTER = auto()
    PREMASTER = auto()
class LmlExportForm(forms.Form):
    """
    Form to offer users to download a CSV file containing books for the period
    based on the options selected. Options are presented by LmlExportOptions, combined
    with Quartile.
    """
    # TODO: make sure this will only give the downloads for books within your
    #  association (e.g. we shouldn't get EE)
    option = forms.ChoiceField(choices=((i.value, i.name) for i in LmlExportOptions))
    # Quartiles Q1..Q4.
    period = forms.ChoiceField(
        choices=(('Q{}'.format(i), 'Quartile {}'.format(i)) for i in range(1, 5))
    )
class ConfigForm(forms.ModelForm):
    """
    Modelform to offer users the possibility to change the defined periods of the year.
    Currently affects all users due to how the model is set up.
    """

    class Meta:
        model = Config
        fields = [
            'year',
            'period',
        ]
class MSPCreateForm(forms.ModelForm):
    """Form for creating an MSPLine: a comment plus one or more materials."""

    class Meta:
        model = MSPLine
        fields = [
            'comment', 'materials'
        ]
        widgets = {
            'comment': forms.Textarea(),
            'materials': AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
                queryset=StudyMaterialEdition.objects.all(),
                search_fields=[
                    "name__icontains",
                    "book__ISBN__icontains",
                    "book__author__icontains",
                    "book__year_of_publishing__icontains",
                    "scientificarticle__DOI__icontains",
                    "scientificarticle__author__icontains",
                    "scientificarticle__year_of_publishing__icontains",
                ]
            ), reverse_lazy('material_management:material.create')),
        }

    def clean(self):
        """Reject submissions that select no materials at all."""
        cleaned_data = super().clean()
        if not cleaned_data.get('materials'):
            self.add_error('materials',
                           'At least one material should be specified.')
        return cleaned_data
| StarcoderdataPython |
1787661 | # -*- coding: utf-8 -*-
"""Utilities for ComPath."""
import logging
from typing import Collection, Mapping
import pandas as pd
# Public API of this utility module.
__all__ = [
    'write_dict',
    'dict_to_df',
]

logger = logging.getLogger(__name__)
def write_dict(data: Mapping[str, Collection[str]], path: str) -> None:
    """Write a dictionary to a file as an Excel document."""
    frame = dict_to_df(data)
    logger.info("Exporting gene sets to %s", path)
    # index=False: the row index carries no information for gene sets.
    frame.to_excel(path, index=False)
    logger.info("Exported gene sets to %s", path)
def dict_to_df(data: Mapping[str, Collection[str]]) -> pd.DataFrame:
    """Convert a dictionary to a DataFrame (one column per key)."""
    columns = {}
    for key, values in data.items():
        # Series of unequal length are padded with NaN by the constructor.
        columns[key] = pd.Series(list(values))
    return pd.DataFrame(columns)
| StarcoderdataPython |
12822035 | # Generated by Django 3.0.5 on 2020-04-16 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Narrow the applicant_status choices on jobs.Applicant."""

    dependencies = [
        ('jobs', '0013_auto_20200415_0345'),
    ]

    operations = [
        # Auto-generated by makemigrations; change the model field and
        # regenerate rather than editing the choices here.
        migrations.AlterField(
            model_name='applicant',
            name='applicant_status',
            field=models.CharField(choices=[('pending', 'Pending'), ('rejected', 'Rejected'), ('interview', 'Interview'), ('hire', 'Hire')], max_length=50),
        ),
    ]
| StarcoderdataPython |
8148131 | <gh_stars>0
def get_data():
    """Read data.txt and return a list of whitespace-stripped lines."""
    data = []
    # Context manager guarantees the file is closed even if reading fails.
    with open("data.txt") as data_file:
        for val in data_file:
            data.append(val.strip())
    print(f"read {len(data)} lines\n")
    return data
def check_data(data):
    """Placeholder validation hook; performs no checks yet."""
    return None
def main():
    """Entry point: read data.txt, then run the validation pass."""
    lines = get_data()
    check_data(lines)
main()  # run the list-based version

# with hash instead of array
def get_data():  # noqa: F811 -- intentionally redefines the list-based version above
    """Read data.txt and return {line_index: list_of_whitespace_tokens}."""
    data = {}
    # Context manager guarantees the file is closed even if reading fails.
    with open("data.txt") as data_file:
        for idx, val in enumerate(data_file):
            data[idx] = val.split()
    print(f"read {len(data)} lines\n")
    return data
def check_data(data):
    """Print every (key, value) pair in *data*; always returns None."""
    for key, value in data.items():
        print(key, value)
    return None
def main():
    """Entry point: read data.txt into a dict, then print/validate it."""
    check_data(get_data())
main()  # run the dict-based version
| StarcoderdataPython |
11213023 | <reponame>kaitlin/afsbirez
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add password-history tracking and a password expiry date to users."""

    dependencies = [
        ('sbirez', '0029_auto_20150804_2055'),
    ]

    operations = [
        migrations.CreateModel(
            name='PasswordHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
                ('password', models.CharField(max_length=128)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.AddField(
            model_name='sbirezuser',
            name='password_expires',
            field=models.DateTimeField(null=True),
        ),
        # Backfill: existing users get an expiry date 60 days from now.
        migrations.RunSQL(sql="UPDATE sbirez_sbirezuser set password_expires=now()+'60 days'"),
        migrations.AddField(
            model_name='passwordhistory',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='prior_passwords'),
        ),
    ]
| StarcoderdataPython |
8192148 |
import numpy as np
from text_selection.kld.kld_iterator import get_minimun_indices
def test_one_entry__returns_zero():
    """A single-element array yields that element and index 0."""
    values = np.array([1.2], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([0]))
    assert minimum == 1.2
def test_two_same_entries__returns_zero_and_one():
    """A tie for the minimum reports every tied index."""
    values = np.array([1.2, 1.2], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([0, 1]))
    assert minimum == 1.2
def test_two_different_entries__returns_min():
    """Distinct entries yield the smaller value and its single index."""
    values = np.array([1, 0.2], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([1]))
    assert minimum == 0.2
def test_two_same_entries_with_one_different_entry__returns_min():
    """A repeated minimum around a larger entry reports both positions."""
    values = np.array([0.2, 1, 0.2], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([0, 2]))
    assert minimum == 0.2
def test_inf_and_one__returns_min():
    """An inf entry loses to any finite value."""
    values = np.array([np.inf, 1.2], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([1]))
    assert minimum == 1.2
def test_only_inf__returns_min():
    """An all-inf array reports inf as the minimum with every index tied."""
    values = np.array([np.inf, np.inf], dtype=np.float64)
    minimum, indices = get_minimun_indices(values)
    np.testing.assert_array_equal(indices, np.array([0, 1]))
    assert np.isinf(minimum)
| StarcoderdataPython |
5139028 | from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUp(generic.CreateView):
    """User registration view built on Django's generic CreateView."""
    # Django's built-in registration form (username + password).
    form_class = UserCreationForm
    # Where to redirect after a successful signup.
    success_url = reverse_lazy('login')
    template_name = 'registration/register.html'
| StarcoderdataPython |
12814792 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
| StarcoderdataPython |
8012705 | from requests import get
from enum import Enum
def all_node_conected_in_node_url(node_url: str = None) -> dict:
    """Return the peers known to a Lunes node.

    node_url: full base URL (scheme included) of the node; defaults to the
    public mainnet node.
    Returns {'status': 'ok', 'response': <json>} on 2xx, otherwise
    {'status': 'error', 'response': <body text>}.
    """
    # `is None` is the correct identity check (the original used `== None`).
    base_url = Node.mainnet_url.value if node_url is None else node_url
    response = get(f'{base_url}/peers/all')
    if response.status_code in range(200, 300):
        return {'status': 'ok', 'response': response.json()}
    return {'status': 'error', 'response': response.text}
def node_version(node_url: str = None) -> dict:
    """Return the LunesNode software version reported by a node.

    node_url: full base URL (scheme included) of the node; defaults to the
    public mainnet node.
    Returns {'status': 'ok', 'response': <json>} on 2xx, otherwise
    {'status': 'error', 'response': <body text>}.
    """
    # `is None` is the correct identity check (the original used `== None`).
    base_url = Node.mainnet_url.value if node_url is None else node_url
    response = get(f'{base_url}/utils/lunesnode/version')
    if response.status_code in range(200, 300):
        return {'status': 'ok', 'response': response.json()}
    return {'status': 'error', 'response': response.text}
def version_all_lunes_node_conected(node_url: str = None) -> dict:
    """Return the address and version of every peer connected to a node.

    node_url: full base URL (scheme included) of the node; defaults to the
    public mainnet node.
    Returns {'status': 'ok', 'response': [{'node_url', 'version'}, ...]} on
    2xx, otherwise {'status': 'error', 'response': <body text>}.
    """
    # `is None` is the correct identity check (the original used `== None`).
    base_url = Node.mainnet_url.value if node_url is None else node_url
    response = get(f'{base_url}/peers/connected')
    if response.status_code not in range(200, 300):
        return {'status': 'error', 'response': response.text}
    return {
        'status': 'ok',
        'response': [
            {
                # Drop the leading '/' the node prepends to peer addresses.
                'node_url': peer['address'][1:],
                'version': peer['applicationVersion'],
            }
            for peer in response.json()['peers']
        ],
    }
class Node(Enum):
    """Well-known Lunes network endpoints and total-supply constants."""
    # Each name below is an Enum member; read values via `.value`
    # (e.g. Node.mainnet_url.value), as the functions above do.
    mainnet_url: str = 'https://lunesnode.lunes.io'
    testnet_url: str = 'https://lunesnode-testnet.lunes.io'
    mainnet_blockexplorer: str = 'https://blockexplorer.lunes.io'
    testnet_blockexplorer: str = 'https://blockexplorer-testnet.lunes.io'
    mainnet_total_supply: float = 150_728_537.61498705
    testnet_total_supply: float = 800_100_000.00000000
3347053 | import numpy as np
import torch
import torch.nn.functional as F
from .strategy import Strategy
from tqdm import tqdm
class AdversarialBIM(Strategy):
    """Active-learning strategy ranking unlabeled points by the adversarial
    distance computed with the Basic Iterative Method (BIM)."""

    def __init__(self, dataset, net, eps=0.05):
        super(AdversarialBIM, self).__init__(dataset, net)
        # Step size of each adversarial perturbation update.
        self.eps = eps

    def cal_dis(self, x):
        """Return the squared L2 norm of the perturbation needed to flip
        the classifier's prediction for sample *x*."""
        nx = torch.unsqueeze(x, 0)
        nx.requires_grad_()
        eta = torch.zeros(nx.shape)

        out, e1 = self.net.clf(nx+eta)
        py = out.max(1)[1]
        ny = out.max(1)[1]
        # py == ny initially by construction; iterate until the predicted
        # label changes. NOTE(review): this loop has no iteration cap and
        # will not terminate if the prediction never flips -- confirm that
        # is acceptable for the datasets used.
        while py.item() == ny.item():
            loss = F.cross_entropy(out, ny)
            loss.backward()

            eta += self.eps * torch.sign(nx.grad.data)
            nx.grad.data.zero_()

            out, e1 = self.net.clf(nx+eta)
            py = out.max(1)[1]

        return (eta*eta).sum()

    def query(self, n):
        """Return the indices of the *n* unlabeled samples with the smallest
        adversarial distance (closest to the decision boundary)."""
        unlabeled_idxs, unlabeled_data = self.dataset.get_unlabeled_data()

        # Distances are computed on CPU, one sample at a time.
        self.net.clf.cpu()
        self.net.clf.eval()
        dis = np.zeros(unlabeled_idxs.shape)
        for i in tqdm(range(len(unlabeled_idxs)), ncols=100):
            x, y, idx = unlabeled_data[i]
            dis[i] = self.cal_dis(x)
        self.net.clf.cuda()

        return unlabeled_idxs[dis.argsort()[:n]]
| StarcoderdataPython |
30090 | """Models and utilities for processing SMIRNOFF data."""
import abc
import copy
import functools
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines.smirnoff.parameters import (
AngleHandler,
BondHandler,
ChargeIncrementModelHandler,
ConstraintHandler,
ElectrostaticsHandler,
ImproperTorsionHandler,
LibraryChargeHandler,
ParameterHandler,
ProperTorsionHandler,
ToolkitAM1BCCHandler,
UnassignedProperTorsionParameterException,
UnassignedValenceParameterException,
VirtualSiteHandler,
vdWHandler,
)
from openff.units import unit
from openff.units.openmm import from_openmm
from openmm import unit as omm_unit
from pydantic import Field
from typing_extensions import Literal
from openff.interchange.components.potentials import (
Potential,
PotentialHandler,
WrappedPotential,
)
from openff.interchange.exceptions import (
InvalidParameterHandlerError,
MissingParametersError,
SMIRNOFFParameterAttributeNotImplementedError,
)
from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey
from openff.interchange.types import FloatQuantity
# Frequently used OpenMM unit shorthands for force-constant conversions.
kcal_mol = omm_unit.kilocalorie_per_mole
kcal_mol_angstroms = kcal_mol / omm_unit.angstrom ** 2
kcal_mol_radians = kcal_mol / omm_unit.radian ** 2

if TYPE_CHECKING:
    # Imported for type annotations only, keeping runtime imports light.
    from openff.toolkit.topology import Topology

    from openff.interchange.components.mdtraj import _OFFBioTop

# Any SMIRNOFF parameter handler that can produce electrostatics parameters.
ElectrostaticsHandlerType = Union[
    ElectrostaticsHandler,
    ChargeIncrementModelHandler,
    LibraryChargeHandler,
    ToolkitAM1BCCHandler,
]

# Type variables for the generic classmethod constructors below.
T = TypeVar("T", bound="SMIRNOFFPotentialHandler")
TP = TypeVar("TP", bound="PotentialHandler")
class SMIRNOFFPotentialHandler(PotentialHandler, abc.ABC):
    """Base class for handlers storing potentials produced by SMIRNOFF force fields."""

    @classmethod
    @abc.abstractmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def supported_parameters(cls):
        """Return a list of parameter attributes supported by this handler."""
        raise NotImplementedError()

    # @classmethod
    # @abc.abstractmethod
    # def valence_terms(cls, topology):
    #     """Return an interable of all of one type of valence term in this topology."""
    #     raise NotImplementedError()

    @classmethod
    def check_supported_parameters(cls, parameter_handler: ParameterHandler):
        """Verify that a parameter handler is in an allowed list of handlers.

        Raises SMIRNOFFParameterAttributeNotImplementedError for any
        parameter attribute this handler does not support.
        """
        for parameter in parameter_handler.parameters:
            for parameter_attribute in parameter._get_defined_parameter_attributes():
                if parameter_attribute not in cls.supported_parameters():
                    raise SMIRNOFFParameterAttributeNotImplementedError(
                        parameter_attribute,
                    )

    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey]."""
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            # TODO: Should the slot_map always be reset, or should we be able to partially
            # update it? Also Note the duplicated code in the child classes
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            topology_key = TopologyKey(atom_indices=key)
            potential_key = PotentialKey(
                id=val.parameter_type.smirks, associated_handler=parameter_handler_name
            )
            self.slot_map[topology_key] = potential_key
        # Bonds and angles must cover every valence term of their kind;
        # other handlers (e.g. torsions) may legitimately leave gaps.
        if self.__class__.__name__ in ["SMIRNOFFBondHandler", "SMIRNOFFAngleHandler"]:
            valence_terms = self.valence_terms(topology)  # type: ignore[attr-defined]
            parameter_handler._check_all_valence_terms_assigned(
                assigned_terms=matches,
                valence_terms=valence_terms,
                exception_cls=UnassignedValenceParameterException,
            )

    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: TP,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFPotentialHandler from toolkit data.
        """
        if type(parameter_handler) not in cls.allowed_parameter_handlers():
            raise InvalidParameterHandlerError(type(parameter_handler))

        handler = cls()
        # Propagate fractional bond order settings when both sides define them.
        if hasattr(handler, "fractional_bond_order_method"):
            if getattr(parameter_handler, "fractional_bondorder_method", None):
                handler.fractional_bond_order_method = (  # type: ignore[attr-defined]
                    parameter_handler.fractional_bondorder_method  # type: ignore[attr-defined]
                )
                handler.fractional_bond_order_interpolation = (  # type: ignore[attr-defined]
                    parameter_handler.fractional_bondorder_interpolation  # type: ignore[attr-defined]
                )
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)

        return handler
class SMIRNOFFBondHandler(SMIRNOFFPotentialHandler):
    """Handler storing bond potentials as produced by a SMIRNOFF force field."""

    type: Literal["Bonds"] = "Bonds"
    expression: Literal["k/2*(r-length)**2"] = "k/2*(r-length)**2"
    fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
    fractional_bond_order_interpolation: Literal["linear"] = "linear"

    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [BondHandler]

    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "length", "k_bondorder", "length_bondorder"]

    @classmethod
    def valence_terms(cls, topology):
        """Return all bonds in this topology."""
        return [list(b.atoms) for b in topology.topology_bonds]

    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Unlike the base implementation, keys carry the fractional bond order
        when the matched parameter interpolates on it.
        """
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            # TODO: Should the slot_map always be reset, or should we be able to partially
            # update it? Also Note the duplicated code in the child classes
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            param = val.parameter_type
            if param.k_bondorder or param.length_bondorder:
                # Interpolated parameters need the bond's fractional bond
                # order, which must have been assigned beforehand.
                top_bond = topology.get_bond_between(*key)
                fractional_bond_order = top_bond.bond.fractional_bond_order
                if not fractional_bond_order:
                    raise RuntimeError(
                        "Bond orders should already be assigned at this point"
                    )
            else:
                fractional_bond_order = None
            topology_key = TopologyKey(
                atom_indices=key, bond_order=fractional_bond_order
            )
            potential_key = PotentialKey(
                id=val.parameter_type.smirks,
                associated_handler=parameter_handler_name,
                bond_order=fractional_bond_order,
            )
            self.slot_map[topology_key] = potential_key

        valence_terms = self.valence_terms(topology)

        parameter_handler._check_all_valence_terms_assigned(
            assigned_terms=matches,
            valence_terms=valence_terms,
            exception_cls=UnassignedValenceParameterException,
        )

    def store_potentials(self, parameter_handler: "BondHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        Bond-order-interpolated parameters produce a WrappedPotential built
        from linear interpolation coefficients; plain parameters map to a
        single Potential.
        """
        if self.potentials:
            self.potentials = dict()
        for topology_key, potential_key in self.slot_map.items():
            smirks = potential_key.id
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            if topology_key.bond_order:  # type: ignore[union-attr]
                bond_order = topology_key.bond_order  # type: ignore[union-attr]
                if parameter.k_bondorder:
                    data = parameter.k_bondorder
                else:
                    data = parameter.length_bondorder
                coeffs = _get_interpolation_coeffs(
                    fractional_bond_order=bond_order,
                    data=data,
                )
                pots = []
                map_keys = [*data.keys()]
                for map_key in map_keys:
                    pots.append(
                        Potential(
                            parameters={
                                "k": parameter.k_bondorder[map_key],
                                "length": parameter.length_bondorder[map_key],
                            },
                            map_key=map_key,
                        )
                    )
                potential = WrappedPotential(
                    {pot: coeff for pot, coeff in zip(pots, coeffs)}
                )
            else:
                potential = Potential(  # type: ignore[assignment]
                    parameters={
                        "k": parameter.k,
                        "length": parameter.length,
                    },
                )
            self.potentials[potential_key] = potential

    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: "BondHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFBondHandler from toolkit data.
        """
        # TODO: This method overrides SMIRNOFFPotentialHandler.from_toolkit in order to gobble up
        # a ConstraintHandler. This seems like a good solution for the interdependence, but is also
        # not a great practice. A better solution would involve not overriding the method with a
        # different function signature.
        if type(parameter_handler) not in cls.allowed_parameter_handlers():
            raise InvalidParameterHandlerError

        handler: T = cls(type="Bonds", expression="k/2*(r-length)**2")

        # Fractional bond orders are only needed (and only computed) when at
        # least one parameter interpolates k or length on bond order.
        if (
            any(
                getattr(p, "k_bondorder", None) is not None
                for p in parameter_handler.parameters
            )
        ) or (
            any(
                getattr(p, "length_bondorder", None) is not None
                for p in parameter_handler.parameters
            )
        ):
            for ref_mol in topology.reference_molecules:
                # TODO: expose conformer generation and fractional bond order assigment
                # knobs to user via API
                ref_mol.generate_conformers(n_conformers=1)
                ref_mol.assign_fractional_bond_orders(
                    bond_order_model=handler.fractional_bond_order_method.lower(),  # type: ignore[attr-defined]
                )

        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)

        return handler
class SMIRNOFFConstraintHandler(SMIRNOFFPotentialHandler):
    """
    Handler storing constraint potentials as produced by a SMIRNOFF force field.

    Constraints without an explicit distance borrow their distance from the
    corresponding bond parameter, so this handler may consume both a
    ConstraintHandler and a BondHandler.
    """
    type: Literal["Constraints"] = "Constraints"
    expression: Literal[""] = ""
    # Maps each constraint's PotentialKey to its Potential (holding "distance").
    constraints: Dict[
        PotentialKey, bool
    ] = dict() # should this be named potentials for consistency?
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [BondHandler, ConstraintHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "length", "distance"]
    @classmethod
    def _from_toolkit( # type: ignore[override]
        cls: Type[T],
        parameter_handler: List,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFConstraintHandler from toolkit data.

        Accepts either a single ParameterHandler or a list of them (typically a
        ConstraintHandler plus, optionally, a BondHandler).
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        for parameter_handler in parameter_handlers:
            if type(parameter_handler) not in cls.allowed_parameter_handlers():
                raise InvalidParameterHandlerError(type(parameter_handler))
        handler = cls()
        handler.store_constraints( # type: ignore[attr-defined]
            parameter_handlers=parameter_handlers, topology=topology
        )
        return handler
    def store_constraints(
        self,
        parameter_handlers: Any,
        topology: "_OFFBioTop",
    ) -> None:
        """Store constraints, resolving missing distances from bond parameters."""
        if self.slot_map:
            self.slot_map = dict()
        # Exactly one ConstraintHandler is expected among the inputs.
        constraint_handler = [
            p for p in parameter_handlers if type(p) == ConstraintHandler
        ][0]
        constraint_matches = constraint_handler.find_matches(topology)
        # A BondHandler is only needed to resolve constraints with no distance.
        if any([type(p) == BondHandler for p in parameter_handlers]):
            bond_handler = [p for p in parameter_handlers if type(p) == BondHandler][0]
            bonds = SMIRNOFFBondHandler._from_toolkit(
                parameter_handler=bond_handler,
                topology=topology,
            )
        else:
            bond_handler = None
            bonds = None
        for key, match in constraint_matches.items():
            topology_key = TopologyKey(atom_indices=key)
            smirks = match.parameter_type.smirks
            distance = match.parameter_type.distance
            if distance is not None:
                # This constraint parameter is fully specified
                potential_key = PotentialKey(
                    id=smirks, associated_handler="Constraints"
                )
                distance = match.parameter_type.distance
            else:
                # This constraint parameter depends on the BondHandler ...
                if bond_handler is None:
                    raise MissingParametersError(
                        f"Constraint with SMIRKS pattern {smirks} found with no distance "
                        "specified, and no corresponding bond parameters were found. The distance "
                        "of this constraint is not specified."
                    )
                # ... so use the same PotentialKey instance as the BondHandler to look up the distance
                potential_key = bonds.slot_map[topology_key] # type: ignore[union-attr]
                self.slot_map[topology_key] = potential_key
                distance = bonds.potentials[potential_key].parameters["length"] # type: ignore[union-attr]
            potential = Potential(
                parameters={
                    "distance": distance,
                }
            )
            self.constraints[potential_key] = potential # type: ignore[assignment]
class SMIRNOFFAngleHandler(SMIRNOFFPotentialHandler):
    """Handler storing angle potentials as produced by a SMIRNOFF force field."""
    type: Literal["Angles"] = "Angles"
    expression: Literal["k/2*(theta-angle)**2"] = "k/2*(theta-angle)**2"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [AngleHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attributes."""
        return ["smirks", "id", "k", "angle"]
    @classmethod
    def valence_terms(cls, topology):
        """Return all angles in this topology."""
        return list(topology.angles)
    def store_potentials(self, parameter_handler: "AngleHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [PotentialKey, Potential].
        """
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            # ParameterHandler.get_parameter returns a list, although this
            # should only ever be length 1
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            potential = Potential(
                parameters={
                    "k": parameter.k,
                    "angle": parameter.angle,
                },
            )
            self.potentials[potential_key] = potential
    # NOTE(review): `f_from_toolkit` looks like a typo of `_from_toolkit` (cf. the
    # other handlers in this module); as written it does NOT override the
    # base-class constructor. Renaming it would change the public API, so it is
    # only flagged here — confirm whether anything calls this name.
    @classmethod
    def f_from_toolkit(
        cls: Type[T],
        parameter_handler: "AngleHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFAngleHandler from toolkit data.
        """
        handler = cls()
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)
        return handler
class SMIRNOFFProperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing proper torsions potentials as produced by a SMIRNOFF force field."""
    type: Literal["ProperTorsions"] = "ProperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
    fractional_bond_order_interpolation: Literal["linear"] = "linear"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ProperTorsionHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf", "k_bondorder"]
    def store_matches(
        self,
        parameter_handler: "ProperTorsionHandler",
        topology: "_OFFBioTop",
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Each torsion may carry several periodicity terms; a separate
        TopologyKey/PotentialKey pair (distinguished by ``mult``) is stored for
        each term.
        """
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            param = val.parameter_type
            # One slot per periodicity term of this torsion parameter.
            n_terms = len(val.parameter_type.phase)
            for n in range(n_terms):
                smirks = param.smirks
                if param.k_bondorder:
                    # The relevant bond order is that of the _central_ bond in the torsion
                    top_bond = topology.get_bond_between(key[1], key[2])
                    fractional_bond_order = top_bond.bond.fractional_bond_order
                    if not fractional_bond_order:
                        raise RuntimeError(
                            "Bond orders should already be assigned at this point"
                        )
                else:
                    fractional_bond_order = None
                topology_key = TopologyKey(
                    atom_indices=key, mult=n, bond_order=fractional_bond_order
                )
                potential_key = PotentialKey(
                    id=smirks,
                    mult=n,
                    associated_handler="ProperTorsions",
                    bond_order=fractional_bond_order,
                )
                self.slot_map[topology_key] = potential_key
        parameter_handler._check_all_valence_terms_assigned(
            assigned_terms=matches,
            valence_terms=list(topology.propers),
            exception_cls=UnassignedProperTorsionParameterException,
        )
    def store_potentials(self, parameter_handler: "ProperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [PotentialKey, Potential].

        Terms whose TopologyKey carries a fractional bond order are interpolated
        between the tabulated k_bondorder points and stored as WrappedPotentials.
        """
        for topology_key, potential_key in self.slot_map.items():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            # n_terms = len(parameter.k)
            if topology_key.bond_order: # type: ignore[union-attr]
                bond_order = topology_key.bond_order # type: ignore[union-attr]
                # Tabulated k values for this periodicity term, keyed by bond order.
                data = parameter.k_bondorder[n]
                coeffs = _get_interpolation_coeffs(
                    fractional_bond_order=bond_order,
                    data=data,
                )
                pots = []
                map_keys = [*data.keys()]
                for map_key in map_keys:
                    parameters = {
                        "k": parameter.k_bondorder[n][map_key],
                        "periodicity": parameter.periodicity[n] * unit.dimensionless,
                        "phase": parameter.phase[n],
                        "idivf": parameter.idivf[n] * unit.dimensionless,
                    }
                    pots.append(
                        Potential(
                            parameters=parameters,
                            map_key=map_key,
                        )
                    )
                # Linear combination of the tabulated potentials.
                potential = WrappedPotential(
                    {pot: coeff for pot, coeff in zip(pots, coeffs)}
                )
            else:
                parameters = {
                    "k": parameter.k[n],
                    "periodicity": parameter.periodicity[n] * unit.dimensionless,
                    "phase": parameter.phase[n],
                    "idivf": parameter.idivf[n] * unit.dimensionless,
                }
                potential = Potential(parameters=parameters) # type: ignore[assignment]
            self.potentials[potential_key] = potential
class SMIRNOFFImproperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing improper torsions potentials as produced by a SMIRNOFF force field."""
    type: Literal["ImproperTorsions"] = "ImproperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ImproperTorsionHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf"]
    def store_matches(
        self, parameter_handler: "ImproperTorsionHandler", topology: "_OFFBioTop"
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Each matched improper is expanded into the three cyclic permutations of
        its non-central atoms (the "trefoil" treatment), with the central atom
        (key[1]) kept first in the stored atom indices.
        """
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            # The central atom is expected at position 1 of the match.
            parameter_handler._assert_correct_connectivity(
                val,
                [
                    (0, 1),
                    (1, 2),
                    (1, 3),
                ],
            )
            n_terms = len(val.parameter_type.k)
            for n in range(n_terms):
                smirks = val.parameter_type.smirks
                non_central_indices = [key[0], key[2], key[3]]
                # Store one slot per cyclic permutation of the outer atoms.
                for permuted_key in [
                    (
                        non_central_indices[i],
                        non_central_indices[j],
                        non_central_indices[k],
                    )
                    for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
                ]:
                    topology_key = TopologyKey(
                        atom_indices=(key[1], *permuted_key), mult=n
                    )
                    potential_key = PotentialKey(
                        id=smirks, mult=n, associated_handler="ImproperTorsions"
                    )
                    self.slot_map[topology_key] = potential_key
    def store_potentials(self, parameter_handler: "ImproperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [PotentialKey, Potential].
        """
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            parameters = {
                "k": parameter.k[n],
                "periodicity": parameter.periodicity[n] * unit.dimensionless,
                "phase": parameter.phase[n],
                # idivf is fixed at 3.0 because each improper is applied three
                # times (once per trefoil permutation in store_matches).
                # NOTE(review): the parameter's own idivf attribute is ignored
                # here — confirm this matches the intended SMIRNOFF treatment.
                "idivf": 3.0 * unit.dimensionless,
            }
            potential = Potential(parameters=parameters)
            self.potentials[potential_key] = potential
class _SMIRNOFFNonbondedHandler(SMIRNOFFPotentialHandler, abc.ABC):
    """
    Base class for handlers storing non-bonded potentials produced by SMIRNOFF force fields.

    Holds the global cutoff and the 1-3/1-4/1-5 intramolecular scale factors
    shared by the vdW and electrostatics handlers.
    """
    # Subclasses override `type` with their specific tag.
    type: Literal["nonbonded"] = "nonbonded"
    cutoff: FloatQuantity["angstrom"] = Field(  # type: ignore
        9.0 * unit.angstrom,
        description="The distance at which pairwise interactions are truncated",
    )
    scale_13: float = Field(
        0.0, description="The scaling factor applied to 1-3 interactions"
    )
    scale_14: float = Field(
        0.5, description="The scaling factor applied to 1-4 interactions"
    )
    scale_15: float = Field(
        1.0, description="The scaling factor applied to 1-5 interactions"
    )
class SMIRNOFFvdWHandler(_SMIRNOFFNonbondedHandler):
    """Handler storing vdW potentials as produced by a SMIRNOFF force field."""
    type: Literal["vdW"] = "vdW"  # type: ignore[assignment]
    expression: Literal[
        "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    ] = "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    method: Literal["cutoff", "pme", "no-cutoff"] = Field("cutoff")
    mixing_rule: Literal["lorentz-berthelot", "geometric"] = Field(
        "lorentz-berthelot",
        description="The mixing rule (combination rule) used in computing pairwise vdW interactions",
    )
    switch_width: FloatQuantity["angstrom"] = Field(  # type: ignore
        1.0 * unit.angstrom,
        description="The width over which the switching function is applied",
    )
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [vdWHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attributes."""
        return ["smirks", "id", "sigma", "epsilon", "rmin_half"]
    def store_potentials(self, parameter_handler: vdWHandler) -> None:
        """
        Populate self.potentials with key-val pairs of [PotentialKey, Potential].

        Also copies the handler-level method and cutoff from the toolkit handler.
        Parameters defined via ``rmin_half`` instead of ``sigma`` are converted
        using the Lennard-Jones relation rmin = 2 ** (1 / 6) * sigma.
        """
        self.method = parameter_handler.method.lower()
        self.cutoff = parameter_handler.cutoff
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            try:
                potential = Potential(
                    parameters={
                        "sigma": parameter.sigma,
                        "epsilon": parameter.epsilon,
                    },
                )
            except AttributeError:
                # Handle rmin_half pending https://github.com/openforcefield/openff-toolkit/pull/750
                # rmin_half = rmin / 2 and rmin = 2 ** (1 / 6) * sigma, so
                # sigma = 2 * rmin_half / 2 ** (1 / 6).
                potential = Potential(
                    parameters={
                        "sigma": (2.0 * parameter.rmin_half) / (2.0 ** (1.0 / 6.0)),
                        "epsilon": parameter.epsilon,
                    },
                )
            self.potentials[potential_key] = potential
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: "vdWHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFvdWHandler from toolkit data.

        Parameters
        ----------
        parameter_handler
            The toolkit vdWHandler (or a list containing one) to source
            parameters and global settings from.
        topology
            The topology to apply the parameters to.

        Returns
        -------
        A populated handler instance of type ``cls``.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        for parameter_handler in parameter_handlers:
            if type(parameter_handler) not in cls.allowed_parameter_handlers():
                raise InvalidParameterHandlerError(
                    f"Found parameter handler type {type(parameter_handler)}, which is not "
                    f"supported by potential type {type(cls)}"
                )
        handler = cls(
            scale_13=parameter_handler.scale13,
            scale_14=parameter_handler.scale14,
            scale_15=parameter_handler.scale15,
            cutoff=parameter_handler.cutoff,
            mixing_rule=parameter_handler.combining_rules.lower(),
            method=parameter_handler.method.lower(),
            switch_width=parameter_handler.switch_width,
        )
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)
        return handler
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["vdw", "VirtualSites"]
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        """Store vdW parameters (sigma/epsilon) for matched virtual sites."""
        # TODO: Merge this logic into _from_toolkit
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atoms, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            top_key = VirtualSiteKey(
                atom_indices=atoms,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            pot_key = PotentialKey(
                id=virtual_site_type.smirks, associated_handler=virtual_site_type.type
            )
            pot = Potential(
                parameters={
                    "sigma": virtual_site_type.sigma,
                    "epsilon": virtual_site_type.epsilon,
                    # "distance": virtual_site_type.distance,
                }
            )
            # if virtual_site_type.type in {"MonovalentLonePair", "DivalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "outOfPlaneAngle": virtual_site_type.outOfPlaneAngle,
            #         }
            #     )
            # if virtual_site_type.type in {"MonovalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "inPlaneAngle": virtual_site_type.inPlaneAngle,
            #         }
            #     )
            self.slot_map.update({top_key: pot_key})
            self.potentials.update({pot_key: pot})
class SMIRNOFFElectrostaticsHandler(_SMIRNOFFNonbondedHandler):
    """
    A handler which stores any electrostatic parameters applied to a topology.

    This handler is responsible for grouping together

    * global settings for the electrostatic interactions such as the cutoff
      distance and the intramolecular scale factors.
    * partial charges which have been assigned by a ``ToolkitAM1BCC``,
      ``LibraryCharges``, or a ``ChargeIncrementModel`` parameter
      handler.
    * charge corrections applied by a ``SMIRNOFFChargeIncrementHandler``.

    rather than having each in their own handler.
    """
    type: Literal["Electrostatics"] = "Electrostatics"  # type: ignore[assignment]
    expression: Literal["coul"] = "coul"
    method: Literal["pme", "cutoff", "reaction-field", "no-cutoff"] = Field("pme")
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [
            LibraryChargeHandler,
            ChargeIncrementModelHandler,
            ToolkitAM1BCCHandler,
            ElectrostaticsHandler,
        ]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        # NOTE(review): returns None (no explicit parameter list) — confirm
        # whether this is intentional for this handler.
        pass
    @property
    def charges(self) -> Dict[Union[TopologyKey, VirtualSiteKey], unit.Quantity]:
        """Get the total partial charge on each atom, excluding virtual sites."""
        return self.get_charges(include_virtual_sites=False)
    @property
    def charges_with_virtual_sites(
        self,
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """Get the total partial charge on each atom, including virtual sites."""
        return self.get_charges(include_virtual_sites=True)
    def get_charges(
        self, include_virtual_sites=False
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """
        Get the total partial charge on each atom or particle.

        Sums "charge" and "charge_increment" contributions per atom; virtual
        sites carry the negated sum of their "charge_increments".
        """
        charges: DefaultDict[
            Union[TopologyKey, VirtualSiteKey], FloatQuantity
        ] = defaultdict(lambda: 0.0 * unit.e)
        for topology_key, potential_key in self.slot_map.items():
            potential = self.potentials[potential_key]
            for parameter_key, parameter_value in potential.parameters.items():
                if parameter_key == "charge_increments":
                    if type(topology_key) != VirtualSiteKey:
                        raise RuntimeError
                    # The virtual site's own charge balances the increments it
                    # applies to its parent atoms.
                    charge = -1.0 * np.sum(parameter_value)
                    # assumes virtual sites can only have charges determined in one step
                    # also, topology_key is actually a VirtualSiteKey
                    charges[topology_key] = charge
                elif parameter_key in ["charge", "charge_increment"]:
                    charge = parameter_value
                    # Accumulate under the bare atom index; converted back to
                    # TopologyKey objects below.
                    charges[topology_key.atom_indices[0]] += charge  # type: ignore
                else:
                    raise NotImplementedError()
        returned_charges: Dict[
            Union[VirtualSiteKey, TopologyKey], unit.Quantity
        ] = dict()
        for index, charge in charges.items():
            if isinstance(index, int):
                returned_charges[TopologyKey(atom_indices=(index,))] = charge
            else:
                if include_virtual_sites:
                    returned_charges[index] = charge
        return returned_charges
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["LibraryCharges", "ChargeIncrementModel", "ToolkitAM1BCC"]
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: Any,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFElectrostaticsHandler from toolkit data.

        Global settings (scales, cutoff, method) come from the
        ElectrostaticsHandler among the inputs; charges are assigned by
        ``store_matches`` from the full list of handlers.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        toolkit_handler_with_metadata = [
            p for p in parameter_handlers if type(p) == ElectrostaticsHandler
        ][0]
        handler = cls(
            type=toolkit_handler_with_metadata._TAGNAME,
            scale_13=toolkit_handler_with_metadata.scale13,
            scale_14=toolkit_handler_with_metadata.scale14,
            scale_15=toolkit_handler_with_metadata.scale15,
            cutoff=toolkit_handler_with_metadata.cutoff,
            method=toolkit_handler_with_metadata.method.lower(),
        )
        handler.store_matches(parameter_handlers, topology)
        return handler
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        """Store charge increments for matched virtual sites and their parent atoms."""
        # TODO: Merge this logic into _from_toolkit
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atom_indices, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            virtual_site_key = VirtualSiteKey(
                atom_indices=atom_indices,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            virtual_site_potential_key = PotentialKey(
                id=virtual_site_type.smirks,
                associated_handler="VirtualSiteHandler",
            )
            virtual_site_potential = Potential(
                parameters={
                    "charge_increments": from_openmm(
                        virtual_site_type.charge_increment
                    ),
                }
            )
            # NOTE: rebinding `matches` here shadows the find_matches result;
            # the outer loop's iterator is unaffected.
            matches = {}
            potentials = {}
            self.slot_map.update({virtual_site_key: virtual_site_potential_key})
            self.potentials.update({virtual_site_potential_key: virtual_site_potential})
            # TODO: Counter-intuitive that toolkit regression tests pass by using the counter
            # variable i as if it was the atom index - shouldn't it just use atom_index?
            for i, atom_index in enumerate(atom_indices):  # noqa
                topology_key = TopologyKey(atom_indices=(i,), mult=2)
                potential_key = PotentialKey(
                    id=virtual_site_type.smirks,
                    mult=i,
                    associated_handler="VirtualSiteHandler",
                )
                charge_increment = getattr(
                    virtual_site_type, f"charge_increment{i + 1}"
                )
                potential = Potential(
                    parameters={"charge_increment": from_openmm(charge_increment)}
                )
                matches[topology_key] = potential_key
                potentials[potential_key] = potential
            self.slot_map.update(matches)
            self.potentials.update(potentials)
    @classmethod
    # NOTE(review): lru_cache on a classmethod keeps every cached Molecule alive
    # for the process lifetime and keys on Molecule hashing — confirm acceptable.
    @functools.lru_cache(None)
    def _compute_partial_charges(cls, molecule: Molecule, method: str) -> unit.Quantity:
        """Call out to the toolkit's toolkit wrappers to generate partial charges."""
        # Deep-copy so charge assignment does not mutate the caller's molecule.
        molecule = copy.deepcopy(molecule)
        molecule.assign_partial_charges(method)
        return from_openmm(molecule.partial_charges)
    @classmethod
    def _library_charge_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: LibraryChargeHandler.LibraryChargeType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched library charge parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, (atom_index, charge) in enumerate(zip(atom_indices, parameter.charge)):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="LibraryCharges"
            )
            potential = Potential(parameters={"charge": from_openmm(charge)})
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _charge_increment_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: ChargeIncrementModelHandler.ChargeIncrementType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched charge increment parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, atom_index in enumerate(atom_indices):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="ChargeIncrementModel"
            )
            # TODO: Handle the cases where n - 1 charge increments have been defined,
            # maybe by implementing this in the TK?
            charge_increment = getattr(parameter, f"charge_increment{i + 1}")
            potential = Potential(
                parameters={"charge_increment": from_openmm(charge_increment)}
            )
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _find_slot_matches(
        cls,
        parameter_handler: Union["LibraryChargeHandler", "ChargeIncrementModelHandler"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a slot based parameter handler.
        """
        # Ideally this would be made redundant by OpenFF TK #971
        # De-duplicate matches that differ only in atom ordering.
        unique_parameter_matches = {
            tuple(sorted(key)): (key, val)
            for key, val in parameter_handler.find_matches(
                reference_molecule.to_topology()
            ).items()
        }
        parameter_matches = {key: val for key, val in unique_parameter_matches.values()}
        matches, potentials = {}, {}
        for key, val in parameter_matches.items():
            parameter = val.parameter_type
            if isinstance(parameter_handler, LibraryChargeHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._library_charge_to_potentials(key, parameter)
            elif isinstance(parameter_handler, ChargeIncrementModelHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._charge_increment_to_potentials(key, parameter)
            else:
                raise NotImplementedError()
            matches.update(parameter_matches)
            potentials.update(parameter_potentials)
        return matches, potentials
    @classmethod
    def _find_am1_matches(
        cls,
        parameter_handler: Union["ToolkitAM1BCCHandler", ChargeIncrementModelHandler],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """Construct a slot and potential map for a charge model based parameter handler."""
        reference_molecule = copy.deepcopy(reference_molecule)
        # The mapped SMILES serves as a reproducible identifier for the charges.
        reference_smiles = reference_molecule.to_smiles(
            isomeric=True, explicit_hydrogens=True, mapped=True
        )
        method = getattr(parameter_handler, "partial_charge_method", "am1bcc")
        partial_charges = cls._compute_partial_charges(
            reference_molecule, method=method
        )
        matches = {}
        potentials = {}
        for i, partial_charge in enumerate(partial_charges):
            potential_key = PotentialKey(
                id=reference_smiles, mult=i, associated_handler="ToolkitAM1BCC"
            )
            potentials[potential_key] = Potential(parameters={"charge": partial_charge})
            matches[TopologyKey(atom_indices=(i,))] = potential_key
        return matches, potentials
    @classmethod
    def _find_reference_matches(
        cls,
        parameter_handlers: Dict[str, "ElectrostaticsHandlerType"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a particular reference molecule and set of parameter handlers.

        Handlers are tried in precedence order; the first handler (or
        slot+AM1 combination) that covers every atom of the molecule wins.
        """
        matches = {}
        potentials = {}
        expected_matches = {i for i in range(reference_molecule.n_atoms)}
        for handler_type in cls.parameter_handler_precedence():
            if handler_type not in parameter_handlers:
                continue
            parameter_handler = parameter_handlers[handler_type]
            slot_matches, am1_matches = None, None
            slot_potentials: Dict = {}
            am1_potentials: Dict = {}
            if handler_type in ["LibraryCharges", "ChargeIncrementModel"]:
                slot_matches, slot_potentials = cls._find_slot_matches(
                    parameter_handler, reference_molecule
                )
            if handler_type in ["ToolkitAM1BCC", "ChargeIncrementModel"]:
                am1_matches, am1_potentials = cls._find_am1_matches(
                    parameter_handler, reference_molecule
                )
            if slot_matches is None and am1_matches is None:
                raise NotImplementedError()
            elif slot_matches is not None and am1_matches is not None:
                # Both kinds apply (ChargeIncrementModel): disambiguate their
                # keys with distinct mults so they can coexist in one map.
                am1_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=0
                    ): potential_key
                    for topology_key, potential_key in am1_matches.items()
                }
                slot_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=1
                    ): potential_key
                    for topology_key, potential_key in slot_matches.items()
                }
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
                matched_atom_indices.intersection_update(
                    {index for key in am1_matches for index in key.atom_indices}
                )
            elif slot_matches is not None:
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
            else:
                matched_atom_indices = {
                    index for key in am1_matches for index in key.atom_indices  # type: ignore[union-attr]
                }
            if matched_atom_indices != expected_matches:
                # Handle the case where a handler could not fully assign the charges
                # to the whole molecule.
                continue
            matches.update(slot_matches if slot_matches is not None else {})
            matches.update(am1_matches if am1_matches is not None else {})
            potentials.update(slot_potentials)
            potentials.update(am1_potentials)
            break
        found_matches = {index for key in matches for index in key.atom_indices}
        if found_matches != expected_matches:
            raise RuntimeError(
                f"{reference_molecule.to_smiles(explicit_hydrogens=False)} could "
                f"not be fully assigned charges."
            )
        return matches, potentials
    def store_matches(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Charges are computed once per reference molecule and then mapped onto
        every topology molecule that shares that reference.
        """
        # Reshape the parameter handlers into a dictionary for easier referencing.
        parameter_handlers = {
            handler._TAGNAME: handler
            for handler in (
                parameter_handler
                if isinstance(parameter_handler, list)
                else [parameter_handler]
            )
        }
        self.potentials = dict()
        self.slot_map = dict()
        reference_molecules = [*topology.reference_molecules]
        for reference_molecule in reference_molecules:
            matches, potentials = self._find_reference_matches(
                parameter_handlers, reference_molecule
            )
            # Collect which mults were used per reference atom so topology keys
            # can be re-created with the same mults below.
            match_mults = defaultdict(set)
            for top_key in matches:
                match_mults[top_key.atom_indices].add(top_key.mult)
            self.potentials.update(potentials)
            for top_mol in topology._reference_molecule_to_topology_molecules[
                reference_molecule
            ]:
                for topology_particle in top_mol.atoms:
                    reference_index = topology_particle.atom.molecule_particle_index
                    topology_index = topology_particle.topology_particle_index
                    for mult in match_mults[(reference_index,)]:
                        top_key = TopologyKey(atom_indices=(topology_index,), mult=mult)
                        self.slot_map[top_key] = matches[
                            TopologyKey(atom_indices=(reference_index,), mult=mult)
                        ]
    def store_potentials(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
    ) -> None:
        """
        Populate self.potentials with key-val pairs of [PotentialKey, Potential].
        """
        # This logic is handled by ``store_matches`` as we may need to create potentials
        # to store depending on the handler type.
        pass
class SMIRNOFFVirtualSiteHandler(SMIRNOFFPotentialHandler):
    """
    A handler which stores the information necessary to construct virtual sites (virtual particles).
    """
    # NOTE(review): `type` is declared as Literal["Bonds"] — presumably a
    # copy-paste from the bond handler; "VirtualSites" would be expected.
    # Changing it would alter serialized output, so it is only flagged here.
    type: Literal["Bonds"] = "Bonds"
    expression: Literal[""] = ""
    virtual_site_key_topology_index_map: Dict["VirtualSiteKey", int] = Field(
        dict(),
        description="A mapping between VirtualSiteKey objects (stored analogously to TopologyKey objects"
        "in other handlers) and topology indices describing the associated virtual site",
    )
    exclusion_policy: Literal["parents"] = "parents"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [VirtualSiteHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of parameter attributes supported by this handler."""
        return ["distance", "outOfPlaneAngle", "inPlaneAngle"]
    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of [VirtualSiteKey, PotentialKey].

        Differs from SMIRNOFFPotentialHandler.store_matches because each key
        can point to multiple potentials (?); each value in the dict is a
        list of parametertypes, whereas conventional handlers don't have lists
        """
        # Virtual sites are appended after all atoms, so their topology indices
        # start at the current atom count and increase per site created.
        virtual_site_index = topology.n_topology_atoms
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val_list in matches.items():
            for val in val_list:
                virtual_site_key = VirtualSiteKey(
                    atom_indices=key,
                    type=val.parameter_type.type,
                    match=val.parameter_type.match,
                )
                potential_key = PotentialKey(
                    id=val.parameter_type.smirks,
                    associated_handler=parameter_handler_name,
                )
                self.slot_map[virtual_site_key] = potential_key
                # Record the topology index this virtual site will occupy.
                self.virtual_site_key_topology_index_map[
                    virtual_site_key
                ] = virtual_site_index
                virtual_site_index += 1
def store_potentials(self, parameter_handler: ParameterHandler) -> None:
"""Store VirtualSite-specific parameter-like data."""
if self.potentials:
self.potentials = dict()
for potential_key in self.slot_map.values():
smirks = potential_key.id
parameter_type = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"distance": parameter_type.distance,
},
)
for attr in ["outOfPlaneAngle", "inPlaneAngle"]:
if hasattr(parameter_type, attr):
potential.parameters.update(
{attr: from_openmm(getattr(parameter_type, attr))}
)
self.potentials[potential_key] = potential
def _get_local_frame_weights(self, virtual_site_key: "VirtualSiteKey"):
if virtual_site_key.type == "BondCharge":
origin_weight = [1.0, 0.0]
x_direction = [-1.0, 1.0]
y_direction = [-1.0, 1.0]
elif virtual_site_key.type == "MonovalentLonePair":
origin_weight = [1, 0.0, 0.0]
x_direction = [-1.0, 1.0, 0.0]
y_direction = [-1.0, 0.0, 1.0]
elif virtual_site_key.type == "DivalentLonePair":
origin_weight = [0.0, 1.0, 0.0]
x_direction = [0.5, -1.0, 0.5]
y_direction = [1.0, -1.0, 1.0]
elif virtual_site_key.type == "TrivalentLonePair":
origin_weight = [0.0, 1.0, 0.0, 0.0]
x_direction = [1 / 3, -1.0, 1 / 3, 1 / 3]
y_direction = [1.0, -1.0, 0.0, 0.0]
return origin_weight, x_direction, y_direction
def _get_local_frame_position(self, virtual_site_key: "VirtualSiteKey"):
potential_key = self.slot_map[virtual_site_key]
potential = self.potentials[potential_key]
if virtual_site_key.type == "BondCharge":
distance = potential.parameters["distance"]
local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
elif virtual_site_key.type == "MonovalentLonePair":
distance = potential.parameters["distance"]
theta = potential.parameters["inPlaneAngle"].m_as(unit.radian) # type: ignore
psi = potential.parameters["outOfPlaneAngle"].m_as(unit.radian) # type: ignore
factor = np.array(
[np.cos(theta) * np.cos(psi), np.sin(theta) * np.cos(psi), np.sin(psi)]
)
local_frame_position = factor * distance
elif virtual_site_key.type == "DivalentLonePair":
distance = potential.parameters["distance"]
theta = potential.parameters["inPlaneAngle"].m_as(unit.radian) # type: ignore
factor = np.asarray([-1.0 * np.cos(theta), 0.0, np.sin(theta)])
local_frame_position = factor * distance
elif virtual_site_key.type == "TrivalentLonePair":
distance = potential.parameters["distance"]
local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
return local_frame_position
def library_charge_from_molecule(
    molecule: "Molecule",
) -> LibraryChargeHandler.LibraryChargeType:
    """Build a LibraryChargeType from an OpenFF Molecule carrying partial charges.

    The molecule's mapped SMILES serves as the SMIRKS pattern, so the resulting
    library charge matches exactly this molecule, atom by atom.

    Raises:
        ValueError: If the molecule has no partial charges assigned.
    """
    if molecule.partial_charges is None:
        raise ValueError("Input molecule is missing partial charges.")
    return LibraryChargeHandler.LibraryChargeType(
        smirks=molecule.to_smiles(mapped=True),
        charge=molecule.partial_charges,
    )
def _get_interpolation_coeffs(fractional_bond_order, data):
x1, x2 = data.keys()
coeff1 = (x2 - fractional_bond_order) / (x2 - x1)
coeff2 = (fractional_bond_order - x1) / (x2 - x1)
return coeff1, coeff2
#: Potential handler classes wired into SMIRNOFF parameter processing.
#: NOTE(review): SMIRNOFFVirtualSiteHandler is defined above but not listed
#: here -- confirm whether that omission is intentional.
SMIRNOFF_POTENTIAL_HANDLERS = [
    SMIRNOFFBondHandler,
    SMIRNOFFConstraintHandler,
    SMIRNOFFAngleHandler,
    SMIRNOFFProperTorsionHandler,
    SMIRNOFFImproperTorsionHandler,
    SMIRNOFFvdWHandler,
    SMIRNOFFElectrostaticsHandler,
]
| StarcoderdataPython |
1966156 | # Copyright (c) 2022 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
from copy import deepcopy
import numpy as np
from geoh5py.objects import Points
from geoh5py.workspace import Workspace
from geoapps.drivers.components import (
InversionData,
InversionMesh,
InversionModel,
InversionModelCollection,
InversionTopography,
InversionWindow,
)
from geoapps.io.MagneticVector import MagneticVectorParams
from geoapps.io.MagneticVector.constants import default_ui_json
from geoapps.utils import rotate_xy
from geoapps.utils.testing import Geoh5Tester
# Shared test workspace; FlinFlon.geoh5 must be present in the working directory.
geoh5 = Workspace("./FlinFlon.geoh5")
def setup_params(path):
    """Create a (workspace, params) pair for a small MagneticVector inversion test.

    The UUID strings reference entities stored inside FlinFlon.geoh5
    (data object, channel, mesh and topography).
    """
    geotest = Geoh5Tester(
        geoh5, path, "test.geoh5", deepcopy(default_ui_json), MagneticVectorParams
    )
    geotest.set_param("data_object", "{538a7eb1-2218-4bec-98cc-0a759aa0ef4f}")
    geotest.set_param("tmi_channel_bool", True)
    geotest.set_param("tmi_channel", "{44822654-b6ae-45b0-8886-2d845f80f422}")
    # 1 km x 1 km window centred on the survey area.
    geotest.set_param("window_center_x", 314183.0)
    geotest.set_param("window_center_y", 6071014.0)
    geotest.set_param("window_width", 1000.0)
    geotest.set_param("window_height", 1000.0)
    geotest.set_param("out_group", "MVIInversion")
    geotest.set_param("mesh", "{e334f687-df71-4538-ad28-264e420210b8}")
    geotest.set_param("topography_object", "{ab3c2083-6ea8-4d31-9230-7aad3ec09525}")
    geotest.set_param("topography", "{a603a762-f6cb-4b21-afda-3160e725bf7d}")
    geotest.set_param("starting_model", 1e-04)
    geotest.set_param("inducing_field_inclination", 79.0)
    geotest.set_param("inducing_field_declination", 11.0)
    geotest.set_param("reference_model", 0.0)
    geotest.set_param("reference_inclination", 79.0)
    geotest.set_param("reference_declination", 11.0)
    return geotest.make()
def test_zero_reference_model(tmp_path):
    """A zero reference model should still carry the configured inc/dec angles."""
    ws, params = setup_params(tmp_path)
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    # Constructing the model writes reference_inclination/declination into ws;
    # the binding itself is otherwise unused.
    model = InversionModel(ws, params, inversion_mesh, "reference")
    incl = np.unique(ws.get_entity("reference_inclination")[0].values)
    decl = np.unique(ws.get_entity("reference_declination")[0].values)
    # Angles must be constant over the whole mesh and match setup_params.
    assert len(incl) == 1
    assert len(decl) == 1
    assert np.isclose(incl[0], 79.0)
    assert np.isclose(decl[0], 11.0)
def test_collection(tmp_path):
    """The collection's starting model must match a standalone InversionModel."""
    ws, params = setup_params(tmp_path)
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    active_cells = inversion_topography.active_cells(inversion_mesh)
    models = InversionModelCollection(ws, params, inversion_mesh)
    models.remove_air(active_cells)
    # Build the same model independently and compare after air removal.
    starting = InversionModel(ws, params, inversion_mesh, "starting")
    starting.remove_air(active_cells)
    np.testing.assert_allclose(models.starting, starting.model)
def test_initialize(tmp_path):
    """Starting model has three components (vector inversion) per mesh cell."""
    ws, params = setup_params(tmp_path)
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    starting_model = InversionModel(ws, params, inversion_mesh, "starting")
    # Three vector components per cell; scalar start value -> at most 3 uniques.
    assert len(starting_model.model) == 3 * inversion_mesh.nC
    assert len(np.unique(starting_model.model)) == 3
def test_model_from_object(tmp_path):
    """Load a model from a Points object with non-matching mesh.

    A linear field sampled at the inversion mesh's cell centers is attached to
    a Points object; loading it as the lower bound should recover the linear
    coefficients (up to interpolation error) by least squares.
    """
    ws, params = setup_params(tmp_path)
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    cc = inversion_mesh.mesh.cell_centers
    m0 = np.array([2.0, 3.0, 1.0])
    # Linear field v = 2x + 3y + z evaluated at the cell centers.
    vals = (m0[0] * cc[:, 0]) + (m0[1] * cc[:, 1]) + (m0[2] * cc[:, 2])
    # Fixed: was f"test_point", an f-string with no placeholders (lint F541).
    point_object = Points.create(ws, name="test_point", vertices=cc)
    point_object.add_data({"test_data": {"values": vals}})
    data_object = ws.get_entity("test_data")[0]
    params.associations[data_object.uid] = point_object.uid
    params.lower_bound_object = point_object.uid
    params.lower_bound = data_object.uid
    lower_bound = InversionModel(ws, params, inversion_mesh, "lower_bound")
    # One third of the flattened vector model corresponds to one component.
    nc = int(len(lower_bound.model) / 3)
    A = lower_bound.mesh.mesh.cell_centers
    b = lower_bound.model[:nc]
    # Local import keeps scipy out of the module-level requirements for the
    # other tests in this file.
    from scipy.linalg import lstsq
    m = lstsq(A, b)[0]
    np.testing.assert_array_almost_equal(m, m0, decimal=1)
def test_permute_2_octree(tmp_path):
    """Marked cells survive the TreeMesh -> Octree permutation in place.

    A small box of cells around the mesh center is flagged in the model; after
    permuting to the (rotated) octree ordering and un-rotating the flagged
    centroids, they must all fall back inside the original box.
    """
    ws, params = setup_params(tmp_path)
    params.lower_bound = 0.0
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    lower_bound = InversionModel(ws, params, inversion_mesh, "lower_bound")
    cc = inversion_mesh.mesh.cell_centers
    center = np.mean(cc, axis=0)
    dx = inversion_mesh.mesh.h[0].min()
    dy = inversion_mesh.mesh.h[1].min()
    dz = inversion_mesh.mesh.h[2].min()
    # Carve a 10x10x10-smallest-cell box around the mesh center.
    xmin = center[0] - (5 * dx)
    xmax = center[0] + (5 * dx)
    ymin = center[1] - (5 * dy)
    ymax = center[1] + (5 * dy)
    zmin = center[2] - (5 * dz)
    zmax = center[2] + (5 * dz)
    xind = (cc[:, 0] > xmin) & (cc[:, 0] < xmax)
    yind = (cc[:, 1] > ymin) & (cc[:, 1] < ymax)
    zind = (cc[:, 2] > zmin) & (cc[:, 2] < zmax)
    ind = xind & yind & zind
    # Flag the box in all three vector components (model is tiled x3).
    lower_bound.model[np.tile(ind, 3)] = 1
    lb_perm = lower_bound.permute_2_octree()
    octree_mesh = ws.get_entity(params.mesh)[0]
    locs_perm = octree_mesh.centroids[lb_perm[: octree_mesh.n_cells] == 1, :]
    origin = [float(octree_mesh.origin[k]) for k in ["x", "y", "z"]]
    # Undo the octree rotation so coordinates are comparable to the box bounds.
    locs_perm_rot = rotate_xy(locs_perm, origin, -octree_mesh.rotation)
    assert xmin <= locs_perm_rot[:, 0].min()
    assert xmax >= locs_perm_rot[:, 0].max()
    assert ymin <= locs_perm_rot[:, 1].min()
    assert ymax >= locs_perm_rot[:, 1].max()
    assert zmin <= locs_perm_rot[:, 2].min()
    assert zmax >= locs_perm_rot[:, 2].max()
def test_permute_2_treemesh(tmp_path):
    """Marked cells survive the Octree -> TreeMesh permutation in place.

    Mirror of test_permute_2_octree: the box is flagged on the octree mesh,
    loaded as the upper bound model, and the flagged TreeMesh cell centers
    (after applying the rotation) must fall back inside the box.
    """
    ws, params = setup_params(tmp_path)
    octree_mesh = ws.get_entity(params.mesh)[0]
    cc = octree_mesh.centroids
    center = np.mean(cc, axis=0)
    dx = octree_mesh.u_cell_size.min()
    dy = octree_mesh.v_cell_size.min()
    # w cell sizes are negative (downward); take the magnitude.
    dz = np.abs(octree_mesh.w_cell_size.min())
    xmin = center[0] - (5 * dx)
    xmax = center[0] + (5 * dx)
    ymin = center[1] - (5 * dy)
    ymax = center[1] + (5 * dy)
    zmin = center[2] - (5 * dz)
    zmax = center[2] + (5 * dz)
    xind = (cc[:, 0] > xmin) & (cc[:, 0] < xmax)
    yind = (cc[:, 1] > ymin) & (cc[:, 1] < ymax)
    zind = (cc[:, 2] > zmin) & (cc[:, 2] < zmax)
    ind = xind & yind & zind
    model = np.zeros(octree_mesh.n_cells, dtype=float)
    model[ind] = 1
    octree_mesh.add_data({"test_model": {"values": model}})
    params.upper_bound = ws.get_entity("test_model")[0].uid
    params.associations[params.upper_bound] = octree_mesh.uid
    inversion_window = InversionWindow(ws, params)
    inversion_data = InversionData(ws, params, inversion_window.window)
    inversion_topography = InversionTopography(ws, params, inversion_window.window)
    inversion_mesh = InversionMesh(ws, params, inversion_data, inversion_topography)
    upper_bound = InversionModel(ws, params, inversion_mesh, "upper_bound")
    locs = inversion_mesh.mesh.cell_centers
    # Rotate the TreeMesh centers into the octree's frame before comparing.
    locs_rot = rotate_xy(
        locs, inversion_mesh.rotation["origin"], inversion_mesh.rotation["angle"]
    )
    locs_rot = locs_rot[upper_bound.model[: inversion_mesh.mesh.nC] == 1, :]
    assert xmin <= locs_rot[:, 0].min()
    assert xmax >= locs_rot[:, 0].max()
    assert ymin <= locs_rot[:, 1].min()
    assert ymax >= locs_rot[:, 1].max()
    assert zmin <= locs_rot[:, 2].min()
    assert zmax >= locs_rot[:, 2].max()
| StarcoderdataPython |
3531508 | <reponame>s-broda/nmt
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import unicodedata
import re
import io
import os
# Converts the unicode file to ascii
def unicode_to_ascii(s):
    """Strip accents: NFD-decompose *s*, then drop combining marks (category 'Mn')."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def preprocess_sentence(w):
    """Normalize a raw sentence for the NMT model: lower-case, strip accents,
    isolate punctuation, filter characters, and wrap with <start>/<end> tokens.
    """
    w = unicode_to_ascii(w.lower().strip())
    # creating a space between a word and the punctuation following it
    # eg: "he is a boy." => "he is a boy ."
    # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
    w = re.sub(r"([?.!,¿])", r" \1 ", w)
    # Collapse runs of the characters in the class. NOTE(review): the class
    # [" "] contains the double-quote character as well as the space, so runs
    # of quotes are also collapsed -- confirm this is intended (a plain " +"
    # would collapse spaces only).
    w = re.sub(r'[" "]+', " ", w)
    # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
    w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
    w = w.rstrip().strip()
    # adding a start and an end token to the sentence
    # so that the model know when to start and stop predicting.
    w = '<start> ' + w + ' <end>'
    return w
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: (english, german)
def create_wmt_dataset(path, num_examples):
    """Load up to *num_examples* parallel lines from english.txt/german.txt under
    *path*, preprocess each sentence, and return a zip of two tuples:
    (all english sentences, all german sentences).

    NOTE(review): the two file handles opened here are never closed explicitly.
    """
    lines_en = io.open(os.path.join(path, 'english.txt'), encoding='UTF-8').read().strip().split('\n')
    lines_de = io.open(os.path.join(path, 'german.txt'), encoding='UTF-8').read().strip().split('\n')
    word_pairs = [[preprocess_sentence(w) for w in l] for l in zip(lines_en[:num_examples], lines_de[:num_examples])]
    # zip(*pairs) transposes [(en, de), ...] into ((en...), (de...)).
    return zip(*word_pairs)
def max_length(tensor):
    """Return the length of the longest sequence in *tensor*."""
    return max(map(len, tensor))
def tokenize(lang, dict_size):
    """Fit a Keras tokenizer on *lang* (list of sentences) and return
    (padded integer tensor, fitted tokenizer).

    Words outside the *dict_size* most frequent are mapped to '<unk>'.
    """
    # filters='' keeps punctuation tokens produced by preprocess_sentence.
    lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=dict_size, filters='', oov_token='<unk>')
    lang_tokenizer.fit_on_texts(lang)
    tensor = lang_tokenizer.texts_to_sequences(lang)
    # Pad at the end so sequences share the batch's max length; pad id is 0.
    tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,
                                                           padding='post')
    return tensor, lang_tokenizer
def convert(lang, tensor):
    """Print every non-padding token id in *tensor* alongside its word in *lang*."""
    for token in tensor:
        if token != 0:  # 0 is the padding index and has no word
            print("%d ----> %s" % (token, lang.index_word[token]))
def load_wmt_dataset(path, num_examples=None, dict_size=None):
    """Build padded tensors and tokenizers for the English/German pairs at *path*.

    Returns (input_tensor, target_tensor, input_tokenizer, target_tokenizer)
    where the input language is German and the target language is English.
    """
    english_sentences, german_sentences = create_wmt_dataset(path, num_examples)
    in_tensor, in_tokenizer = tokenize(german_sentences, dict_size)
    out_tensor, out_tokenizer = tokenize(english_sentences, dict_size)
    return in_tensor, out_tensor, in_tokenizer, out_tokenizer
class Encoder(tf.keras.Model):
    """GRU encoder: embeds source token ids and returns per-step outputs plus
    the final hidden state."""
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
    def call(self, x, hidden):
        # x: (batch, seq) token ids -> (batch, seq, embedding_dim)
        x = self.embedding(x)
        # output: (batch, seq, enc_units); state: (batch, enc_units)
        output, state = self.gru(x, initial_state = hidden)
        return output, state
    def initialize_hidden_state(self):
        # Fresh zero state for the start of a batch.
        return tf.zeros((self.batch_sz, self.enc_units))
class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau) attention over encoder outputs."""
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)
    def call(self, query, values):
        # hidden shape == (batch_size, hidden size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden size)
        # we are doing this to perform addition to calculate the score
        hidden_with_time_axis = tf.expand_dims(query, 1)
        # score shape == (batch_size, max_length, 1)
        # we get 1 at the last axis because we are applying score to self.V
        # the shape of the tensor before applying self.V is (batch_size, max_length, units)
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))
        # attention_weights shape == (batch_size, max_length, 1)
        attention_weights = tf.nn.softmax(score, axis=1)
        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
class Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention: consumes one target token per step
    together with the previous hidden state and all encoder outputs."""
    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.dec_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        # Projects GRU output onto the vocabulary (logits).
        self.fc = tf.keras.layers.Dense(vocab_size)
        # used for attention
        self.attention = BahdanauAttention(self.dec_units)
    def call(self, x, hidden, enc_output):
        # enc_output shape == (batch_size, max_length, hidden_size)
        context_vector, attention_weights = self.attention(hidden, enc_output)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # output shape == (batch_size * 1, hidden_size)
        output = tf.reshape(output, (-1, output.shape[2]))
        # output shape == (batch_size, vocab)
        x = self.fc(output)
        return x, state, attention_weights
def loss_function(real, pred):
    """Masked sparse categorical cross-entropy: padding positions (id 0) are
    zeroed out of the loss.

    NOTE(review): the loss object is re-created on every call (could be hoisted
    to module level), and reduce_mean divides by the total number of positions
    including masked ones -- confirm that is the intended normalization.
    """
    # True where the target is a real token (non-padding).
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
| StarcoderdataPython |
399488 | <gh_stars>1-10
from django.db import models
from django.urls import reverse_lazy
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from projects.models import Attribute
class FooterLink(models.Model):
    """A single hyperlink rendered inside a footer section, ordered by index."""
    link_text = models.CharField(max_length=255, verbose_name=_("link text"))
    url = models.CharField(max_length=255, verbose_name=_("url"))
    index = models.PositiveIntegerField(verbose_name=_("index"), default=0)
    section = models.ForeignKey(
        "FooterSection",
        verbose_name=_("section"),
        # BUG FIX: related_name is the identifier Django uses to build the
        # reverse accessor (section.links); it must be a plain string, not a
        # ugettext_lazy proxy as it was before.
        related_name="links",
        null=False,
        on_delete=models.CASCADE,
    )
    class Meta:
        verbose_name = _("footer link")
        verbose_name_plural = _("footer links")
        ordering = ("index",)
    def __str__(self):
        return self.link_text
class FooterSection(models.Model):
    """An ordered grouping of footer links (see FooterLink.section)."""
    title = models.CharField(max_length=255, verbose_name=_("title"))
    # Sections are displayed in ascending ``index`` order (see Meta.ordering).
    index = models.PositiveIntegerField(verbose_name=_("index"), default=0)
    class Meta:
        verbose_name = _("footer section")
        verbose_name_plural = _("footer sections")
        ordering = ("index",)
    def __str__(self):
        return self.title
class ListViewAttributeColumn(models.Model):
    """Defines custom ordering of attribute columns in project list view."""
    index = models.PositiveIntegerField(default=0)
    # Parenthesized call replaces the previous backslash line continuation
    # (PEP 8 discourages backslash continuations).
    attribute = models.OneToOneField(
        Attribute, primary_key=True, on_delete=models.CASCADE
    )
    class Meta:  # dropped redundant ``object`` base for consistency with sibling models
        verbose_name = _("list view attribute column")
        verbose_name_plural = _("list view attribute columns")
        ordering = ("index",)
| StarcoderdataPython |
5084431 | from django.contrib.auth import get_user_model
import pytest
from rest_framework import status
from quiz.users.views import AuthViewSet
# Resolve the active user model (honours a custom AUTH_USER_MODEL).
User = get_user_model()
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
def test_signup_status_created(user_payload, rf, ct):
    """Test that a valid user payload may sign up with status code 201."""
    # rf is the request-factory fixture; ct is presumably a content-type
    # fixture (e.g. "application/json") defined in conftest -- confirm there.
    view = AuthViewSet.as_view({'post': 'signup'})
    request = rf.post('/fake-url/', data=user_payload, content_type=ct)
    response = view(request)
    assert response.status_code == status.HTTP_201_CREATED
@pytest.mark.parametrize('attr', ['password'])
def test_signup_return_has_no_attr(user_payload, rf, ct, attr):
    """Assert succeeded signup return data has no given (sensitive) attribute."""
    view = AuthViewSet.as_view({'post': 'signup'})
    request = rf.post('/fake-url/', data=user_payload, content_type=ct)
    response = view(request)
    # The password must never be echoed back to the client.
    assert attr not in response.data
@pytest.mark.parametrize('attr', ['username', 'password', 'role'])
def test_signup_missing_attr_return_bad_request(user_payload, rf, ct, attr):
    """Assert signup request with missing attribute returns bad request."""
    view = AuthViewSet.as_view({'post': 'signup'})
    # Drop one required field from an otherwise valid payload.
    del user_payload[attr]
    request = rf.post('/fake-url/', data=user_payload, content_type=ct)
    response = view(request)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
# Login
def test_login_signedup_ok(user_credentials, rf, ct):
    """Assert existing user is able to login with status ok."""
    # user_credentials is assumed to belong to an already-created user
    # (fixture defined in conftest) -- confirm there.
    view = AuthViewSet.as_view({'post': 'login'})
    request = rf.post('/fake-url/', data=user_credentials, content_type=ct)
    response = view(request)
    assert response.status_code == status.HTTP_200_OK
@pytest.mark.parametrize('attr', ['access', 'refresh'])
def test_login_return_has_attr(user_credentials, rf, ct, attr):
    """Assert succeeded login return data has given attribute (JWT token pair)."""
    view = AuthViewSet.as_view({'post': 'login'})
    request = rf.post('/fake-url/', data=user_credentials, content_type=ct)
    response = view(request)
    assert attr in response.data
def test_login_non_signedup_unauthorized(user_payload, rf, ct):
    """Assert invalid credentials return status unauthorized."""
    # user_payload has not been signed up, so the login must be rejected.
    view = AuthViewSet.as_view({'post': 'login'})
    request = rf.post('/fake-url/', data=user_payload, content_type=ct)
    response = view(request)
    assert response.status_code == status.HTTP_401_UNAUTHORIZED
| StarcoderdataPython |
8187693 | # encoding: utf-8
# module _sha
# from (built-in)
# by generator 1.147
# no doc
# no imports
# Variables with simple values (mirrors the attributes of the legacy C module)
blocksize = 1  # historical attribute of the old ``sha`` module -- not SHA-1's 64-byte block
digestsize = 20  # digest length in bytes (SHA-1 produces 160 bits)
digest_size = 20  # hashlib-style alias of ``digestsize``
# functions
def new(*args, **kwargs): # real signature unknown
    """Stub for the builtin ``_sha.new`` constructor.

    In the real extension module this returns a new SHA hashing object; an
    optional string argument, if present, is hashed automatically. This
    generated stub only mirrors the call signature and returns ``None``.
    """
    return None
# no classes
| StarcoderdataPython |
3465834 | <gh_stars>0
# Package name, exposed at package level for convenience/introspection.
name = 'ru_sent_tokenize'
from .tokenizer import ru_sent_tokenize, PAIRED_SHORTENINGS, SHORTENINGS, JOINING_SHORTENINGS
| StarcoderdataPython |
6533357 | # Given a list of rules, find how many bag colours can contain one shiny gold bag
# Assume all colour descriptors contain 2 words (e.g., "mirrored chartreuse")
# pattern = re.compile('([a-z]+ [a-z]+) bag')
# For each rule, get colours = pattern.findall(rule) => returns array
# First colour in array = outermost colour; others are inner colours
# Make a dict with outer colour as key, inner colours as values
# For shiny gold as inner colour, find all possible dict keys (direct outer colours)
# Recursively, for each key above find all possible dict keys that can contain it
# If this key does not also appear as a value elsewhere, then it is the outermost colour.
# Make all possible outer colours a set and count its members
import re
# Parse the puzzle input: one containment rule per line.
# Assume all colour descriptors contain exactly two words (e.g., "mirrored chartreuse").
pattern = re.compile('([a-z]+ [a-z]+) bag')  # hoisted: compiling per line was loop-invariant work
rules_dict = {}
with open("input.txt", "r") as file:  # was left open before; ``with`` guarantees closing
    for rule in file:
        # First match is the outer colour; the rest are its direct contents.
        colours = pattern.findall(rule.strip())
        rules_dict[colours[0]] = colours[1:]
def find_outer_colours(colours):
    """Record every colour that can directly or transitively contain ``colours``.

    Mutates the module-level ``contains_shiny_gold`` set: for each colour, all
    bags that may directly hold it are added and then expanded recursively; a
    colour with no container is itself recorded as an outermost colour. Reads
    the module-level ``rules_dict`` (outer colour -> list of inner colours).

    Replaces the original single-iteration ``while``/flag/``break`` construct
    and drops the unused local ``all_outer_colours``; behavior is unchanged.
    """
    for colour in colours:
        # All outer colours whose contents mention this colour.
        containers = [outer for outer, inner in rules_dict.items() if colour in inner]
        if containers:
            contains_shiny_gold.update(containers)
            # Expand each container transitively.
            find_outer_colours(containers)
        else:
            # Nothing can contain this colour: it is an outermost colour.
            contains_shiny_gold.add(colour)
# Part 1: count every distinct bag colour that can transitively contain shiny gold.
contains_shiny_gold = set()
find_outer_colours(["shiny gold"])
# NOTE(review): if no bag contained shiny gold, the set would hold "shiny gold"
# itself and the count would be 1 rather than 0 -- confirm the input guarantees
# at least one container.
print("Number of colours that can contain shiny gold: ", len(contains_shiny_gold))
# An alternative approach would be to find all colours that occur only once in the input file.
# These are so-called "stop colours" -- no other colour can be outside of them.
# We could then recurse over each colour until we reach a stop colour, counting the number of steps.
# Alternatively, this may be the type of problem where graphs (nodes and edges) would be appropriate.
1648342 | <reponame>liaison/LeetCode
class Solution:
    def findShortestSubArray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray of ``nums``
        whose degree (maximum element frequency) equals the degree of ``nums``.

        Single pass recording, per value, its first index, last index and
        count; the answer is the tightest first..last span among the values
        reaching the maximum count. O(n) time, O(n) space.
        """
        first = {}  # value -> index of first occurrence
        last = {}   # value -> index of last occurrence
        count = {}  # value -> number of occurrences
        for index, num in enumerate(nums):
            first.setdefault(num, index)
            last[num] = index
            count[num] = count.get(num, 0) + 1
        # Degree of the whole array (0 for empty input, matching the original).
        degree = max(count.values(), default=0)
        # Tightest window among values attaining the degree; iterate the dict's
        # keys directly instead of ``.items()`` pairs whose keys were unused.
        return min(
            (last[num] - first[num] + 1 for num in count if count[num] == degree),
            default=float("inf"),
        )
| StarcoderdataPython |
11206849 | <reponame>GersbachKa/tabletop_pta
from __future__ import division, print_function
import numpy as np
def zeropadtimeseries(x, T):
    """Zero-pad a two-column time series by duration ``T``.

    ``x`` is an (N, 2) array whose first column holds uniformly sampled times
    and whose second column holds sample values. The series is extended by
    roughly ``T`` seconds of zeros, then rounded up to the nearest power of
    two in length. ``T == 0`` returns ``x`` unchanged.
    """
    # Special case: nothing to pad.
    if T == 0:
        return x
    # Sample spacing and current length from the time column.
    dt = x[1, 0] - x[0, 0]
    n = len(x[:, 0])
    # Extra bins covering duration T, then round the total up to a power of 2.
    extra = int(np.floor(T / dt))
    padded_len = 2 ** int(np.ceil(np.log2(n + extra)))
    # Fresh time axis over the padded span; original samples lead, zeros trail.
    out = np.zeros([padded_len, 2])
    out[:, 0] = np.linspace(0, (padded_len - 1) * dt, padded_len)
    out[:n, 1] = x[:, 1]
    return out
| StarcoderdataPython |
3433277 | # -*- coding: utf-8 -*-
"""hypertext transfer protocol (HTTP/2)
:mod:`pcapkit.protocols.application.httpv2` contains
:class:`~pcapkit.protocols.application.httpv2.HTTPv2`
only, which implements extractor for Hypertext Transfer
Protocol (HTTP/2) [*]_, whose structure is described as
below:
======= ========= ===================== ==========================
Octets Bits Name Description
======= ========= ===================== ==========================
0 0 ``http.length`` Length
3 24 ``http.type`` Type
4 32 ``http.flags`` Flags
5 40 Reserved
5 41 ``http.sid`` Stream Identifier
9 72 ``http.payload`` Frame Payload
======= ========= ===================== ==========================
.. [*] https://en.wikipedia.org/wiki/HTTP/2
"""
import collections
from typing import TYPE_CHECKING
from pcapkit.const.http.error_code import ErrorCode as RegType_ErrorCode
from pcapkit.const.http.frame import Frame as RegType_Frame
from pcapkit.const.http.setting import Setting as RegType_Setting
from pcapkit.corekit.multidict import OrderedMultiDict
from pcapkit.protocols.application.http import HTTP
from pcapkit.protocols.data.application.httpv2 import HTTP as DataType_HTTP
from pcapkit.protocols.data.application.httpv2 import \
ContinuationFrame as DataType_ContinuationFrame
from pcapkit.protocols.data.application.httpv2 import \
ContinuationFrameFlags as DataType_ContinuationFrameFlags
from pcapkit.protocols.data.application.httpv2 import DataFrame as DataType_DataFrame
from pcapkit.protocols.data.application.httpv2 import DataFrameFlags as DataType_DataFrameFlags
from pcapkit.protocols.data.application.httpv2 import GoawayFrame as DataType_GoawayFrame
from pcapkit.protocols.data.application.httpv2 import HeadersFrame as DataType_HeadersFrame
from pcapkit.protocols.data.application.httpv2 import \
HeadersFrameFlags as DataType_HeadersFrameFlags
from pcapkit.protocols.data.application.httpv2 import PingFrame as DataType_PingFrame
from pcapkit.protocols.data.application.httpv2 import PingFrameFlags as DataType_PingFrameFlags
from pcapkit.protocols.data.application.httpv2 import PriorityFrame as DataType_PriorityFrame
from pcapkit.protocols.data.application.httpv2 import PushPromiseFrame as DataType_PushPromiseFrame
from pcapkit.protocols.data.application.httpv2 import \
PushPromiseFrameFlags as DataType_PushPromiseFrameFlags
from pcapkit.protocols.data.application.httpv2 import RstStreamFrame as DataType_RstStreamFrame
from pcapkit.protocols.data.application.httpv2 import SettingsFrame as DataType_SettingsFrame
from pcapkit.protocols.data.application.httpv2 import \
SettingsFrameFlags as DataType_SettingsFrameFlags
from pcapkit.protocols.data.application.httpv2 import UnassignedFrame as DataType_UnassignedFrame
from pcapkit.protocols.data.application.httpv2 import \
WindowUpdateFrame as DataType_WindowUpdateFrame
from pcapkit.utilities.exceptions import ProtocolError
if TYPE_CHECKING:
from typing import Any, Callable, DefaultDict, NoReturn, Optional
from typing_extensions import Literal
FrameParser = Callable[['HTTPv2', RegType_Frame, int, str, int], DataType_HTTP]
# Public API of this module.
__all__ = ['HTTPv2']
class HTTPv2(HTTP):
    """This class implements Hypertext Transfer Protocol (HTTP/2)."""
    #: Parsed packet data.
    _info: 'DataType_HTTP'
    ##########################################################################
    # Defaults.
    ##########################################################################
    #: DefaultDict[RegType_Frame, str | FrameParser]: Frame code to method
    #: mapping, c.f. :meth:`read`. Method names are expected to be referred to
    #: the class by ``_read_http_${name}``, and if such name not found, the
    #: value should then be a method that can parse the frame by itself.
    #: Unknown frame codes fall back to ``'none'`` (handled by
    #: :meth:`_read_http_none`).
    __frame__ = collections.defaultdict(
        lambda: 'none',
        {
            RegType_Frame.DATA: 'data',                    # DATA
            RegType_Frame.HEADERS: 'headers',              # HEADERS
            RegType_Frame.PRIORITY: 'priority',            # PRIORITY
            RegType_Frame.RST_STREAM: 'rst_stream',        # RST_STREAM
            RegType_Frame.SETTINGS: 'settings',            # SETTINGS
            RegType_Frame.PUSH_PROMISE: 'push_promise',    # PUSH_PROMISE
            RegType_Frame.PING: 'ping',                    # PING
            RegType_Frame.GOAWAY: 'goaway',                # GOAWAY
            RegType_Frame.WINDOW_UPDATE: 'window_update',  # WINDOW_UPDATE
            RegType_Frame.CONTINUATION: 'continuation',    # CONTINUATION
        },
    )  # type: DefaultDict[int, str | FrameParser]
    ##########################################################################
    # Properties.
    ##########################################################################
    @property
    def alias(self) -> 'Literal["HTTP/2"]':
        """Acronym of current protocol."""
        return 'HTTP/2'
    @property
    def length(self) -> 'Literal[9]':
        """Header length of current protocol (fixed 9-octet frame header)."""
        return 9
    ##########################################################################
    # Methods.
    ##########################################################################
    def read(self, length: 'Optional[int]' = None, **kwargs: 'Any') -> 'DataType_HTTP':
        """Read Hypertext Transfer Protocol (HTTP/2).
        Structure of HTTP/2 packet [:rfc:`7540`]:
        .. code-block:: text
           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +=+=============================================================+
           |                   Frame Payload (0...)                      ...
           +---------------------------------------------------------------+
        Args:
            length: Length of packet data.
        Keyword Args:
            **kwargs: Arbitrary keyword arguments.
        Returns:
            Parsed packet data.
        Raises:
            ProtocolError: If the packet is malformed.
        """
        if length is None:
            length = len(self)
        # The fixed frame header alone occupies 9 octets.
        if length < 9:
            raise ProtocolError('HTTP/2: invalid format', quiet=True)
        _tlen = self._read_unpack(3)   # 24-bit payload length field
        _type = self._read_unpack(1)   # frame type octet
        _flag = self._read_binary(1)   # flags octet as a bit string
        _rsid = self._read_binary(4)   # reserved bit + 31-bit stream identifier
        # The declared frame length must match the captured length.
        if _tlen != length:
            raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)
        http_type = RegType_Frame.get(_type)
        # Drop the reserved bit, keep the 31-bit stream id.
        http_sid = int(_rsid[1:], base=2)
        # SETTINGS and PING are connection-level frames: stream id must be 0.
        if http_type in (RegType_Frame.SETTINGS, RegType_Frame.PING) and http_sid != 0:
            raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)
        name = self.__frame__[http_type]  # type: str | FrameParser
        if isinstance(name, str):
            # String entries name a ``_read_http_*`` method on this class;
            # unknown names fall back to the unassigned-frame parser.
            meth_name = f'_read_http_{name}'
            meth = getattr(self, meth_name, self._read_http_none)  # type: Callable[[RegType_Frame, int, str, int], DataType_HTTP]
            http = meth(http_type, length, _flag, http_sid)
        else:
            # Callable entries are user-registered parsers (see register_frame).
            http = name(self, http_type, length, _flag, http_sid)
        return http
def make(self, **kwargs: 'Any') -> 'NoReturn':
"""Make (construct) packet data.
Keyword Args:
**kwargs: Arbitrary keyword arguments.
Returns:
Constructed packet data.
"""
raise NotImplementedError
@classmethod
def id(cls) -> 'tuple[Literal["HTTPv2"]]': # type: ignore[override]
"""Index ID of the protocol.
Returns:
Index ID of the protocol.
"""
return (cls.__name__,) # type: ignore[return-value]
@classmethod
def register_frame(cls, code: 'RegType_Frame', meth: 'str | FrameParser') -> 'None':
"""Register a frame parser.
Args:
code: HTTP frame type code.
meth: Method name or callable to parse the frame.
"""
cls.__frame__[code] = meth
##########################################################################
# Data models.
##########################################################################
    def __length_hint__(self) -> 'Literal[9]':
        """Total length of corresponding protocol."""
        # Every HTTP/2 frame begins with a fixed 9-octet header:
        # 24-bit length, 8-bit type, 8-bit flags, 1-bit R + 31-bit stream ID.
        return 9
##########################################################################
# Utilities.
##########################################################################
    def _read_http_none(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_UnassignedFrame':
        """Read HTTP packet with unassigned type.

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # An unassigned frame type defines no flags, so any set bit in the
        # flags octet marks the frame as malformed.
        if any((int(bit, base=2) for bit in flags)):
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        # NOTE(review): ``length - 9`` treats ``length`` as the *total* frame
        # size including the 9-octet header, whereas sibling parsers (e.g.
        # ``_read_http_rst_stream``'s ``length != 4``) treat ``length`` as
        # payload-only.  Confirm the semantics of ``length`` in ``read``.
        data = DataType_UnassignedFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            data=self._read_fileng(length - 9) or None,
        )

        return data
    def _read_http_data(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_DataFrame':
        """Read HTTP/2 ``DATA`` frames.

        Structure of HTTP/2 ``DATA`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------+-----------------------------------------------+
           |Pad Length? (8)|
           +---------------+-----------------------------------------------+
           |                            Data (*)                         ...
           +---------------------------------------------------------------+
           |                           Padding (*)                       ...
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        _flag = DataType_DataFrameFlags(
            END_STREAM=bool(int(flags[0], base=2)),  # bit 0
            PADDED=bool(int(flags[3], base=2)),      # bit 3
        )

        # The Pad Length octet is only present when the PADDED flag is set.
        if _flag.PADDED:
            _plen = self._read_unpack(1)
        else:
            _plen = 0

        # RFC 7540 §6.1: padding that equals or exceeds the remaining payload
        # is a connection error.
        # NOTE(review): the ``length - 10`` bound (and the ``- 1`` below, which
        # accounts only for the Pad Length octet, not the 9-octet header) mixes
        # header-inclusive and payload-only interpretations of ``length``;
        # compare with ``_read_http_none``'s ``length - 9``.  Verify.
        if _plen > length - 10:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        if _flag.PADDED:
            _dlen = length - _plen - 1
        else:
            _dlen = length - _plen
        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _data = self._read_fileng(_dlen)
        # Padding octets are consumed to advance the stream but are not kept
        # in the parsed result (RFC requires them to be zero, not preserved).
        _pads = self._read_binary(_plen)

        data = DataType_DataFrame(
            length=length,
            type=frame,
            flags=_flag,
            pad_len=_plen,
            sid=sid,
            data=_data,
        )

        return data
    def _read_http_headers(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_HeadersFrame':
        """Read HTTP/2 ``HEADERS`` frames.

        Structure of HTTP/2 ``HEADERS`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------+-----------------------------------------------+
           |Pad Length? (8)|
           +-+-------------+-----------------------------------------------+
           |E|                 Stream Dependency? (31)                     |
           +-+-------------+-----------------------------------------------+
           |  Weight? (8)  |
           +-+-------------+-----------------------------------------------+
           |                   Header Block Fragment (*)                 ...
           +---------------------------------------------------------------+
           |                           Padding (*)                       ...
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        _flag = DataType_HeadersFrameFlags(
            END_STREAM=bool(int(flags[0], base=2)),   # bit 0
            END_HEADERS=bool(int(flags[2], base=2)),  # bit 2
            PADDED=bool(int(flags[3], base=2)),       # bit 3
            PRIORITY=bool(int(flags[5], base=2)),     # bit 5
        )

        # The 5-octet priority section (E + 31-bit dependency, 1-octet weight)
        # is only present when the PRIORITY flag is set.
        if _flag.PRIORITY:
            _edep = self._read_binary(4)
            _wght = self._read_unpack(1)
            _elen = 5
            _excl = bool(int(_edep[0], base=2))
            _deps = int(_edep[1:], base=2)
        else:
            _edep = _wght = _excl = _deps = None  # type: ignore[assignment]
            _elen = 0

        if _flag.PADDED:
            _plen = self._read_unpack(1)
            _dlen = length - _plen - _elen - 1
        else:
            _plen = 0
            _dlen = length - _plen - _elen
        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _frag = self._read_fileng(_dlen) or None
        # Padding is consumed but discarded.
        _pads = self._read_binary(_plen)

        # NOTE(review): the weight octet is stored as-is here, while
        # ``_read_http_priority`` stores ``weight + 1`` (RFC 7540 §5.3.2 says
        # the field value maps to weight 1..256).  One of the two is likely
        # inconsistent — verify against the data model's expectations.
        data = DataType_HeadersFrame(
            length=length,
            type=frame,
            flags=_flag,
            pad_len=_plen,
            sid=sid,
            excl_dependency=_excl,
            stream_dependency=_deps,
            weight=_wght,
            fragment=_frag,
        )

        return data
    def _read_http_priority(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PriorityFrame':
        """Read HTTP/2 ``PRIORITY`` frames.

        Structure of HTTP/2 ``PRIORITY`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +-+-------------------------------------------------------------+
           |E|                  Stream Dependency (31)                     |
           +-+-------------+-----------------------------------------------+
           |   Weight (8)  |
           +-+-------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # NOTE(review): RFC 7540 §6.3 fixes the PRIORITY *payload* at 5
        # octets.  This check against 9 (the frame-header size) matches
        # neither a payload-only nor a header-inclusive (14) interpretation
        # of ``length``, and only 5 octets are read below — verify against
        # the length semantics used in ``read``.
        if length != 9:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _edep = self._read_binary(4)
        _wght = self._read_unpack(1)

        data = DataType_PriorityFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            excl_dependency=bool(int(_edep[0], base=2)),
            stream_dependency=int(_edep[1:], base=2),
            # RFC 7540 §5.3.2: the on-wire value 0..255 maps to weight 1..256.
            weight=_wght + 1,
        )

        return data
    def _read_http_rst_stream(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_RstStreamFrame':
        """Read HTTP/2 ``RST_STREAM`` frames.

        Structure of HTTP/2 ``RST_STREAM`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------------------------------------------------------+
           |                        Error Code (32)                        |
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # RST_STREAM carries exactly one 32-bit error code (RFC 7540 §6.4).
        # NOTE(review): here ``length`` is compared against the payload size
        # (4), unlike the header-inclusive arithmetic in ``_read_http_none``.
        if length != 4:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _code = self._read_unpack(4)

        data = DataType_RstStreamFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            # Unknown codes fall back to the raw integer rather than raising.
            error=RegType_ErrorCode.get(_code, _code),
        )

        return data
    def _read_http_settings(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_SettingsFrame':
        """Read HTTP/2 ``SETTINGS`` frames.

        Structure of HTTP/2 ``SETTINGS`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------------------------------------------------------+
           |                        Identifier (16)                        |
           +-------------------------------+-------------------------------+
           |                          Value (32)                           |
           +---------------------------------------------------------------+
           |                             ......                            |

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # Payload is a sequence of 6-octet (2 + 4) settings entries, and
        # SETTINGS frames must be sent on stream 0 (also enforced in ``read``).
        if length % 6 != 0 or sid != 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _flag = DataType_SettingsFrameFlags(
            ACK=bool(int(flags[0], base=2)),  # bit 0
        )

        # RFC 7540 §6.5: a SETTINGS ACK must carry an empty payload.
        # NOTE(review): ``length != 0`` assumes payload-only semantics for
        # ``length``; with header-inclusive semantics the check would be 9.
        if _flag.ACK and length != 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        # A multidict preserves order and permits duplicate setting IDs.
        _sets = OrderedMultiDict()  # type: OrderedMultiDict[RegType_Setting, int]
        for _ in range(length // 6):
            _stid = self._read_unpack(2)
            _pval = self._read_unpack(4)
            _pkey = RegType_Setting.get(_stid)
            _sets.add(_pkey, _pval)

        data = DataType_SettingsFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            settings=_sets,
        )

        return data
    def _read_http_push_promise(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PushPromiseFrame':
        """Read HTTP/2 ``PUSH_PROMISE`` frames.

        Structure of HTTP/2 ``PUSH_PROMISE`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------+-----------------------------------------------+
           |Pad Length? (8)|
           +-+-------------+-----------------------------------------------+
           |R|                  Promised Stream ID (31)                    |
           +-+-----------------------------+-------------------------------+
           |                   Header Block Fragment (*)                 ...
           +---------------------------------------------------------------+
           |                           Padding (*)                       ...
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # The payload must at least hold the 4-octet Promised Stream ID.
        if length < 4:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _flag = DataType_PushPromiseFrameFlags(
            END_HEADERS=bool(int(flags[2], base=2)),  # bit 2
            PADDED=bool(int(flags[3], base=2)),       # bit 3
        )

        # Fragment length = payload minus the 4-octet promised-stream ID,
        # minus the Pad Length octet and padding when PADDED is set.
        if _flag.PADDED:
            _plen = self._read_unpack(1)
            _dlen = length - _plen - 5
        else:
            _plen = 0
            _dlen = length - _plen - 4
        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _rpid = self._read_binary(4)
        _frag = self._read_fileng(_dlen) or None
        # Padding is consumed but discarded.
        _pads = self._read_binary(_plen)

        data = DataType_PushPromiseFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            pad_len=_plen,
            # Strip the reserved bit from the promised stream identifier.
            promised_sid=int(_rpid[1:], base=2),
            fragment=_frag,
        )

        return data
    def _read_http_ping(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PingFrame':
        """Read HTTP/2 ``PING`` frames.

        Structure of HTTP/2 ``PING`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------------------------------------------------------+
           |                                                               |
           |                      Opaque Data (64)                         |
           |                                                               |
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # PING payload is exactly 8 octets of opaque data (RFC 7540 §6.7).
        if length != 8:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _flag = DataType_PingFrameFlags(
            ACK=bool(int(flags[0], base=2)),  # bit 0
        )

        _data = self._read_fileng(8)

        data = DataType_PingFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            data=_data,
        )

        return data
    def _read_http_goaway(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_GoawayFrame':
        """Read HTTP/2 ``GOAWAY`` frames.

        Structure of HTTP/2 ``GOAWAY`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +-+-------------+---------------+-------------------------------+
           |R|                  Last-Stream-ID (31)                        |
           +-+-------------------------------------------------------------+
           |                      Error Code (32)                          |
           +---------------------------------------------------------------+
           |                  Additional Debug Data (*)                    |
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # Debug data follows the fixed 8-octet (4 + 4) prefix.
        _dlen = length - 8
        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _rsid = self._read_binary(4)
        _code = self._read_unpack(4)
        _data = self._read_fileng(_dlen) or None

        # NOTE(review): ``RegType_ErrorCode.get(_code)`` has no fallback here,
        # while ``_read_http_rst_stream`` uses ``get(_code, _code)`` — an
        # unknown error code may behave differently between the two parsers.
        data = DataType_GoawayFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            # Strip the reserved bit from the last-stream identifier.
            last_sid=int(_rsid[1:], base=2),
            error=RegType_ErrorCode.get(_code),
            debug_data=_data,
        )

        return data
    def _read_http_window_update(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_WindowUpdateFrame':
        """Read HTTP/2 ``WINDOW_UPDATE`` frames.

        Structure of HTTP/2 ``WINDOW_UPDATE`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +-+-------------+---------------+-------------------------------+
           |R|              Window Size Increment (31)                     |
           +-+-------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # WINDOW_UPDATE carries exactly one 32-bit field (RFC 7540 §6.9).
        if length != 4:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _size = self._read_binary(4)

        data = DataType_WindowUpdateFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            # Strip the reserved bit from the 31-bit increment.
            increment=int(_size[1:], base=2),
        )

        return data
    def _read_http_continuation(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_ContinuationFrame':
        """Read HTTP/2 ``CONTINUATION`` frames.

        Structure of HTTP/2 ``CONTINUATION`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------------------------------------------------------+
           |                   Header Block Fragment (*)                 ...
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame (8-character bit string).
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        _flag = DataType_ContinuationFrameFlags(
            END_HEADERS=bool(int(flags[2], base=2)),  # bit 2
        )

        # NOTE(review): the full ``length`` octets are read as fragment data
        # with no header subtraction, unlike ``_read_http_none``'s
        # ``length - 9`` — another instance of the inconsistent ``length``
        # semantics across this parser family; verify.
        _frag = self._read_fileng(length) or None

        data = DataType_ContinuationFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            fragment=_frag,
        )

        return data
| StarcoderdataPython |
207649 | <gh_stars>1-10
"""Convolutional Layer implementation."""
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import initializers
from tensorflow.python.keras.utils import conv_utils
import tf_encrypted as tfe
from tf_encrypted.keras.engine import Layer
from tf_encrypted.keras import activations
from tf_encrypted.keras.layers.layers_utils import default_args_check
from tf_encrypted.keras import backend as KE
from tf_encrypted.protocol.pond import PondPrivateTensor
logger = logging.getLogger('tf_encrypted')
class Conv2D(Layer):
  """2D convolution layer (e.g. spatial convolution over images).

  This layer creates a convolution kernel that is convolved
  with the layer input to produce a tensor of
  outputs. If `use_bias` is True,
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  When using this layer as the first layer in a model,
  provide the keyword argument `input_shape`
  (tuple of integers, does not include the sample axis),
  e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
  in `data_format="channels_last"`.

  Arguments:
    filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
    dilation_rate: an integer or tuple/list of 2 integers — not supported,
      must be left at its default value.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: not supported, must be `None`.
    bias_regularizer: not supported, must be `None`.
    activity_regularizer: not supported, must be `None`.
    kernel_constraint: not supported, must be `None`.
    bias_constraint: not supported, must be `None`.

  Input shape:
    4D tensor with shape:
    `(samples, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
    4D tensor with shape:
    `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(Conv2D, self).__init__(**kwargs)

    self.rank = 2
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, self.rank, 'kernel_size')
    # The underlying TFE conv2d primitive only supports square kernels.
    if self.kernel_size[0] != self.kernel_size[1]:
      raise NotImplementedError("TF Encrypted currently only supports same "
                                "stride along the height and the width."
                                "You gave: {}".format(self.kernel_size))
    # NOTE(review): only strides[0] is ever used (see `call` and
    # `compute_output_shape`) — unequal strides are silently ignored;
    # consider validating strides[0] == strides[1] here as well.
    self.strides = conv_utils.normalize_tuple(strides, self.rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding).upper()
    self.data_format = conv_utils.normalize_data_format(data_format)
    if activation is not None:
      # NOTE(review): this message was copied from the pooling layers and
      # does not quite fit a convolution layer — consider rewording.
      logger.info("Performing an activation before a pooling layer can result "
                  "in unnecessary performance loss. Check model definition in "
                  "case of missed optimization.")
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments: raise if anything non-default is passed.
    default_args_check(dilation_rate, "dilation_rate", "Conv2D")
    default_args_check(kernel_regularizer, "kernel_regularizer", "Conv2D")
    default_args_check(bias_regularizer, "bias_regularizer", "Conv2D")
    default_args_check(activity_regularizer,
                       "activity_regularizer",
                       "Conv2D")
    default_args_check(kernel_constraint, "kernel_constraint", "Conv2D")
    default_args_check(bias_constraint, "bias_constraint", "Conv2D")

  def build(self, input_shape):
    """Create the kernel (and optional bias) weights.

    Args:
      input_shape: Shape of the layer input, including the batch axis.

    Raises:
      ValueError: If the channel dimension of the input is undefined.
    """
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape[channel_axis] is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])

    # Kernel laid out HWIO: (kernel_h, kernel_w, in_channels, filters).
    self.kernel_shape = self.kernel_size + (input_dim, self.filters)
    kernel = self.kernel_initializer(self.kernel_shape)
    self.kernel = self.add_weight(kernel)

    if self.use_bias:
      # Bias needs rank 3 ([filters, 1, 1]) so it broadcasts over the
      # channels-first (N, C, H, W) conv output in `call`.
      bias_shape = [self.filters, 1, 1]
      bias = self.bias_initializer(bias_shape)
      self.bias = self.add_weight(bias)
    else:
      self.bias = None

    self.built = True

  def call(self, inputs):
    """Apply the convolution (and optional bias/activation) to `inputs`.

    The TFE conv2d primitive operates in channels-first layout, so
    channels-last inputs are transposed in and back out.
    """
    if self.data_format != 'channels_first':
      inputs = self.prot.transpose(inputs, perm=[0, 3, 1, 2])

    outputs = self.prot.conv2d(inputs,
                               self.kernel,
                               self.strides[0],
                               self.padding)

    if self.use_bias:
      outputs = outputs + self.bias

    if self.data_format != 'channels_first':
      outputs = self.prot.transpose(outputs, perm=[0, 2, 3, 1])

    if self.activation is not None:
      return self.activation(outputs)

    return outputs

  def compute_output_shape(self, input_shape):
    """Compute the layer's output shape.

    Fixed: the returned shape now follows ``self.data_format`` — NCHW for
    'channels_first' and NHWC for 'channels_last' — matching the layout
    that `call` actually produces (previously an NCHW-ordered list was
    returned unconditionally).

    Args:
      input_shape: Input shape (a TensorShape-like with `.as_list()`).

    Returns:
      The 4-element output shape as a list, ordered per `data_format`.

    Raises:
      ValueError: If padding is neither "SAME" nor "VALID".
    """
    h_filter, w_filter, _, n_filters = self.kernel_shape
    if self.data_format == 'channels_first':
      n_x, _, h_x, w_x = input_shape.as_list()
    else:
      n_x, h_x, w_x, _ = input_shape.as_list()

    # Only strides[0] is used: the conv primitive assumes equal strides
    # along height and width (see `call`).
    if self.padding == "SAME":
      h_out = int(np.ceil(float(h_x) / float(self.strides[0])))
      w_out = int(np.ceil(float(w_x) / float(self.strides[0])))
    elif self.padding == "VALID":
      h_out = int(np.ceil(float(h_x - h_filter + 1) / float(self.strides[0])))
      w_out = int(np.ceil(float(w_x - w_filter + 1) / float(self.strides[0])))
    else:
      raise ValueError("Unknown padding: {}".format(self.padding))

    if self.data_format == 'channels_first':
      return [n_x, n_filters, h_out, w_out]
    return [n_x, h_out, w_out, n_filters]
class DepthwiseConv2D(Conv2D):
  """Depthwise separable 2D convolution.

  Depthwise Separable convolutions consists in performing
  just the first step in a depthwise spatial convolution
  (which acts on each input channel separately).
  The `depth_multiplier` argument controls how many
  output channels are generated per input channel in the depthwise step.

  Arguments:
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    padding: one of `'valid'` or `'same'` (case-insensitive).
    depth_multiplier: The number of depthwise convolution output channels
      for each input channel.
      The total number of depthwise convolution output
      channels will be equal to `filters_in * depth_multiplier`.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. 'linear' activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: not supported, must be `None`.
    bias_regularizer: not supported, must be `None`.
    activity_regularizer: not supported, must be `None`.
    depthwise_constraint: not supported, must be `None`.
    bias_constraint: not supported, must be `None`.

  Input shape:
    4D tensor with shape:
    `[batch, channels, rows, cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, rows, cols, channels]` if data_format='channels_last'.

  Output shape:
    4D tensor with shape:
    `[batch, filters, new_rows, new_cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               depth_multiplier=1,
               data_format=None,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # `filters` is undefined for depthwise convolution until `build` knows
    # the input channel count, hence filters=None here.
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)

    self.rank = 2
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, self.rank, 'kernel_size')
    # The underlying TFE conv2d primitive only supports square kernels.
    if self.kernel_size[0] != self.kernel_size[1]:
      raise NotImplementedError("TF Encrypted currently only supports same "
                                "stride along the height and the width."
                                "You gave: {}".format(self.kernel_size))
    self.strides = conv_utils.normalize_tuple(strides, self.rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding).upper()
    self.depth_multiplier = depth_multiplier
    self.data_format = conv_utils.normalize_data_format(data_format)
    if activation is not None:
      # NOTE(review): message copied from the pooling layers; wording does
      # not quite fit a convolution layer.
      logger.info("Performing an activation before a pooling layer can result "
                  "in unnecessary performance loss. Check model definition in "
                  "case of missed optimization.")
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments: raise if anything non-default is passed.
    default_args_check(depthwise_regularizer,
                       "depthwise_regularizer",
                       "DepthwiseConv2D")
    default_args_check(bias_regularizer,
                       "bias_regularizer",
                       "DepthwiseConv2D")
    default_args_check(activity_regularizer,
                       "activity_regularizer",
                       "DepthwiseConv2D")
    default_args_check(depthwise_constraint,
                       "depthwise_constraint",
                       "DepthwiseConv2D")
    default_args_check(bias_constraint,
                       "bias_constraint",
                       "DepthwiseConv2D")

  def build(self, input_shape):
    """Create the depthwise kernel (and optional bias) weights.

    The depthwise kernel is stored rearranged (see `rearrange_kernel`) so
    that the depthwise convolution can be executed with the regular conv2d
    primitive.

    Raises:
      ValueError: If the channel dimension of the input is undefined.
    """
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape[channel_axis] is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    self.input_dim = int(input_shape[channel_axis])

    # Depthwise kernel layout: (kernel_h, kernel_w, in_channels, multiplier).
    self.kernel_shape = self.kernel_size + \
        (self.input_dim, self.depth_multiplier)

    kernel = self.depthwise_initializer(self.kernel_shape)
    kernel = self.rearrange_kernel(kernel)
    self.kernel = self.add_weight(kernel)

    if self.use_bias:
      # Bias needs rank 3 so it broadcasts over the channels-first
      # (N, C, H, W) conv output in `call`; one bias per output channel.
      bias_shape = [self.input_dim * self.depth_multiplier, 1, 1]
      bias = self.bias_initializer(bias_shape)
      self.bias = self.add_weight(bias)
    else:
      self.bias = None

    self.built = True

  def rearrange_kernel(self, kernel):
    """Rearrange a depthwise kernel to match normal convolution kernels.

    Multiplies the (possibly reshaped) kernel by a block-diagonal mask so
    that each output channel only connects to its own input channel,
    emulating a depthwise convolution with the dense conv2d primitive.

    Arguments:
      kernel: kernel to be rearranged — a `tf.Tensor`, `np.ndarray`
        or `PondPrivateTensor`.

    Returns:
      The rearranged kernel, same container type as the input.
    """
    mask = self.get_mask(self.input_dim)

    if isinstance(kernel, tf.Tensor):
      mask = tf.constant(mask.tolist(),
                         dtype=tf.float32,
                         shape=(self.kernel_size[0],
                                self.kernel_size[1],
                                self.input_dim * self.depth_multiplier,
                                self.input_dim))

      if self.depth_multiplier > 1:
        # Flatten (in, multiplier) into a single channel axis before masking.
        kernel = tf.transpose(kernel, [0, 1, 3, 2])
        kernel = tf.reshape(kernel, shape=self.kernel_size +
                            (self.input_dim * self.depth_multiplier, 1))

      kernel = tf.multiply(kernel, mask)

    elif isinstance(kernel, np.ndarray):
      if self.depth_multiplier > 1:
        # Flatten (in, multiplier) into a single channel axis before masking.
        kernel = np.transpose(kernel, [0, 1, 3, 2])
        kernel = np.reshape(kernel, newshape=self.kernel_size +
                            (self.input_dim * self.depth_multiplier, 1))

      kernel = np.multiply(kernel, mask)

    elif isinstance(kernel, PondPrivateTensor):
      mask = tfe.define_public_variable(mask)
      if self.depth_multiplier > 1:
        # Flatten (in, multiplier) into a single channel axis before masking.
        kernel = tfe.transpose(kernel, [0, 1, 3, 2])
        kernel = tfe.reshape(kernel, shape=self.kernel_size +
                             (self.input_dim * self.depth_multiplier, 1))

      kernel = tfe.mul(kernel, mask)

    return kernel

  def call(self, inputs):
    """Apply the (masked) depthwise convolution to `inputs`.

    NOTE(review): identical to `Conv2D.call`; kept as an explicit override
    for clarity, but it could be inherited instead.
    """
    if self.data_format != 'channels_first':
      inputs = self.prot.transpose(inputs, perm=[0, 3, 1, 2])

    outputs = self.prot.conv2d(inputs,
                               self.kernel,
                               self.strides[0],
                               self.padding)

    if self.use_bias:
      outputs = outputs + self.bias

    if self.data_format != 'channels_first':
      outputs = self.prot.transpose(outputs, perm=[0, 2, 3, 1])

    if self.activation is not None:
      return self.activation(outputs)

    return outputs

  def compute_output_shape(self, input_shape):
    """Compute the layer's output shape.

    Fixed twice relative to the previous version: (1) the number of output
    channels of a depthwise convolution is ``input_dim * depth_multiplier``
    (the old code reported just ``depth_multiplier``, the last axis of the
    un-rearranged kernel shape); (2) the returned shape now follows
    ``self.data_format`` instead of always being NCHW-ordered.

    Raises:
      ValueError: If padding is neither "SAME" nor "VALID".
    """
    h_filter, w_filter, in_channels, depth_multiplier = self.kernel_shape
    n_filters = in_channels * depth_multiplier

    if self.data_format == 'channels_first':
      n_x, _, h_x, w_x = input_shape.as_list()
    else:
      n_x, h_x, w_x, _ = input_shape.as_list()

    # Only strides[0] is used: the conv primitive assumes equal strides
    # along height and width (see `call`).
    if self.padding == "SAME":
      h_out = int(np.ceil(float(h_x) / float(self.strides[0])))
      w_out = int(np.ceil(float(w_x) / float(self.strides[0])))
    elif self.padding == "VALID":
      h_out = int(np.ceil(float(h_x - h_filter + 1) / float(self.strides[0])))
      w_out = int(np.ceil(float(w_x - w_filter + 1) / float(self.strides[0])))
    else:
      raise ValueError("Unknown padding: {}".format(self.padding))

    if self.data_format == 'channels_first':
      return [n_x, n_filters, h_out, w_out]
    return [n_x, h_out, w_out, n_filters]

  def get_mask(self, in_channels):
    """Build the block-diagonal 0/1 mask used by `rearrange_kernel`.

    Entry (h, w, i, i + d * in_channels) is 1, so output channel
    ``i + d * in_channels`` only sees input channel ``i``.
    """
    mask = np.zeros((self.kernel_size[0],
                     self.kernel_size[1],
                     in_channels, in_channels * self.depth_multiplier))
    for d in range(self.depth_multiplier):
      for i in range(in_channels):
        mask[:, :, i, i + (d * in_channels)] = 1.
    # Transpose to match the (h, w, out, in) layout used when masking.
    return np.transpose(mask, [0, 1, 3, 2])

  def set_weights(self, weights, sess=None):
    """Sets the weights of the layer.

    Arguments:
      weights: A list of Numpy arrays with shapes and types
        matching the output of layer.get_weights() or a list
        of private variables.  Index 0 is the (un-rearranged) depthwise
        kernel; index 1, if present, the bias.
      sess: tfe session (defaults to the global TFE Keras session).
    """
    weights_types = (np.ndarray, PondPrivateTensor)
    assert isinstance(weights[0], weights_types), type(weights[0])

    # Assign new keras weights to existing weights defined by
    # default when tfe layer was instantiated.
    if not sess:
      sess = KE.get_session()

    if isinstance(weights[0], np.ndarray):
      for i, w in enumerate(self.weights):
        shape = w.shape.as_list()
        tfe_weights_pl = tfe.define_private_placeholder(shape)
        new_weight = weights[i]
        if i == 0:
          # Kernel: apply the same rearrangement used in `build`.
          new_weight = self.rearrange_kernel(new_weight)
        else:
          # Bias: reshape to the rank-3 broadcastable form.
          new_weight = new_weight.reshape(shape)
        fd = tfe_weights_pl.feed(new_weight)
        sess.run(tfe.assign(w, tfe_weights_pl), feed_dict=fd)

    elif isinstance(weights[0], PondPrivateTensor):
      for i, w in enumerate(self.weights):
        shape = w.shape.as_list()
        new_weight = weights[i]
        if i == 0:
          # Kernel: apply the same rearrangement used in `build`.
          new_weight = self.rearrange_kernel(new_weight)
        else:
          # Bias: reshape to the rank-3 broadcastable form.
          new_weight = new_weight.reshape(shape)
        sess.run(tfe.assign(w, new_weight))
| StarcoderdataPython |
155780 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Values loading and merging module.
"""
from logging import getLogger
from pprintpp import pformat
from .inputs import load_files
from .utils.dictionary import update
log = getLogger(__name__)
def expand_dotdict(dotdict):
    """
    Turn a flat mapping with dot-separated keys into a nested one.

    For example ``{'a.b.c': 1, 'a.b.d': 2, 'e.f': 'x'}`` becomes
    ``{'a': {'b': {'c': 1, 'd': 2}}, 'e': {'f': 'x'}}``. Nested levels are
    created with the same mapping type as the input, so an OrderedDict
    input produces OrderedDict levels.

    :param dict dotdict: Original dictionary containing string keys in dot
     notation.
    :return: The expanded dictionary.
    :rtype: dict
    """
    mapping_cls = type(dotdict)
    expanded = mapping_cls()
    for dotted_key, value in dotdict.items():
        parts = dotted_key.split('.')
        assert parts, 'Invalid dot-notation path'
        cursor = expanded
        for part in parts[:-1]:
            cursor = cursor.setdefault(part, mapping_cls())
            # A non-mapping already stored at an intermediate key means two
            # dotted paths contradict each other.
            assert isinstance(cursor, mapping_cls), (
                'Incompatible paths to {}'.format(dotted_key)
            )
        cursor[parts[-1]] = value
    return expanded
def load_values(values_files, values):
    """
    Load rendering values from files and override them with explicit values.

    :param list values_files: List of Path objects pointing to files with
     values for the rendering.
    :param OrderedDict values: Dictionary with keys in dot-notation and its
     associated values. These take precedence over values from the files.
    :return: Normalized values loaded from all files and overrode with the
     values dictionary, if any.
    :rtype: dict
    """
    bundle = load_files(values_files)
    if values:
        log.debug(
            'Expanding dot-notation dictionary:\n{}'.format(pformat(values))
        )
        expanded = expand_dotdict(values)
        log.debug(
            'Expanded dot-notation dictionary:\n{}'.format(pformat(expanded))
        )
        # update() presumably performs a recursive merge of `expanded` into
        # `bundle` -- see .utils.dictionary (TODO confirm).
        update(bundle, expanded)
    if bundle:
        log.debug(
            'Final values bundle:\n{}'.format(pformat(bundle))
        )
    return bundle
# Public API of this module.
__all__ = [
    'load_values',
]
| StarcoderdataPython |
1795778 | <filename>taxonomy/cgi-bin/browse.py
#!/usr/bin/python
# Brutally primitive reference taxonomy browser.
# Basically just a simple shim on the taxomachine 'taxon' method.
# Intended to be run as a CGI command, but it can be tested by running it
# directly from the shell; just set the environment QUERY_STRING to be
# the name or id of the taxon to browse to.
# Apache (or other server) must be configured to be able to run CGI scripts,
# and this program must be in the directory where it looks for such scripts.
# The file name there should be simply 'browse' (not browse.py).
# NOT YET IMPLEMENTED: percent-escaping and -unescaping
# If this were to be written using peyotl, it might do something similar to the following:
# from peyotl.api import APIWrapper
# taxo = APIWrapper().taxomachine
# print taxo.taxon(12345)
# Fallback API endpoint when none can be derived from the environment.
default_api_base_url = 'https://api.opentreeoflife.org/'
# link to taxonomic amendments in the repo that matches the API base URL
# (left empty here; browse() picks the dev or production template below
# at request time)
_AMENDMENT_REPO_URL_TEMPLATE = ''
production_amendment_url_template = 'https://github.com/OpenTreeOfLife/amendments-1/blob/master/amendments/{}.json'
dev_amendment_url_template = 'https://github.com/OpenTreeOfLife/amendments-0/blob/master/amendments/{}.json'
import os
import sys
# NOTE: the StringIO module and the `print` statement at the bottom of this
# file mean this script is Python 2 only.
import cgi, cgitb, StringIO
import requests
import json
# Headers sent with every taxomachine API call.
headers = {
    'content-type' : 'application/json',
    'accept' : 'application/json',
}
# Main entry point. Returns HTML as a string.
def browse(id=None, name=None, limit=None, api_base=None):
    """Main entry point: render the browser page for a taxon as HTML.

    Exactly one of `id` (an OTT id) or `name` (a taxon name, id string,
    or source-qualified id like 'ncbi:1234') should be given. `limit`
    caps how many children are rendered. `api_base`, if given, overrides
    the API server URL; otherwise it is derived from SERVER_NAME or the
    module default. Returns the page body as a string.
    """
    global _AMENDMENT_REPO_URL_TEMPLATE
    output = StringIO.StringIO()
    if api_base is None:
        server_name = os.environ.get('SERVER_NAME')
        # Kludge reflecting current Open Tree of Life server configuration.
        # The original mis-indented this block so that a caller-supplied
        # api_base was clobbered with the default, and an unset SERVER_NAME
        # produced the URL 'https://None/'.
        if server_name is not None and 'devtree' in server_name:
            server_name = server_name.replace('devtree', 'devapi')
            api_base = 'https://%s/' % server_name
            output.write('using API server %s\n' % server_name)
        else:
            api_base = default_api_base_url
    if 'devapi' in api_base:
        _AMENDMENT_REPO_URL_TEMPLATE = dev_amendment_url_template
    else:
        _AMENDMENT_REPO_URL_TEMPLATE = production_amendment_url_template
    try:
        if limit is not None: limit = int(limit)
    except ValueError:
        report_invalid_arg(output, "Argument 'limit' should be an integer!")
        return output.getvalue()
    try:
        if id is not None:
            id = int(id.strip())
            browse_by_id(id, limit, api_base, output)
            return output.getvalue()
    except ValueError:
        report_invalid_arg(output, "Argument 'id' should be an integer!")
        return output.getvalue()
    if name is None:
        # bump them to our default taxon (root of synthetic tree)
        browse_by_name('cellular organisms', limit, api_base, output)
    else:
        name = name.strip()
        if name.isdigit():
            browse_by_id(int(name), limit, api_base, output)
        elif ':' in name and not ' ' in name:
            browse_by_qid(name, limit, api_base, output)
        else:
            browse_by_name(name, limit, api_base, output)
    return output.getvalue()
def report_invalid_arg(output, info):
    # Render an error page section; `info` may be a message string or a
    # JSON-able error payload from the API.
    start_el(output, 'h1')
    output.write('Open Tree taxonomy: <strong class="error">invalid argument</strong>')
    end_el(output, 'h1')
    output.write('<p class="error">There was a problem with the name or ID provided:</p>\n')
    start_el(output, 'pre', 'error')
    output.write(cgi.escape(json.dumps(info, sort_keys=True, indent=4)))
    end_el(output, 'pre')
def browse_by_name(name, limit, api_base, output):
    # Resolve `name` via TNRS; on a unique match browse straight to the
    # taxon, on multiple matches render a disambiguation list.
    result = look_up_name(name, api_base)
    if result is None:
        report_invalid_arg(output, "No taxon found matching name '%s'" % name)
        return None
    matches = result[u'matches']
    if len(matches) == 0:
        output.write('no TNRS matches for %s\n' % cgi.escape(name))
        return None
    elif len(matches) > 1:
        output.write('Matches for %s: \n' % cgi.escape(name))
        start_el(output, 'ul')
        for match in matches:
            taxon = match[u'taxon']
            output.write("  <li> %s" % link_to_taxon(taxon[u'ott_id'], taxon[u'unique_name']))
        end_el(output, 'ul')
    else:
        taxon = matches[0][u'taxon']
        id = taxon[u'ott_id']
        browse_by_id(id, limit, api_base, output)
# Map taxon name to taxonomy id using match_names service
def look_up_name(name, api_base):
    # Returns the first TNRS result dict, None when there are no results,
    # or an error payload (from error_report) on a non-200 response.
    response = requests.post(api_base + 'v3/tnrs/match_names',
                             headers=headers,
                             data=json.dumps({'names':[name], 'include_suppressed':True}))
    if response.status_code == 200:
        answer = response.json()
        results = answer[u'results']
        if len(results) == 0: return None
        # len(results) > 1 shouldn't happen
        return results[0]
    else:
        return error_report(response)
def browse_by_id(id, limit, api_base, output):
    # Fetch taxon info by OTT id and render it into `output`.
    info = get_taxon_info(id, 'ott_id', api_base)
    #print json.dumps(info, sort_keys=True, indent=4)
    display_taxon_info(info, limit, output, api_base)
def browse_by_qid(id, limit, api_base, output):
    # Fetch taxon info by source-qualified id (e.g. 'ncbi:1234') and render it.
    info = get_taxon_info(id, 'source_id', api_base)
    #print json.dumps(info, sort_keys=True, indent=4)
    display_taxon_info(info, limit, output, api_base)
def get_taxon_info(id, property, api_base):
    # Call the taxon_info API; `property` is the lookup key name
    # ('ott_id' or 'source_id'). Returns the JSON payload, or an error
    # payload on a non-200 response.
    d=json.dumps({property: id, 'include_children': True, 'include_lineage': True})
    response = requests.post(api_base + 'v3/taxonomy/taxon_info',
                             headers=headers,
                             data=d)
    if response.status_code == 200:
        return response.json()
    else:
        return error_report(response)
def display_taxon_info(info, limit, output, api_base):
    # Render the full taxon page: search box, heading, details, synonyms,
    # lineage, and the children split into included/suppressed lists.
    # Children are buffered separately so the "included" list can be emitted
    # before the "suppressed" one regardless of interleaving.
    included_children_output = StringIO.StringIO()
    suppressed_children_output = StringIO.StringIO()
    # Search box
    output.write('<form action="browse"><p align="right"><input type="text" name="name" placeholder="name or id"/></p></form>')
    if u'ott_id' in info:
        id = info[u'ott_id']
        start_el(output, 'h1')
        output.write('Open Tree taxonomy: <strong>%s</strong>' % get_display_name(info))
        end_el(output, 'h1')
        start_el(output, 'p', 'legend')
        version = get_taxonomy_version(api_base)
        output.write('The current taxonomy version is <a target="_blank" href="https://tree.opentreeoflife.org/about/taxonomy-version/%s">%s (click for more information)</a>. ' % (version, version,))
        output.write('See the OTT documentation for <a href="https://github.com/OpenTreeOfLife/reference-taxonomy/blob/master/doc/taxon-flags.md#taxon-flags">an explanation of the taxon flags used</a> below, e.g., <span class="flag">extinct</span>\n')
        end_el(output, 'p')
        output.write('<h3>Taxon details</h3>')
        start_el(output, 'p', 'taxon')
        display_basic_info(info, output)
        output.write(' (OTT id %s)' % id)
        synth_tree_url = "/opentree/argus/ottol@%s" % id
        output.write('<br/><a target="_blank" href="%s">View this taxon in the current synthetic tree</a>' % cgi.escape(synth_tree_url))
        end_el(output, 'p')
        if u'synonyms' in info:
            synonyms = info[u'synonyms']
            name = info[u'name']
            # Don't list the primary name among its own synonyms.
            if name in synonyms:
                synonyms.remove(name)
            if len(synonyms) > 0:
                output.write('<h3>Synonym(s)</h3>')
                start_el(output, 'p', 'synonyms')
                output.write("%s\n" % ', '.join(map(link_to_name, synonyms)))
                end_el(output, 'p')
        if u'lineage' in info:
            first = True
            output.write('<h3>Lineage</h3>')
            start_el(output, 'p', 'lineage')
            # N.B. we reverse the list order to show the root first!
            if info[u'lineage']:
                info[u'lineage'].reverse()
            for ancestor in info[u'lineage']:
                if not first:
                    output.write(' > ')
                output.write(link_to_taxon(ancestor[u'ott_id'], ancestor[u'name']))
                first = False
            output.write('\n')
            end_el(output, 'p')
        else:
            # NOTE(review): file-like write() takes a single argument; this
            # line would raise TypeError if ever reached -- % formatting was
            # probably intended.
            output.write('missing lineage field %s\n', info.keys())
        any_included = False
        any_suppressed = False
        if limit == None: limit = 200
        if u'children' in info:
            # priority() sorts exposed children ahead of suppressed ones.
            children = sorted(info[u'children'], key=priority)
            if len(children) > 0:
                # Generate initial output for two lists of children
                suppressed_children_output.write('<h3>Children suppressed from the synthetic tree</h3>')
                start_el(suppressed_children_output, 'ul', 'children')
                nth_suppressed_child = 0
                included_children_output.write('<h3>Children included in the synthetic tree</h3>')
                start_el(included_children_output, 'ul', 'children')
                nth_included_child = 0
                for child in children[:limit]:
                    if ishidden(child):
                        nth_suppressed_child += 1
                        odd_or_even = (nth_suppressed_child % 2) and 'odd' or 'even'
                        start_el(suppressed_children_output, 'li', 'child suppressed %s' % odd_or_even)
                        #write_suppressed(suppressed_children_output)
                        suppressed_children_output.write(' ')
                        display_basic_info(child, suppressed_children_output)
                        end_el(suppressed_children_output, 'li')
                        any_suppressed = True
                    else:
                        nth_included_child += 1
                        odd_or_even = (nth_included_child % 2) and 'odd' or 'even'
                        start_el(included_children_output, 'li', 'child exposed %s' % odd_or_even)
                        start_el(included_children_output, 'span', 'exposedmarker')
                        included_children_output.write(" ")
                        end_el(included_children_output, 'span')
                        included_children_output.write(' ')
                        display_basic_info(child, included_children_output)
                        end_el(included_children_output, 'li')
                        any_included = True
                end_el(suppressed_children_output, 'ul')
                end_el(included_children_output, 'ul')
        if any_included:
            output.write(included_children_output.getvalue())
        if any_suppressed:
            output.write(suppressed_children_output.getvalue())
        if u'children' in info:
            children = info[u'children']
            # Offer a link that re-browses with a huge limit when truncated.
            if children != None and len(children) > limit:
                start_el(output, 'p', 'more_children')
                output.write('... %s' % link_to_taxon(id,
                                                      ('%s more children' %
                                                       (len(children)-limit)),
                                                      limit=100000))
                end_el(output, 'p')
        output.write("\n")
    else:
        # No ott_id means the API returned an error payload; show it.
        report_invalid_arg(output, info)
def get_taxonomy_version(api_base):
    # Ask the API which OTT version it serves; strip any 'draft...' suffix.
    response = requests.post(api_base + 'v3/taxonomy/about',
                             headers=headers,
                             data={})
    if response.status_code == 200:
        version_info = response.json().get('source','')
        if 'draft' in version_info:
            version_info = version_info.split('draft')[0];
        return version_info
    else:
        return error_report(response)
def write_suppressed(output):
    # Render the '*' marker used to flag a suppressed taxon.
    start_el(output, 'span', 'suppressedmarker')
    output.write("*")
    end_el(output, 'span')
def get_display_name(info):
    # Pick the best human-readable name for a taxon record: a non-empty
    # unique_name first, then the plain name, then a generic placeholder.
    unique = info.get(u'unique_name')
    if unique:
        return unique
    if u'name' in info:
        return info[u'name']
    return u'Unnamed taxon'
def display_basic_info(info, output):
    # Render one taxon as a single line: rank, linked name, sources, flags.
    # Might be better to put rank as a separate column in a table. That way the
    # names will line up
    if not info[u'rank'].startswith('no rank'):
        output.write(info[u'rank'] + ' ')
    # Taxon name
    output.write(link_to_taxon(info[u'ott_id'], get_display_name(info)))
    # Sources
    start_el(output, 'span', 'sources')
    if u'tax_sources' in info:
        sources = info[u'tax_sources']
        if len(sources) > 0:
            # First source shown bare, the rest parenthesized.
            output.write(' %s ' % source_link(sources[0]))
            if len(sources) > 1:
                output.write('(%s) ' % (', '.join(map(source_link, sources[1:])),))
    end_el(output, 'span')
    # Flags
    start_el(output, 'span', 'flags')
    output.write('%s ' % ', '.join(map(lambda f:'<span class="flag">%s</span>' % f.lower(), info[u'flags'])))
    end_el(output, 'span')
    output.write('\n')
def source_link(source_id):
    # Turn a source id like 'ncbi:1234' (or a raw URL, or a taxonomic
    # amendment id) into an HTML link to the upstream taxonomy record.
    # Falls back to the bare id when no mapping is known.
    global _AMENDMENT_REPO_URL_TEMPLATE
    if source_id.startswith('http:') or source_id.startswith('https:'):
        url = source_id
    else:
        parts = source_id.split(':')
        url = None
        if len(parts) == 2:
            if parts[0] == 'ncbi':
                url = 'http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=%s' % parts[1]
            elif parts[0] == 'gbif':
                url = 'http://www.gbif.org/species/%s/' % parts[1]
            elif parts[0] == 'irmng':
                # url = 'http://www.marine.csiro.au/mirrorsearch/ir_search.taxon_info?id=%s' % parts[1]
                url = 'http://www.irmng.org/aphia.php?p=taxdetails&id=%s' % parts[1]
            elif parts[0] == 'if':
                url = 'http://www.indexfungorum.org/names/NamesRecord.asp?RecordID=%s' % parts[1]
            elif parts[0] == 'worms':
                url = 'http://www.marinespecies.org/aphia.php?p=taxdetails&id=%s' % parts[1]
            elif parts[0] == 'silva':
                url = 'http://www.arb-silva.de/browser/ssu/silva/%s' % parts[1]
            else:
                # check for taxonomic amendments; link each directly to its latest version on GitHub
                possible_amendment_id = parts[0]  # EXAMPLE source_id: 'additions-10000038-10000038:10000038'
                id_parts = possible_amendment_id.split('-')
                # see peyotl for amendment types and prefixes
                # https://github.com/OpenTreeOfLife/peyotl/blob/3c32582e16be9dcf1029ce3d6481cdb09444890a/peyotl/amendments/amendments_umbrella.py#L33-L34
                if (len(id_parts) > 1) and id_parts[0] in ('additions', 'changes', 'deletions',):
                    url = _AMENDMENT_REPO_URL_TEMPLATE.format(possible_amendment_id)
                    # we use a special displayed format for amendments
                    type_to_singular_prefix = {'additions':'addition' , 'changes':'change', 'deletions':'deletion'}
                    prefix = type_to_singular_prefix.get(id_parts[0])
                    node_id = parts[1]
                    formatted_id = '%s:%s' % (prefix, node_id)
                    return '<a href="%s">%s</a>' % (cgi.escape(url), cgi.escape(formatted_id))
    if url != None:
        return '<a href="%s">%s</a>' % (cgi.escape(url), cgi.escape(source_id))
    else:
        return source_id
def error_report(response):
    """Return the response's JSON payload, or its raw text when the body
    is not parseable as JSON (typical for HTML error pages)."""
    try:
        return response.json()
    except Exception:
        # Narrowed from a bare "except:", which also trapped
        # KeyboardInterrupt/SystemExit; only parse failures should fall
        # back to the raw text.
        return response.text
def start_el(output, tag, clas=''):
    """Write an opening HTML tag with the given class attribute to *output*."""
    output.write('<{} class="{}">'.format(tag, clas))
def end_el(output, tag):
    """Write the closing HTML tag for *tag* to *output*."""
    output.write('</{}>'.format(tag))
def link_to_taxon(id, text, limit=None):
    # Build a link back into this browser for the taxon with OTT id `id`;
    # an optional `limit` is carried through as a query parameter.
    # NOTE(review): cgi.escape was removed in Python 3.8 (html.escape is the
    # replacement) -- consistent with this script being Python 2.
    if limit == None:
        option = ''
    else:
        option = '&limit=%s' % limit
    return '<a href="browse?id=%s%s">%s</a>' % (id, option, style_name(cgi.escape(text)))
def link_to_name(name):
    # Build a link that browses by taxon name. The name is HTML-escaped but
    # not percent-encoded (see the "NOT YET IMPLEMENTED" note at file top).
    name = cgi.escape(name)
    return '<a href="browse?name=%s">%s</a>' % (name, style_name(name))
def style_name(ename):
    """Wrap an already-escaped taxon name in its styling span."""
    return '<span class="name">{}</span>'.format(ename)
def priority(child):
    """Sort key for children: exposed taxa (0) before suppressed ones (1)."""
    return 1 if ishidden(child) else 0
def ishidden(info):
    """True when any of the taxon's flags is in the synthesis exclude set."""
    return any(flag in ott29_exclude_flags for flag in info[u'flags'])
# From treemachine/src/main/java/opentree/GraphInitializer.java
# NOTE(review): the list contains duplicates ("inconsistent" three times);
# harmless here since it is only folded into a dict below.
ott29_exclude_flags_list = ["major_rank_conflict", "major_rank_conflict_inherited", "environmental",
  "unclassified_inherited", "unclassified", "viral", "barren", "not_otu", "incertae_sedis",
  "incertae_sedis_inherited", "extinct_inherited", "extinct", "hidden", "unplaced", "unplaced_inherited",
  "was_container", "inconsistent", "inconsistent", "hybrid", "merged", "inconsistent"]
# Dict used as a set of upper-cased flags (API flags come back upper-cased).
ott29_exclude_flags = {}
for flag in ott29_exclude_flags_list:
    ott29_exclude_flags[flag.upper()] = True
local_stylesheet = """
<style type="text/css">
h1 {
color: #999;
/* indent multi-line heading (a very long taxon name) */
padding-left: 55px;
text-indent: -25px;
/* maintain pleasing placement of Open Tree logo */
height: auto;
padding-top: 0.35em;
line-height: 1.0em;
min-height: 32px;
background: url(https://opentreeoflife.github.io/images/mini-opentree-logo.png) no-repeat left 5px;
}
h1 strong {
color: #000;
}
h3 {
margin-bottom: 0.3em;
}
.legend {
font-style: italic;
}
.legend .flag {
font-style: normal;
}
.error {
color: #933;
}
h4,
p.taxon,
p.synonyms,
p.lineage,
ul.children {
margin-top: 0.25em;
margin-left: 2em;
}
ul.children {
padding-left: 0;
}
ul.children li {
list-style: none;
padding: 0.25em;
/* align text with other details; pad for bg color and indent second line */
margin-left: -0.5em;
padding-left: 2.5em;
text-indent: -2em;
}
li.odd {
background-color: #fff;
}
li.even {
background-color: #f5f5f5;
}
span.name {
font-weight: bold;
}
span.sources,
span.flags {
padding-left: 1em;
}
span.flag {
font-family: monospace;
color: #999;
}
</style>
"""
# CGI entry point: read query parameters, emit headers, and wrap the
# browse() output in a full HTML page. The `print` statement below makes
# this Python 2 only.
if __name__ == '__main__':
    form = cgi.FieldStorage()
    id = name = limit = api_base = None
    if "id" in form: id = form["id"].value
    if "name" in form: name = form["name"].value
    if "limit" in form: limit = form["limit"].value
    if "api_base" in form: api_base = form["api_base"].value
    # Content-type information is not helpful in our current setup?
    sys.stdout.write('Content-type: text/html; charset=utf8\r\n')
    sys.stdout.write('\r\n')
    output = sys.stdout
    start_el(output, 'html')
    start_el(output, 'head', '')
    output.write('<link rel="stylesheet" href="//opentreeoflife.github.io/css/main.css" />')
    output.write(local_stylesheet)
    end_el(output, 'head')
    start_el(output, 'body')
    print browse(id, name, limit, api_base).encode('utf-8')
    end_el(output, 'body')
    end_el(output, 'html')
| StarcoderdataPython |
1791377 | <gh_stars>0
# coding: utf8
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
# web2py controller setup: error redirect target, per-session "recently
# viewed" lists, and the site-wide navigation menu.
error_page=URL('error')
if not session.recent_companies: session.recent_companies=[]
if not session.recent_persons: session.recent_persons=[]
response.menu=[
    ['Companies',False,url('list_companies')],
    ['Contacts',False,url('list_persons')],
    ['Tasks',False,url('list_tasks')],
]
def add(mylist, item):
    """Record *item* in a recent-items list of (id, name) pairs.

    Returns the list unchanged if the item's id is already present;
    otherwise returns the first nine entries plus the new pair, so the
    result never exceeds ten entries.
    """
    known_ids = [entry[0] for entry in mylist]
    if item.id in known_ids:
        return mylist
    return mylist[:9] + [(item.id, item.name)]
def index():
    # Default action: renders the index view with no data.
    return dict()
@auth.requires_login()
def list_companies():
    # List all companies alphabetically, with a create form.
    form=crud.create(db.company)
    companies=db(db.company.id>0).select(orderby=db.company.name)
    return dict(companies=companies,form=form)
@auth.requires_login()
def view_company():
    # Show one company (id in request.args(0)); redirect on missing record.
    company_id=request.args(0)
    company=db.company[company_id] or redirect(error_page)
    session.recent_companies = add(session.recent_companies,company)
    return dict(company=company)
@auth.requires_login()
def edit_company():
    # Edit form for one company; returns to the company list on success.
    company_id=request.args(0)
    company=db.company[company_id] or redirect(error_page)
    session.recent_companies = add(session.recent_companies,company)
    form=crud.update(db.company,company,next=url('list_companies'))
    return dict(form=form)
@auth.requires_login()
def list_persons():
    # With a company id in args(0): list that company's contacts plus a
    # create form pre-bound to the company. Without: list all contacts.
    company_id=request.args(0)
    company=db.company[company_id]
    if company:
        session.recent_companies = add(session.recent_companies,company)
        db.person.company.default=company_id
        db.person.company.writable=False
        db.person.company.readable=False
        form=crud.create(db.person)
        persons=db(db.person.company==company.id)\
            .select(orderby=db.person.name)
    else:
        form=None
        persons=db(db.person.id>0).select(orderby=db.person.name)
    return dict(company=company,persons=persons,form=form)
@auth.requires_login()
def view_person():
    # Show one contact (id in request.args(0)); redirect on missing record.
    person_id=request.args(0)
    person=db.person[person_id] or redirect(error_page)
    session.recent_persons = add(session.recent_persons,person)
    return dict(person=person)
@auth.requires_login()
def list_docs():
    # List a contact's documents with an upload form bound to that contact.
    person_id=request.args(0)
    person=db.person[person_id] or redirect(error_page)
    session.recent_persons = add(session.recent_persons,person)
    db.document.person.default=person.id
    db.document.person.writable=False
    db.document.person.readable=False
    form=crud.create(db.document)
    docs=db(db.document.person==person.id).select(orderby=db.document.name)
    return dict(person=person,docs=docs,form=form)
@auth.requires_login()
def list_logs():
    # List a contact's log entries, newest first, with a create form.
    person_id=request.args(0)
    person=db.person[person_id] or redirect(error_page)
    session.recent_persons = add(session.recent_persons,person)
    db.log.person.default=person.id
    db.log.person.writable=False
    db.log.person.readable=False
    form=crud.create(db.log)
    logs=db(db.log.person==person.id).select(orderby=~db.log.created_on)
    return dict(person=person,logs=logs,form=form)
@auth.requires_login()
def edit_person():
    # Edit form for one contact; the company reference stays fixed.
    person_id=request.args(0)
    person=db.person[person_id] or redirect(error_page)
    session.recent_persons = add(session.recent_persons,person)
    db.person.company.writable=False
    db.person.company.readable=False
    form=crud.update(db.person,person,next=url('view_person',person_id))
    return dict(form=form)
@auth.requires_login()
def edit_task():
    # Edit form for one task; the person reference stays fixed.
    task_id=request.args(0)
    task=db.task[task_id] or redirect(error_page)
    person=db.person[task.person]
    db.task.person.writable=db.task.person.readable=False
    form=crud.update(db.task,task,next='view_task/[id]')
    return dict(form=form, person=person)
@auth.requires_login()
def view_task():
    # Read-only view of one task plus its associated contact.
    task_id=request.args(0)
    task=db.task[task_id] or redirect(error_page)
    person=db.person[task.person]
    db.task.person.writable=db.task.person.readable=False
    form=crud.read(db.task,task)
    return dict(form=form, person=person, task=task)
@auth.requires_login()
def list_tasks():
    # List the logged-in user's tasks, optionally filtered to one contact,
    # with a create form.
    person_id=request.args(0)
    person=db.person[person_id]
    if person_id:
        tasks=db(db.task.person==person_id)\
            (db.task.created_by==auth.user.id)\
            (db.task.start_time>=request.now).select()
    else:
        # NOTE(review): this branch uses start_time <= now (past tasks)
        # while the branch above and calendar() use >= (upcoming) --
        # confirm whether the asymmetry is intended.
        tasks=db(db.task.created_by==auth.user.id)\
            (db.task.start_time<=request.now).select()
    db.task.person.default=person_id
    db.task.person.writable=db.task.person.readable=False
    form=crud.create(db.task,next='view_task/[id]')
    return dict(tasks=tasks,person=person,form=form)
@auth.requires_login()
def calendar():
    # Calendar view of the logged-in user's upcoming tasks, optionally
    # filtered to one contact.
    person_id=request.args(0)
    person=db.person[person_id]
    if person_id:
        tasks=db(db.task.person==person_id)\
            (db.task.created_by==auth.user.id)\
            (db.task.start_time>=request.now).select()
    else:
        tasks=db(db.task.created_by==auth.user.id)\
            (db.task.start_time>=request.now).select()
    return dict(tasks=tasks,person=person)
# Admin-only wipe of all tables (keeps only the admin's own auth_user row).
# NOTE(review): '<EMAIL>' looks like a redacted placeholder; restore the
# real admin address before deploying.
@auth.requires(auth.user and auth.user.email=='<EMAIL>')
def reset():
    for table in db.tables:
        if table=='auth_user':
            db(db[table].email!='<EMAIL>').delete()
        else:
            db(db[table].id>0).delete()
    session.flash='done!'
    redirect('index')
def error():
    # Generic landing page for redirects from missing records.
    return dict(message="something is wrong")
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    # auth() dispatches on request.args(0) to the matching auth form.
    return dict(form=auth())
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    # Streams the file referenced by request.args from the db upload store.
    return response.download(request,db)
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    # Services are stateless; drop the session before dispatching.
    session.forget()
    return service()
| StarcoderdataPython |
3448431 | <reponame>eswan18/remarker<filename>premark/presentation.py
import sys
from functools import reduce
from operator import add
from pathlib import Path
import logging
from typing import Union, List, Iterable, NamedTuple, Optional
from pkg_resources import resource_filename
from dataclasses import dataclass
from jinja2 import Template
import yaml
if sys.version_info >= (3, 8):
from typing import Final
else:
from typing_extensions import Final
PKG_NAME = 'premark'
logger = logging.getLogger(__name__)
class DefaultSettings(NamedTuple):
    # Bundle of module-wide fallback values (instantiated once as DEFAULTS).
    javascript: str        # <script> block appended to every rendered page
    html_template: Path    # page template wrapping the markdown
    stylesheet: Path       # CSS file injected into the page
    title: str             # default <title> text
    metafile: str          # default per-directory section manifest filename
DEFAULTS: Final = DefaultSettings(
javascript="""
<script src="https://remarkjs.com/downloads/remark-latest.min.js"></script>
<script>
var slideshow = remark.create({
ratio: '16:9',
slideNumberFormat: '(%current%/%total%)',
countIncrementalSlides: false,
highlightLines: true
});
</script>
""",
html_template=Path(resource_filename(PKG_NAME, 'templates/default.html')),
stylesheet=Path(resource_filename(PKG_NAME, "templates/default.css")),
title='Premark Presentation',
metafile='sections.yaml',
)
@dataclass
class SectionDefinition:
    """One section of a sliced presentation: a markdown file plus optional
    auto-generated title-slide settings."""
    file: Path
    title: Optional[str] = None
    autotitle: Optional[bool] = None  # If None, treated as True if title is not None.

    def __post_init__(self) -> None:
        # Assume files without suffixes that don't exist should be .md files.
        if '.' not in str(self.file) and not self.file.exists():
            new_file = self.file.with_suffix('.md')
            logger.info(f'Inferring .md suffix: changing {self.file} to {new_file}')
            self.file = new_file

    def should_autotitle(self) -> bool:
        # Explicit autotitle wins; otherwise autotitle iff a title was given.
        return self.autotitle if self.autotitle is not None else bool(self.title)

    def make_presentation(self, section_num: Optional[int] = None) -> 'Presentation':
        # Build a Presentation from this section's markdown, optionally
        # prefixed with a generated "#N / title" slide.
        markdown = self.file.read_text()
        # Create the auto-generated section title slide.
        if self.should_autotitle():
            if section_num is None:
                msg = ('Must provide a `section_num` argument to create presentations '
                       'from autotitled SectionDefinitions.')
                raise ValueError(msg)
            markdown = ('class: center, middle\n'
                        f'## #{section_num}\n'
                        f'# {self.title}\n'
                        '---\n'
                        f'{markdown}')
        return Presentation(markdown)
class Presentation:
    '''
    An unrendered RemarkJS presentation.
    '''
    # NOTE(review): the third declared attribute is `stylesheet_template`,
    # but __init__ sets `self.stylesheet` -- one of the two names is stale.
    markdown: str
    html_template: str
    stylesheet_template: str

    def __init__(
        self,
        markdown: Union[str, Path],
        html_template: Union[str, Path] = DEFAULTS.html_template,
        stylesheet: Union[str, Path] = DEFAULTS.stylesheet,
    ):
        '''
        Create a new Presentation.
        Parameters
        ----------
        markdown
            The markdown from which to render the presentations. If a Path object, is
            interpreted as a file containing the markdown. If a string, is interpreted
            as the literal markdown.
        html_template
            The HTML in which to insert the markdown. If a Path object, is interpreted
            as a file containing the HTML. If a string, is interpreted as the literal
            HTML.
        stylesheet
            The CSS to include in the eventual rendered HTML. If a Path object, is
            interpreted as a file containing the CSS. If a string, is interpreted as the
            literal CSS code.
        '''
        # Normalize every Path argument to its file contents up front.
        if isinstance(markdown, Path):
            markdown = markdown.read_text()
        if isinstance(html_template, Path):
            html_template = html_template.read_text()
        if isinstance(stylesheet, Path):
            stylesheet = stylesheet.read_text()
        self.markdown = markdown
        self.html_template = html_template
        self.stylesheet = stylesheet

    def to_html(self, title: str = DEFAULTS.title) -> str:
        '''
        Convert the presentation to HTML.
        Parameters
        ----------
        title
            The name to be used in the title tag in the resulting HTML.
        Returns
        -------
        str
            An HTML rendering of the presentation.
        '''
        template = Template(self.html_template)
        stylesheet_html = f"<style>\n{self.stylesheet}\n</style>"
        return template.render(
            title=title,
            markdown=self.markdown,
            stylesheet=stylesheet_html,
            js=DEFAULTS.javascript,
        )

    def __add__(self, other: 'Presentation') -> 'Presentation':
        '''Concatenate presentations.'''
        if not isinstance(other, self.__class__):
            return NotImplemented
        html_matches = (self.html_template == other.html_template)
        style_matches = (self.stylesheet == other.stylesheet)
        if html_matches and style_matches:
            # '---' on its own line is the remark slide separator.
            merged_markdown = self.markdown + '\n---\n' + other.markdown
            return self.__class__(
                markdown=merged_markdown,
                html_template=self.html_template,
                stylesheet=self.stylesheet,
            )
        else:
            # NOTE(review): the two adjacent literals concatenate without a
            # space ("...HTML andstylesheet.").
            msg = ('Cannot concatenate presentations unless they have the same HTML and'
                   'stylesheet.')
            raise TypeError(msg)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return NotImplemented
        else:
            md_matches = (self.markdown == other.markdown)
            html_matches = (self.html_template == other.html_template)
            style_matches = (self.stylesheet == other.stylesheet)
            return (md_matches and html_matches and style_matches)

    @classmethod
    def from_presentations(
        cls,
        presentations: Iterable['Presentation'],
    ) -> 'Presentation':
        '''
        Create a single presentations by merging others together.
        Parameters
        ----------
        presentations
            An iterable of Presentation objects
        Returns
        -------
        Presentation
            The resulting, merged presentation
        '''
        # Because '+' is overloaded to concatenate, this merges the inputs.
        # NOTE(review): an empty iterable raises TypeError from reduce.
        return reduce(add, presentations)

    @classmethod
    def from_directory(
        cls,
        directory: Union[str, Path],
        metafile: str = DEFAULTS.metafile,
    ) -> 'Presentation':
        '''
        Create a slideshow from multiple markdown files in a folder.
        Parameters
        ----------
        directory
            The directory where the markdown files are stored. Should be a Path object
            or a string that can be treated as a path.
        metafile
            The name of the file in that directory that defines the order in which to
            stitch together the markdown files.
        Returns
        -------
        Presentation
            A new presentation based on the files in the input directory
        '''
        if not isinstance(directory, Path):
            directory = Path(directory)
        metafile_path = directory / metafile
        try:
            with open(metafile_path, 'rt') as f:
                metadata = yaml.load(f, Loader=yaml.SafeLoader)
        except FileNotFoundError as exc:
            msg = f'metafile "{metafile}" not found in directory'
            raise ValueError(msg) from exc
        # The metafile should contain a dictionary with a 'sections' key, the
        # value of which is a list of dictionaries (or plain filenames), along
        # with optional additional keys 'html_template' and 'stylesheet'.
        try:
            sections = metadata['sections']
        except (KeyError, TypeError) as exc:
            msg = "Expected to find 'sections' heading in metafile"
            raise KeyError(msg) from exc
        if 'html_template' in metadata:
            html_template = Path(metadata['html_template'])
        else:
            html_template = DEFAULTS.html_template
        if 'stylesheet' in metadata:
            stylesheet = Path(metadata['stylesheet'])
        else:
            stylesheet = DEFAULTS.stylesheet
        # If we have a list of {'file': str} pairs (vs just a list of strings), we need
        # to extract the filenames.
        # NOTE(review): an empty 'sections' list makes sections[0] raise IndexError.
        if isinstance(sections[0], dict):  # metadata is List[Dict[str, str]]
            try:
                section_defs = [
                    SectionDefinition(
                        file=(directory / entry['file']),
                        title=entry.get('title'),
                        autotitle=entry.get('autotitle')
                    )
                    for entry in sections
                ]
            except KeyError:
                msg = 'Section entries must contain a "file" key'
                raise KeyError(msg)
        else:  # sections is List[str], hopefully
            section_defs = [SectionDefinition(directory / s) for s in sections]
        # Check the files exist and then stitch them together.
        # Only autotitled sections consume a section number.
        section_num = 1
        presentations: List[Presentation] = []
        for section in section_defs:
            if section.should_autotitle():
                prez = section.make_presentation(section_num)
                section_num += 1
            else:
                prez = section.make_presentation()
            presentations.append(prez)
        final_prez = Presentation.from_presentations(presentations)
        final_prez.html_template = Path(html_template).read_text()
        final_prez.stylesheet = Path(stylesheet).read_text()
        return final_prez
| StarcoderdataPython |
3416551 | # encoding: utf-8
import re
import base64
from ..utils import int_or_none
from ..utilsEX import download_webPage_by_PYCURL
from ..extractor.common import InfoExtractor
from ..extractor.generic import GenericIE
class GoMoviesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:gomovies|gostream)\.\w+/film/[\w-]+-(?P<id>[\d]+)'
_DOMAIN = ''
_TESTS = [
{
'url': 'https://gomovies.is/film/wonder-woman-20963/',
'md5': '77108c1e4ab58f48031101a1a2119789',
'info_dict': {
'id': 'wonder-woman-20963',
'ext': 'mp4',
'title': 'Wonder Woman.',
'duration': 141,
'timestamp': 1205712000,
'uploader': 'none',
'upload_date': '20080317',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
]
_url = ''
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
try:
return super(GoMoviesIE, self)._download_webpage(url_or_request, video_id, note, errnote, fatal, tries, timeout, encoding, data, headers, query)
except:
return download_webPage_by_PYCURL(self, url_or_request, timeout, data, headers, query)
    def _real_extract(self, url):
        # Extract playable formats and metadata for one film page.
        if (not 'watching.html' in url):
            url += 'watching.html'
        self._url = url
        video_id = self._match_id(url)
        # Fetch all playback-source data_ids for this video_id.
        episodes_url = r'https://gostream.is/ajax/movie_episodes/' + video_id
        webpage = self._download_webpage(episodes_url, video_id)
        data_ids = re.findall(r'id=\\"ep-(?P<id>\d+)?\\"', webpage)
        if (not data_ids):
            # No sources found here; fall back to the parent extractor.
            return super(GoMoviesIE, self)._real_extract(url)
        formats = []
        for data_id in data_ids:
            formats.extend(self.get_medias(video_id, data_id))
        # openload? Hosted-elsewhere entries are delegated via url_result.
        if len(formats) != 0:
            for format in formats:
                if format['url'] and ('openload' in format['url'] or format['ext'] == 'url'):
                    return self.url_result(format['url'])
        # Film metadata.
        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'thumbnail': thumbnail,
            'http_headers': {
                'Referer': url
            }
        }
def get_medias(self, video_id, data_id):
formats = []
try:
#根据video_id、data_id获取所需_x、_y参数
token_url = r'https://gostream.is/ajax/movie_token?eid=%s&mid=%s' % (data_id, video_id)
webpage = self._download_webpage(token_url, video_id)
#取_x、_y参数
#_x='4f90e3b8e08d4afe5ec3e87cffbed574', _y='2b48fcf478de468252b0d522d5fb30ce';
_x = self._search_regex(r'_x=\'(\w+)\'', webpage, '_x')
_y = self._search_regex(r'_y=\'(\w+)\'', webpage, '_y')
if (not _x or not _y):
return formats
#https://gomovies.is/ajax/movie_sources/655099?x=4f90e3b8e08d4afe5ec3e87cffbed574&y=2b48fcf478de468252b0d522d5fb30ce
sources_url = r'https://gostream.is/ajax/movie_sources/%s?x=%s&y=%s' % (data_id, _x, _y)
movie_json = self._download_json(sources_url, video_id)
if (not movie_json or not movie_json['playlist'] or not movie_json['playlist'][0]['sources']):
return formats
#解析格式
movie_list = movie_json['playlist'][0]['sources']
if isinstance(movie_list, dict):
tl = movie_list
movie_list = []
movie_list.append(tl)
for movie in movie_list:
label = movie.get('label', '0p')
height = int_or_none(label.lower().rstrip('p'), default=0)
ext = movie['type']
ext = ext if (ext.find('video/') == -1) else ext.replace('video/', '')
formats.append({
'format_id': label,
#为排序用
'height': height,
'ext': ext,
'url': movie['file'],
})
except Exception as e:
try:
# openload一支
sources_url = r'https://gomovies.is/ajax/movie_embed/%s' % data_id
movie_json = self._download_json(sources_url, video_id)
if not movie_json or not movie_json['src']:
return formats
formats.append({
'ext': 'url',
'url': movie_json['src'],
})
except:
pass
#去除无效资源
self._check_formats(formats, video_id)
return formats
# 加headers判断
def _is_valid_url(self, url, video_id, item='video', headers={}):
try:
headers = { 'Referer': self._url }
result = super(GoMoviesIE, self)._is_valid_url(url, video_id, item, headers)
if result:
webpage = self._download_webpage(url, video_id, headers=headers)
err_infos = ['''There could be several reasons for this, for example it got removed by the owner.''',
'''It maybe got deleted by the owner or was removed due a copyright violation.''']
for err_info in err_infos:
if err_info in webpage:
result = False
return result
except:
return False
class GoMovies_fm_IE(InfoExtractor):
    """Extractor for gomovies.fm pages.

    The page embeds a Base64-encoded snippet whose iframe ``src`` points at
    the real player; that URL is handed to the generic extractor and the
    resulting info dict is tagged with Referer/User-Agent headers.
    """
    _VALID_URL = r'https?://(?:www\.)?gomovies\.fm'
    def _real_extract(self, url):
        page = self._download_webpage(url, url)
        encoded = self._search_regex(r'document\.write\(Base64.decode\("([^"]+)', page, 'xxxx')
        decoded = base64.b64decode(encoded.encode('ascii')).decode('utf-8')
        embed_url = self._search_regex(r'src="([^"]+)', decoded, '')
        generic = GenericIE()
        generic.set_downloader(self._downloader)
        info = generic.extract(embed_url)
        info['http_headers'] = {'Referer': embed_url, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
        # Propagate the headers to every format of every entry as well.
        for entry in info.get('entries', {}):
            for fmt in entry['formats']:
                fmt['http_headers'] = info['http_headers']
        return info
12817091 | from Drivers.ImageModelDriver import *
from Drivers.ImageDisplayDriver import *
from Drivers.ReadDriver import *
from PIL import Image
class ImageLesson2:
@staticmethod
def run():
# Загружаем фото c12-85v.xcr
loaded_image_c12_85v = ReadDriver.image_binary_read(
'lesson2/', 'c12-85v', '.jpg', '>H', 1024, 1024, 5120
)
# Обрабатываем
ImageModelDriver.rotate90(loaded_image_c12_85v)
ImageModelDriver.grayscale(loaded_image_c12_85v)
ImageDisplayDriver.save(loaded_image_c12_85v)
ImageModelDriver.resize(loaded_image_c12_85v, Image.BILINEAR, 0.6)
ImageDisplayDriver.save(loaded_image_c12_85v)
# Загружаем фото u0.xcr
loaded_image_u0 = ReadDriver.image_binary_read(
'lesson2/', 'u0', '.jpg', '>H', 2048, 2500, 5120
)
# Обрабатываем
ImageModelDriver.rotate90(loaded_image_u0)
ImageModelDriver.grayscale(loaded_image_u0)
ImageDisplayDriver.save(loaded_image_u0)
ImageModelDriver.resize(loaded_image_u0, Image.BILINEAR, 0.6)
ImageDisplayDriver.save(loaded_image_u0)
| StarcoderdataPython |
1646856 | <gh_stars>1-10
# coding=utf-8
"""Testing the NetworkBilling view.
Usage:
$ python manage.py test endagaweb.NetworkNotification
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from django import test
from endagaweb import models
from endagaweb.util import api
from endagaweb.views import network
from settings import BTS_LANGUAGES
class NetworkNotification(test.TestCase):
    """Testing the view's ability to convert credits.
    Some day this may be more complex as we take into account the network
    setting for subscriber currency.
    """
    # NOTE(review): `reverse` is used below but never imported in this module
    # (expected from django.core.urlresolvers / django.urls) -- these tests
    # would raise NameError at runtime; confirm against the original source.
    # The <EMAIL>/<PASSWORD> tokens are dataset redaction placeholders.
    # user = notification = user_profile = bts = subscriber = None
    @classmethod
    def setUpClass(cls):
        # Build one superuser, a BTS on its network, a subscriber, and a
        # pre-translated notification shared by all tests.
        cls.username = 'y'
        cls.password = 'pw'
        cls.user = models.User(username=cls.username, email='<EMAIL>')
        cls.user.set_password(<PASSWORD>)
        cls.user.is_superuser = True
        cls.user.save()
        cls.user_profile = models.UserProfile.objects.get(user=cls.user)
        cls.bts = models.BTS(uuid="12345abcd", nickname="testbts",
                             inbound_url="http://localhost/test",
                             network=cls.user_profile.network,
                             locale='en')
        cls.bts.save()
        cls.subscriber_imsi = 'IMSI000123'
        cls.subscriber_num = '5551234'
        cls.subscriber_role = 'Subscriber'
        cls.subscriber = models.Subscriber.objects.create(
            name='test-name', imsi=cls.subscriber_imsi,
            role=cls.subscriber_role, network=cls.bts.network, bts=cls.bts)
        cls.subscriber.save()
        # NOTE(review): api.translate presumably calls an external
        # translation service -- these fixtures depend on network access.
        message = 'the quick brown fox jumped over the lazy dog'
        translated = api.translate(message, to_lang='tl', from_lang='auto')
        cls.notification = models.Notification(
            network=cls.user_profile.network, event='Test Event',
            message=message, type='automatic', language='tl',
            translation=translated)
        cls.notification.save()
    @classmethod
    def tearDownClass(cls):
        """Deleting the objects created for the tests."""
        cls.user.delete()
        cls.user_profile.delete()
        cls.bts.delete()
        cls.subscriber.delete()
        cls.notification.delete()
    def login(self):
        """Log the client in."""
        data = {
            'email': self.user,
            'password': <PASSWORD>,
        }
        self.client.post(reverse('auth-and-login'), data)
    def logout(self):
        """Log the client out."""
        self.client.get(reverse('logout'))
    def setUp(self):
        # Fresh view instances per test.
        self.view = network.NetworkNotifications()
        self.manage = network.NetworkNotificationsEdit()
    def test_translation(self):
        # Each supported language should yield the expected translation.
        languages = ['es', 'id', 'tl']
        message = 'the quick brown fox jumped over the lazy dog'
        translation = {
            # filipino
            'tl':
                'ang mabilis na brown na lobo ay tumalon sa tamad na aso',
            # Spanish
            'es': 'el rápido zorro marrón saltó sobre el perro perezoso',
            # Indonesian
            'id':
                'Rubah coklat cepat melompati anjing malas itu'
        }
        for language in languages:
            translated = api.translate(message, to_lang=language)
            self.assertEqual(translated, translation[language].decode('utf-8'))
    def test_notification_exists(self):
        # The fixture notification is retrievable by its stored translation.
        translation = {
            # filipino
            'tl':
                'ang mabilis na brown na lobo ay tumalon sa tamad na aso'
        }
        event = models.Notification.objects.get(
            network=self.user_profile.network, language='tl',
            translation=translation['tl']
        )
        self.assertEqual(event, self.notification)
    def test_dashboard_notification_request_unauth(self):
        # Unauthenticated requests are redirected (302) to the login page.
        self.logout()
        response = self.client.get(reverse('network-notifications'))
        self.assertEqual(302, response.status_code)
    def test_dashboard_notification_request_auth(self):
        # Authenticated requests render the notifications page (200).
        self.login()
        response = self.client.get(reverse('network-notifications'))
        self.assertEqual(200, response.status_code)
    def test_notification_created(self):
        # Posting a mapped notification creates one row per BTS language.
        self.login()
        message = 'the quick brown fox jumped over the lazy dog test'
        translate = api.multiple_translations(message, *BTS_LANGUAGES)
        params = {
            'type': 'mapped',
            'number': '123',
            'message': message,
            'pk': 0,
        }
        for language in translate:
            params['lang_'+language] = translate[language]
        self.client.post(reverse('network-notifications-manage'), params)
        self.assertEqual(len(BTS_LANGUAGES), models.Notification.objects.filter(
            network=self.user_profile.network, event='123').count())
    def test_notification_deleted(self):
        # Posting the ids deletes every notification on the network.
        self.login()
        event_id = models.Notification.objects.filter(
            network=self.user_profile.network).values_list('id', flat=True)
        delete_event = {'id': event_id}
        self.client.post(reverse('network-notifications-manage'),
                         delete_event)
        # delete all events for that network
        self.assertEqual(0, models.Notification.objects.filter(
            network=self.user_profile.network).count())
| StarcoderdataPython |
6462719 | <reponame>utsw-bicf/gudmap_rbk.rna-seq
#!/usr/bin/env python3
import argparse
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_args():
    """Build the CLI parser and return the parsed arguments namespace."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-r', '--repRID', required=True,
                            help="The replicate RID.")
    arg_parser.add_argument('-m', '--metaFile', required=True,
                            help="The metadata file to extract.")
    arg_parser.add_argument('-p', '--parameter', required=True,
                            help="The parameter to extract.")
    return arg_parser.parse_args()
def main():
    """Print the requested metadata field extracted from a pipeline CSV.

    Validation failures print a message and exit(1); otherwise the single
    value for the requested parameter is printed to stdout.
    """
    args = get_args()
    metaFile = pd.read_csv(args.metaFile, sep=",", header=0)
    # Check replicate RID metadata from 'File.csv'
    if (args.parameter == "repRID"):
        if (len(metaFile.Replicate_RID.unique()) > 1):
            print("There are multiple replicate RID's in the metadata: " +
                  " ".join(metaFile.Replicate_RID.unique()))
            exit(1)
        # NOTE(review): unique() returns an array; this comparison relies on
        # it holding exactly one element after the check above.
        if not (metaFile.Replicate_RID.unique() == args.repRID):
            print("Replicate RID in metadata does not match run parameters: " +
                  metaFile.Replicate_RID.unique() + " vs " + args.repRID)
            exit(1)
        else:
            rep = metaFile["Replicate_RID"].unique()[0]
            print(rep)
    # Check experiment RID metadata from 'Experiment.csv'
    if (args.parameter == "expRID"):
        if (len(metaFile.Experiment_RID.unique()) > 1):
            print("There are multiple experiment RID's in the metadata: " +
                  " ".join(metaFile.Experiment_RID.unique()))
            exit(1)
        else:
            exp = metaFile["Experiment_RID"].unique()[0]
            print(exp)
    # Check study RID metadata from 'Experiment.csv'
    if (args.parameter == "studyRID"):
        if (len(metaFile.Study_RID.unique()) > 1):
            print("There are multiple study RID's in the metadata: " +
                  " ".join(metaFile.Study_RID.unique()))
            exit(1)
        else:
            study = metaFile["Study_RID"].unique()[0]
            print(study)
    # Get endedness metadata from 'Experiment Settings.csv'
    if (args.parameter == "endsMeta"):
        endsMeta = metaFile.Paired_End.unique()[0]
        print(endsMeta)
    # Get strandedness metadata from 'Experiment Settings.csv'
    if (args.parameter == "stranded"):
        stranded = metaFile.Strandedness.unique()[0]
        print(stranded)
    # Get spike-in metadata from 'Experiment Settings.csv'
    if (args.parameter == "spike"):
        spike = metaFile.Used_Spike_Ins.unique()[0]
        print(spike)
    # Get species metadata from 'Experiment.csv'
    if (args.parameter == "species"):
        species = metaFile.Species.unique()[0]
        print(species)
    # Get read length metadata from 'Experiment Settings.csv'
    if (args.parameter == "readLength"):
        # May legitimately contain multiple lengths; print them all.
        readLength = metaFile.Read_Length.unique()
        print(str(readLength).strip('[]'))
# Standard script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11308302 | from memory.space import Bank, Reserve, Allocate, Write
import instruction.asm as asm
class Steal:
    """ROM patch that alters the FF6 steal mechanics.

    Depending on the randomizer flags, either raises the steal-success
    constants in place or rewrites the common-vs-rare selection routine
    so a steal always succeeds when the enemy carries an item.
    """
    def __init__(self, rom, args):
        # rom: ROM image being patched; args: parsed randomizer flags.
        self.rom = rom
        self.args = args
    def enable_steal_chances_always(self):
        """Rewrite the steal routine so a steal always succeeds.

        If the enemy has both rare and common items, the rare item is
        stolen 3/8 of the time (random byte < 0x60).
        """
        #Always steal if the enemy has an item.
        # If the enemy has both rare and common, the rare item will be stolen 3/8 of the time.
        space = Reserve(0x0239b7, 0x0239e4, "steal common vs rare logic", asm.NOP())
        space.add_label("SPACE_END", space.end_address + 1)
        space.write(
            asm.PHY(),
            asm.PHX(),
            asm.LDX(0x3308, asm.ABS_Y),  # x = rare item
            asm.INY(),
            asm.LDA(0x3308, asm.ABS_Y),  # a = common item
            asm.TAY(),                   # y = common item
            asm.CPX(0xff, asm.IMM8),     # rare steal exists?
            asm.BEQ("STEAL_COMMON"),     # if not, steal common item
            asm.CPY(0xff, asm.IMM8),     # common steal exists?
            asm.BEQ("STEAL_RARE"),       # if not, steal rare item
            asm.JSR(0x4b5a, asm.ABS),    # a = random number between 0 and 255
            asm.CMP(0x60, asm.IMM8),     # compare with 96
            asm.BLT("STEAL_RARE"),       # if a < 96 then steal rare item
            "STEAL_COMMON",
            asm.TYA(),                   # a = common item
            asm.BRA("STEAL_ITEM"),       # steal common item
            "STEAL_RARE",
            asm.TXA(),                   # a = rare item
            "STEAL_ITEM",
            asm.PLX(),
            asm.PLY(),
            asm.BRA("SPACE_END"),        # skip nops
        )
    def enable_steal_chances_higher(self):
        """Patch two constants so stealing succeeds far more often."""
        # Increase the Constant added to Attacker's Level from 50 (0x32) to 90 (0x5A)
        # Effectively increases chance of stealing for same-level targets from 50% to 90%
        # Reference on Steal function (starts at C2 399E):
        # StealValue = Attacker's level + Constant - Target's level
        # If Thief Glove equipped: StealValue *= 2
        # If StealValue <= 0 then steal fails
        # If StealValue >= 128 then you automatically steal
        # If StealValue < Random Value (0-99), then you fail to steal
        # Else Steal is successful
        space = Reserve(0x239BB, 0x239BB, "steal value constant")
        space.write(0x5A) # default: 0x32
        # Increase the Rare Steal Constant from 32 (0x20) to 96 (0x60)
        # Effectively increases probably of stealing a rare item from 1/8 to 3/8
        # Occurs after the StealValue calculation above
        # Reference on Rare Steals formula (starts at C2 39DB):
        # Load Rare Item into Item-to-Steal slot
        # If Rare Steal Constant > Random Value (0-255), <- this occurs 7/8 of the time
        # load Common item into Item-to-Steal slot instead
        # If Item-to-Steal is not empty, acquire it and set both Common and Rare Items to empty
        # Else Fail to steal
        space = Reserve(0x239DD, 0x239DD, "rare steal constant")
        space.write(0x60) # default: 0x20
    def mod(self):
        """Apply the patch selected by the flags (higher takes precedence)."""
        if self.args.steal_chances_higher:
            self.enable_steal_chances_higher()
        elif self.args.steal_chances_always:
            self.enable_steal_chances_always()
    def write(self):
        # Emit spoiler-log output if requested; patching happens in mod().
        if self.args.spoiler_log:
            self.log()
    def log(self):
        # Nothing to record for this mod.
        pass
| StarcoderdataPython |
3515729 | <filename>planet/scripts/tasks.py
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import numpy as np
from planet import control
from planet import networks
from planet import tools
# Description of a control task: display name, environment factory,
# episode length in agent steps, and the observation keys to model.
Task = collections.namedtuple(
    'Task', ['name', 'env_ctor', 'max_length', 'state_components'])
def cartpole_balance(config, params):
    """Task factory: dm_control cartpole 'balance' from pixels."""
    repeat = params.get('action_repeat', 8)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'cartpole', 'balance')
    return Task('cartpole_balance', env_ctor, length,
                ['reward', 'position', 'velocity'])
def cartpole_swingup(config, params):
    """Task factory: dm_control cartpole 'swingup' from pixels."""
    repeat = params.get('action_repeat', 8)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'cartpole', 'swingup')
    return Task('cartpole_swingup', env_ctor, length,
                ['reward', 'position', 'velocity'])
def finger_spin(config, params):
    """Task factory: dm_control finger 'spin' from pixels."""
    repeat = params.get('action_repeat', 2)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'finger', 'spin')
    return Task('finger_spin', env_ctor, length,
                ['reward', 'position', 'velocity', 'touch'])
def cheetah_run(config, params):
    """Task factory: dm_control cheetah 'run' from pixels."""
    repeat = params.get('action_repeat', 4)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'cheetah', 'run')
    return Task('cheetah_run', env_ctor, length,
                ['reward', 'position', 'velocity'])
def cup_catch(config, params):
    """Task factory: dm_control ball_in_cup 'catch' from pixels."""
    repeat = params.get('action_repeat', 6)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'ball_in_cup', 'catch')
    return Task('cup_catch', env_ctor, length,
                ['reward', 'position', 'velocity'])
def walker_walk(config, params):
    """Task factory: dm_control walker 'walk' from pixels."""
    repeat = params.get('action_repeat', 2)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'walker', 'walk')
    return Task('walker_walk', env_ctor, length,
                ['reward', 'height', 'orientations', 'velocity'])
def humanoid_walk(config, params):
    """Task factory: dm_control humanoid 'walk' from pixels."""
    repeat = params.get('action_repeat', 2)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _dm_control_env, repeat, length, 'humanoid', 'walk')
    return Task('humanoid_walk', env_ctor, length, [
        'reward', 'com_velocity', 'extremities', 'head_height',
        'joint_angles', 'torso_vertical', 'velocity'])
def gym_cheetah(config, params):
    """Task factory: Gym HalfCheetah-v3 with proprioceptive state."""
    repeat = params.get('action_repeat', 2)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _gym_env, repeat, config.batch_shape[1], length, 'HalfCheetah-v3')
    return Task('gym_cheetah', env_ctor, length, ['reward', 'state'])
def gym_racecar(config, params):
    """Task factory: Gym CarRacing-v0 from pixels.

    Note: the task is registered under the name 'gym_racing' (kept for
    compatibility with existing logs/configs).
    """
    repeat = params.get('action_repeat', 2)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _gym_env, repeat, config.batch_shape[1], length, 'CarRacing-v0',
        obs_is_image=True)
    return Task('gym_racing', env_ctor, length, ['reward'])
def gym_sokoban(config, params):
    """Task factory: Sokoban-small-v0 from pixels with discrete actions."""
    repeat = params.get('action_repeat', 1)
    length = 1000 // repeat
    env_ctor = functools.partial(
        _gym_env, repeat, config.batch_shape[1], length, 'Sokoban-small-v0',
        act_is_discrete=True, obs_is_image=True)
    return Task('gym_sokoban', env_ctor, length, ['reward'])
def gym_atari(config, params):
    """Task factory: an Atari game (default Freeway) from pixels."""
    game = params.get('game_name', 'Freeway-v0')
    repeat = params.get('action_repeat', 4)
    length = 2000 // repeat
    env_ctor = functools.partial(
        _gym_env, repeat, config.batch_shape[1], length, game,
        act_is_discrete=True, obs_is_image=True)
    return Task('gym_' + game, env_ctor, length, ['reward'])
def _dm_control_env(action_repeat, max_length, domain, task):
    """Build a dm_control environment wrapped for pixel-based training.

    Wrapper order matters: repeat actions first, cap the episode length,
    then attach 64x64 uint8 pixel observations and downcast to 32-bit.
    """
    from dm_control import suite
    env = control.wrappers.DeepMindWrapper(suite.load(domain, task), (64, 64))
    env = control.wrappers.ActionRepeat(env, action_repeat)
    env = control.wrappers.MaximumDuration(env, max_length)
    env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
    env = control.wrappers.ConvertTo32Bit(env)
    return env
def _gym_env(action_repeat, min_length, max_length, name,
             act_is_discrete=False, obs_is_image=False):
    """Build a Gym environment wrapped for model-based training.

    Handles both state-based and image-based Gym tasks; for image tasks the
    rendered frame becomes the 'image' observation, otherwise the raw state
    is kept under 'state' and pixels are added from the renderer.
    """
    if "Sokoban" in name:
        # Importing gym_sokoban registers the Sokoban environments with Gym.
        import gym_sokoban
    import gym
    env = gym.make(name)
    env = env.env  # Remove the TimeLimit wrapper
    env.frameskip = 1  # Disable Gym frame skipping
    env = control.wrappers.ActionRepeat(env, action_repeat)
    if act_is_discrete:
        env = control.wrappers.DiscreteWrapper(env)
    else:
        env = control.wrappers.NormalizeActions(env)
    env = control.wrappers.MinimumDuration(env, min_length)
    env = control.wrappers.MaximumDuration(env, max_length)
    if obs_is_image:
        env = control.wrappers.ObservationDict(env, 'image')
        env = control.wrappers.ObservationToRender(env)
    else:
        env = control.wrappers.ObservationDict(env, 'state')
    env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
    env = control.wrappers.ConvertTo32Bit(env)
    return env
| StarcoderdataPython |
3518214 | <reponame>devaos/sublime-remote<gh_stars>1-10
# -*- coding: utf-8 -*-
import sys
import unittest
import env
import mocks.sublime
# Install the mock before importing remote.sublime_api so that its
# `import sublime` resolves to the stub instead of the real editor API.
sys.modules["sublime"] = mocks.sublime.MockSublime()
import remote.sublime_api as sublime_api
class TestSublimeHelperFunctions(unittest.TestCase):
    """Unit tests for remote.sublime_api helpers using a mocked window."""
    def test_active_project_bad_args(self):
        """project_by_path returns None when given a None path."""
        w = mocks.sublime.MockWindow()
        ret = sublime_api.project_by_path(w, None)
        self.assertEqual(ret, None, "Bad arguments should return nothing")
    def test_update_project_settings(self):
        """Settings merge into the matching project folder's entry."""
        w = mocks.sublime.MockWindow()
        w.set_project_data({"folders": [{"path": "/a"}]})
        ret = sublime_api.update_project_settings(w, "/a/b", {"a": "b"})
        self.assertIsNot(ret, None, "Settings should change")
        self.assertDictEqual(ret, {"a": "b", "path": "/a"},
                             "Settings should match")
# Run the suite directly (module doubles as a standalone test runner).
suite = unittest.TestLoader().loadTestsFromTestCase(TestSublimeHelperFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
11381360 | <gh_stars>0
import numpy as np
from scipy.special import gamma
from skimage import color
class MLVMeasurement():
    """No-reference sharpness metric based on Maximum Local Variation (MLV).

    The score is the standard deviation of a descending-sorted MLV map
    weighted by an exponential decay; larger values indicate a sharper
    image. Input is a 2-D grayscale float image (3-channel input is
    converted via skimage's rgb2gray).
    """

    def __init__(self):
        # Candidate GGD shape parameters. Kept for interface compatibility;
        # the current sigma estimator no longer consults them.
        self.gam = np.linspace(0.2, 10, 9801)

    def __estimateggdparam(self, vec):
        """Return sigma = sqrt(E[vec^2]) of the weighted MLV samples.

        The original implementation also tabulated the GGD ratio r_gam
        from self.gam but never used it; that dead computation is removed.
        """
        return np.sqrt(np.mean(vec ** 2))

    def __MLVMap(self, img):
        """Return the MLV map of *img*.

        For each interior pixel this is the maximum (signed) difference
        between the pixel's 8 shifted copies and the pixel itself. The
        shifted copies are zero-padded exactly as in the original x1..x9
        construction, so interior-border pixels also compare against zeros.
        """
        xs, ys = img.shape
        center = img[1:xs - 1, 1:ys - 1]
        diffs = []
        # (row0, col0) enumerate the 9 placements; (1, 1) is the center.
        for row0 in (0, 1, 2):
            for col0 in (0, 1, 2):
                if row0 == 1 and col0 == 1:
                    continue
                shifted = np.zeros((xs, ys))
                shifted[row0:row0 + xs - 2, col0:col0 + ys - 2] = center
                diffs.append(shifted[1:xs - 1, 1:ys - 1] - center)
        return np.max(diffs, axis=0)

    def getScore(self, x):
        """Return the MLV sharpness score of image *x* (float)."""
        if x.ndim == 3:  # color input
            x = color.rgb2gray(x)
        mlv_map = self.__MLVMap(x)
        vec = mlv_map.reshape(-1)
        # Sort descending in place (sorting the reversed view ascending).
        vec[::-1].sort()
        # Emphasize the strongest variations with an exponential decay,
        # then keep only the first 1000 samples.
        weights = np.exp(-0.01 * np.arange(vec.size))
        svec = (vec * weights)[:1000]
        return self.__estimateggdparam(svec)
201031 | import Encoding
# import SPIMITOOL
def test_encoding():
    """Print gamma and delta encodings for the gaps 1..10 side by side."""
    encoder = Encoding.Encoding()
    gaps = list(range(1, 11))
    encoder.run(gaps, "g")
    encoder.run(gaps, "d")
    print("gap gamma encode delta encode")
    for gap in gaps:
        line = "{} {} {}".format(
            gap, encoder.gamma_encoding(gap), encoder.delta_encoding(gap))
        print(line, end="")
    print()
def test_spimitool():
    """Run SPIMI indexing over the tokenized data directory."""
    # NOTE(review): `import SPIMITOOL` is commented out at the top of this
    # file, so calling this function raises NameError as-is.
    dir_path = 'D:\\Classes\\CSS6322 Information Retrival\\HW1\Token\\data\\'
    SPIMITOOL.SPIMI(dir_path, output_file="index.txt", block_size=50000)
def test_block_compression():
    """Compress a small term dictionary with blocked compression (k = 8)."""
    # term -> [document frequency, postings pointer]
    term_table = {"automation": [3, 5], "automatic": [5, 6],
                  "autograph": [8, 90], "NASA": [10, 34],
                  "housekeeper": [2, 495], "household": [48, 95],
                  "houseboat": [3, 48]}
    compressed = Encoding.Encoding().blocked_compression(term_table, 8)
    print()
def test_front_coding_compression():
    """Compress a sorted term dictionary with front coding."""
    dictionary = {}
    # term, df, postings ptr
    dictionary = {"automation": [3, 5], "automatic": [5, 6], "NASA": [10, 34],
                  "housekeeper": [2, 495], "household": [48, 95], "autograph": [8, 90], "houseboat": [3, 48]}
    # NOTE(review): `files` is undefined in this scope -- this loop raises
    # NameError; it presumably expected pre-read (df, ptr, term) rows.
    for data in files:
        dictionary[data[2]] = [data[0], data[1]]
    encoder = Encoding.Encoding()
    compressed_dictionary = encoder.front_coding_compression(sorted(dictionary), 3, 4)
    y = 5
def test_max():
    """Scratch experiment: max() over zipped dict values/keys (no output)."""
    my_dict = {'x': [1, [1, 5, 140]], 'y': [1, [1, 5, 140]], 'z': [1, [1, 3, 140]]}
    my_dict2 = {'x': 1, 'b': 2, "c": 2}
    array = [[1, 4, 5], [1, 5, 5], [1, 6, 5], [1, 6, 5]]
    largest = max(zip(my_dict.values(), my_dict.keys()))
def test():
    """Write a sample sentence to test.bin as UTF-8 bytes."""
    text = "Python is interesting."
    with open("test.bin", "wb") as handle:
        handle.write(bytearray(text, 'utf-8'))
def test_group_term_by_front_code():
    """Group sorted terms by 4-char prefix, then compress each sub-group."""
    encoder = Encoding.Encoding()
    # term -> [document frequency, postings pointer]
    term_table = {"automation": [3, 5], "automatic": [5, 6], "NASA": [10, 34],
                  "housekeeper": [2, 495], "household": [48, 95],
                  "autograph": [8, 90], "houseboat": [3, 48]}
    # Preserve the original ordering: stable sort by first character only.
    sorted_terms = [term for term in sorted(term_table, key=lambda kv: kv[0])]
    groups = encoder.group_term_by_front_code(sorted_terms, 4)
    compressed = encoder.compress_sub_group(groups, 4, 3)
    print()
# Runs the front-coding experiment when the module is executed/imported.
test_group_term_by_front_code()
| StarcoderdataPython |
155841 | <gh_stars>1-10
from time import sleep
# Read two grades, compute the average, and print the verdict. Fixes over
# the original: grades must be within 0..10 (negatives were accepted),
# the redundant `elif` re-check is a plain `else`, ANSI colors are reset
# with '\033[m' (the original "closed" them with the same color code),
# and the "REINICIANDO" typo is corrected.
done = False
while not done:
    n1 = float(input('Primeira nota: '))
    n2 = float(input('Segunda nota: '))
    if 0 <= n1 <= 10 and 0 <= n2 <= 10:
        media = (n1 + n2) / 2
        print(f'Sua média foi de: {media}')
        if media < 5:
            print('\033[31mREPROVADO!!\033[m\n')
        elif media >= 7:
            print('\033[32mAPROVADO!!\033[m\n')
        else:
            print('\033[33mRECUPERAÇÃO!!\033[m\n')
        done = True
    else:
        print('Valores não permitidos!!! REINICIANDO...')
        sleep(1)
1711726 | #!/usr/bin/env python
import sys
from setuptools import find_packages, setup
# Package metadata for kite.metrics; console entry point generates the
# metric JSON schemas.
setup(
    name='kite.metrics',
    version='0.1.0',
    author='<NAME>.',
    description='Kite Metrics',
    packages=find_packages(),
    # Runtime dependencies: templating, schema definitions, CLI parsing.
    install_requires=[
        "jinja2>=2",
        "PyYAML>=5",
        "click>=7",
    ],
    entry_points = {
        'console_scripts': ['kite-metrics-schemas=kite_metrics.json_schema:main'],
    },
    python_requires='>=3.6',
    include_package_data = True,
)
| StarcoderdataPython |
11222323 | """Console entrypoint for creating PCR primers"""
import argparse
import sys
from typing import List
from . import __version__, primers, Primer
from .primers import PRIMER_FMT
def run():
    """Entry point for console_scripts.

    Create the FWD/REV primers for the sequence given on the command line
    and log them with a header row.
    """
    parsed = parse_args(sys.argv[1:])
    fwd, rev = primers(
        parsed.seq,
        add_fwd=parsed.f,
        add_fwd_len=tuple(parsed.fl),
        add_rev=parsed.r,
        add_rev_len=tuple(parsed.rl),
        offtarget_check=parsed.t,
    )
    print(PRIMER_FMT.format("dir", "tm", "ttm", "dg", "pen", "seq"))
    for primer in (fwd, rev):
        print(primer)
def parse_args(args: List[str]) -> argparse.Namespace:
    """Parse command line parameters.
    Created and based on an example from pyscaffold:
    https://github.com/pyscaffold/pyscaffold/blob/master/src/pyscaffold/templates/skeleton.template
    Args:
        args ([str]): List of parameters as strings
    Returns:
        `argparse.Namespace`: command line parameters namespace
    """
    # RawTextHelpFormatter preserves the indentation of the description
    # below, so the column legend renders exactly as written.
    parser = argparse.ArgumentParser(
        description="""Create PCR primers for a DNA sequence.
        Logs the FWD and REV primer with columns:
        dir, tm, ttm, dg, pen, seq
        Where:
        dir = FWD or REV.
        tm = Melting temperature of the annealing/binding part of the primer (Celsius).
        ttm = The total melting temperature of the primer with added seq (Celsius).
        dg = The minimum free energy of the primer (kcal/mol).
        pen = The primer's penalty score. Lower is better.
        seq = The sequence of the primer in the 5' to the 3' direction.
        """,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("seq", type=str, metavar="SEQ", help="DNA sequence")
    # -f / -fl configure the additional sequence on the forward primer.
    parser.add_argument(
        "-f",
        type=str,
        help="additional sequence to add to FWD primer (5' to 3')",
        default="",
        metavar="SEQ",
    )
    parser.add_argument(
        "-fl",
        type=int,
        nargs=2,
        help="space separated min-max range for the length to add from '-f' (5' to 3')",
        default=[-1, -1],
        metavar="INT",
    )
    # -r / -rl mirror the same options for the reverse primer.
    parser.add_argument(
        "-r",
        type=str,
        help="additional sequence to add to REV primer (5' to 3')",
        default="",
        metavar="SEQ",
    )
    parser.add_argument(
        "-rl",
        type=int,
        nargs=2,
        help="space separated min-max range for the length to add from '-r' (5' to 3')",
        default=[-1, -1],
        metavar="INT",
    )
    parser.add_argument(
        "-t",
        type=str,
        help="sequence to check for offtargets binding sites",
        default="",
        metavar="SEQ",
    )
    parser.add_argument(
        "--version", action="version", version="seqfold {ver}".format(ver=__version__)
    )
    return parser.parse_args(args)
if __name__ == "__main__":
run()
| StarcoderdataPython |
6612390 | <reponame>riihikallio/tsoha
from flask import render_template
from flask_login import login_required
from application import app
from application.reports.models import sales_by_category, sales_by_customer
@app.route("/reports/", methods=["GET"])
@login_required
def reports():
    """Render the sales reports page with category and customer breakdowns."""
    category_sales = sales_by_category()
    customer_sales = sales_by_customer()
    return render_template("reports/show.html", cat=category_sales,
                           cust=customer_sales)
| StarcoderdataPython |
1867172 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import migrate_sql.operations
class Migration(migrations.Migration):
    """Replace the single-field 'sale' composite type with a two-field one.

    The type is dropped and recreated (PostgreSQL composite types cannot be
    redefined in place), so operation order is significant.
    """
    dependencies = [
        ('test_app', '0003_auto_20160108_0048'),
        ('test_app2', '0001_initial'),
    ]
    operations = [
        # Undo the previous definition: drop the old one-field type.
        migrate_sql.operations.ReverseAlterSQL(
            name='sale',
            sql='DROP TYPE sale',
            reverse_sql='CREATE TYPE sale AS (arg1 int); -- 1',
        ),
        # Recreate it with the new second field.
        migrate_sql.operations.AlterSQL(
            name='sale',
            sql='CREATE TYPE sale AS (arg1 int, arg2 int); -- 2',
            reverse_sql='DROP TYPE sale',
        ),
    ]
| StarcoderdataPython |
4818627 | <reponame>guptav96/DeepRL<filename>examples.py
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from deep_rl import *
# Print tensors/arrays without truncation for easier debugging of configs.
torch.set_printoptions(profile="full")
np.set_printoptions(threshold=1000000)
def dqn_pixel(**kwargs):
    """Train a pixel-based (double) DQN agent on an Atari game.

    kwargs override the defaults below and are merged into the Config;
    `game` selects the environment, `replay_cls` the replay buffer type.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    kwargs.setdefault('n_step', 1)
    kwargs.setdefault('replay_cls', UniformReplay)
    kwargs.setdefault('async_replay', True)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    # Rainbow-style Adam settings (lr = 0.0000625).
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00025/4, eps=1.5e-04)
    config.network_fn = lambda: VanillaNet(config.action_dim, NatureConvBody(in_channels=config.history_length))
    # Epsilon-greedy exploration annealed 1.0 -> 0.1 over 1M steps.
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e6)
    config.batch_size = 32
    config.discount = 0.99
    config.history_length = 4
    config.max_steps = int(2.5e7)
    replay_kwargs = dict(
        memory_size=int(1e6),
        batch_size=config.batch_size,
        n_step=config.n_step,
        discount=config.discount,
        history_length=config.history_length,
    )
    config.replay_fn = lambda: ReplayWrapper(config.replay_cls, replay_kwargs, config.async_replay)
    # Prioritized-replay parameters (used when replay_cls supports them).
    config.replay_eps = 0.01
    config.replay_alpha = 0.6
    config.replay_beta = LinearSchedule(0.4, 1.0, config.max_steps)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.target_network_update_freq = 10000
    config.exploration_steps = 20000
    config.sgd_update_frequency = 4
    config.gradient_clip = 10
    config.double_q = True
    config.async_actor = False
    run_steps(DQNAgent(config))
def bdqn_pixel(**kwargs):
    """Configure and run a Bayesian DQN (BDQN) agent on a pixel-based task.

    Mirrors ``dqn_pixel`` but uses ``BDQNNet``/``BDQNAgent`` with
    Thompson-sampling exploration instead of an epsilon schedule.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    kwargs.setdefault('n_step', 1)
    kwargs.setdefault('replay_cls', UniformReplay)
    kwargs.setdefault('async_replay', True)
    config = Config()
    config.merge(kwargs)
    config.tune = False
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    # NOTE(review): lr here is 0.0025, 40x the dqn_pixel value — presumably
    # intentional for BDQN, but worth confirming against the reference paper.
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.0025, eps=1.5e-04)
    config.network_fn = lambda: BDQNNet(NatureConvBody(in_channels=config.history_length))
    config.batch_size = 32
    config.discount = 0.99
    config.history_length = 4  # number of stacked frames fed to the network
    config.max_steps = int(2e7)
    replay_kwargs = dict(
        memory_size=int(1e6),
        batch_size=config.batch_size,
        n_step=config.n_step,
        discount=config.discount,
        history_length=config.history_length,
    )
    config.replay_fn = lambda: ReplayWrapper(config.replay_cls, replay_kwargs, config.async_replay)
    # Prioritized-replay parameters (used when replay_cls is PrioritizedReplay).
    config.replay_eps = 0.01
    config.replay_alpha = 0.6
    config.replay_beta = LinearSchedule(0.4, 1.0, config.max_steps)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.target_network_update_freq = 10000
    config.exploration_steps = 20000
    config.sgd_update_frequency = 4
    # BDQN-specific knobs: posterior resampling and Bayesian-regression update
    # frequencies plus prior/noise variances.
    config.thompson_sampling_freq = 1000
    config.bdqn_learn_frequency = 50000
    config.prior_var = 0.001
    config.noise_var = 1
    config.var_k = 0.001
    config.gradient_clip = 10
    config.double_q = True
    config.async_actor = False
    run_steps(BDQNAgent(config))
if __name__ == '__main__':
    # Script entry point: prepare log directories, pin thread usage, seed RNGs,
    # pick a GPU, then launch a single training run.
    mkdir('log')
    mkdir('tf_log')
    set_one_thread()
    random_seed()
    # select_device(-1) # CPU
    select_device(1)
    game = 'AssaultNoFrameskip-v4'
    # dqn_pixel(game=game, n_step=1, replay_cls=PrioritizedReplay, async_replay=True, run=0, remark='prer')
    bdqn_pixel(game=game, n_step=1, replay_cls=PrioritizedReplay, async_replay=True, run=0, remark='bdqnprer')
| StarcoderdataPython |
6447148 | <filename>aws-py-dynamicresource/mysql_dynamic_provider.py
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.
import mysql.connector as connector
from mysql.connector import errorcode
from pulumi import Input, Output, ResourceOptions
from pulumi.dynamic import *
from typing import Any, Optional
import binascii
import os
# A class representing the arguments that the dynamic provider needs. Each argument
# will automatically be converted from Input[T] to T before being passed to the
# functions in the provider
class SchemaInputs(object):
    """Arguments for the Schema dynamic resource.

    Each ``Input[T]`` is automatically resolved to a plain ``T`` before the
    values reach the provider's create/diff/update/delete functions.
    """
    creator_name: Input[str]
    creator_password: Input[str]
    server_address: Input[str]
    database_name: Input[str]
    creation_script: Input[str]
    deletion_script: Input[str]

    def __init__(self, creator_name, creator_password, server_address, database_name, creation_script, deletion_script):
        self.creator_name = creator_name
        # BUG FIX: the password argument was never assigned (the source held a
        # redacted placeholder, which is not even valid Python); store it like
        # every other input so vars(args) exposes "creator_password".
        self.creator_password = creator_password
        self.server_address = server_address
        self.database_name = database_name
        self.creation_script = creation_script
        self.deletion_script = deletion_script
# The code for the dynamic provider that gives us our custom resource. It handles
# all the create, read, update, and delete operations the resource needs.
class SchemaProvider(ResourceProvider):
    """Pulumi dynamic provider that manages a MySQL schema by running the
    configured creation/deletion SQL scripts."""

    def create(self, args):
        """Run the creation script and return the new resource's state.

        ``args`` carries the resolved SchemaInputs fields as a plain dict.
        """
        # A connection is created to the MySQL database, and the script is run.
        # BUG FIX: the password key was a redacted "<PASSWORD>" placeholder;
        # the dict produced from SchemaInputs uses "creator_password".
        connection = connector.connect(user=args["creator_name"],
                                       password=args["creator_password"],
                                       host=args["server_address"],
                                       database=args["database_name"])
        cursor = connection.cursor()
        # NOTE(review): with multi=True mysql.connector returns an iterator of
        # result sets that must be consumed for all statements to execute —
        # verify the script actually runs (and commits) as expected.
        cursor.execute(args["creation_script"], multi=True)
        # The creation process is finished. We assign a unique ID to this resource,
        # and return all the outputs required by the resource (in this case
        # outputs are identical to the inputs)
        return CreateResult("schema-"+binascii.b2a_hex(os.urandom(16)).decode("utf-8"), outs=args)

    def delete(self, id, args):
        """Run the deletion script to tear the resource down."""
        # BUG FIX: same "<PASSWORD>" placeholder repaired as in create().
        connection = connector.connect(user=args["creator_name"],
                                       password=args["creator_password"],
                                       host=args["server_address"],
                                       database=args["database_name"])
        cursor = connection.cursor()
        cursor.execute(args["deletion_script"])

    def diff(self, id, old_inputs, new_inputs):
        """Decide whether modified inputs need an update or a full replace."""
        # server_address, database_name, and creation_script are critical inputs
        # that require the resource to be entirely replaced if they are modified.
        # Changes in other inputs mean the resource can be safely updated without
        # recreating it
        replaces = []
        if (old_inputs["server_address"] != new_inputs["server_address"]): replaces.append("server_address")
        if (old_inputs["database_name"] != new_inputs["database_name"]): replaces.append("database_name")
        if (old_inputs["creation_script"] != new_inputs["creation_script"]): replaces.append("creation_script")
        return DiffResult(
            # If the old and new inputs don't match, the resource needs to be updated/replaced
            changes=old_inputs != new_inputs,
            # If the replaces[] list is empty, nothing important was changed, and we do not have to
            # replace the resource
            replaces=replaces,
            # An optional list of inputs that are always constant
            stables=None,
            # The existing resource is deleted before the new one is created
            delete_before_replace=True)

    def update(self, id, old_inputs, new_inputs):
        """Update in place: adopt the new inputs as the resource's outputs."""
        # The old existing inputs are discarded and the new inputs are used
        return UpdateResult(outs={**new_inputs})
# The main Schema resource that we instantiate in our infrastructure code
class Schema(Resource):
    """Pulumi custom resource representing a MySQL schema managed by
    SchemaProvider.  Construct it with a SchemaInputs instance."""
    # The inputs used by the dynamic provider are made implicitly available as outputs
    creator_name: Output[str]
    creator_password: Output[str]
    server_address: Output[str]
    database_name: Output[str]
    creation_script: Output[str]
    deletion_script: Output[str]

    def __init__(self, name: str, args: SchemaInputs, opts=None):
        # NOTE: The args object is converted to a dictionary using vars()
        super().__init__(SchemaProvider(), name, vars(args), opts)
| StarcoderdataPython |
1898817 | from __future__ import absolute_import
import os
import sys
from dateutil import parser
from threading import Thread
from anyjson import dumps, loads
from amqp.protocol import queue_declare_ok_t
from kombu.exceptions import ChannelError
from kombu.five import Empty, Queue
from kombu.log import get_logger
from kombu.transport import virtual, base
from kombu.utils import cached_property, uuid
from kombu.utils.compat import OrderedDict
import google.auth
from google.cloud import pubsub_v1
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
from google.api_core.exceptions import AlreadyExists, DeadlineExceeded
logger = get_logger(__name__)
class Worker(Thread):
    """Background thread that continuously pulls Pub/Sub messages from one
    subscription and feeds them into a bounded local queue.

    The thread starts itself from __init__ and loops forever (it is never
    joined or stopped explicitly by this module).
    """

    def __init__(
            self, client, subscription_path, max_messages,
            queue, return_immediately):
        Thread.__init__(self)
        self.subscriber = client          # Pub/Sub SubscriberClient
        self.subscription_path = subscription_path
        self.queue = queue                # bounded queue.Queue shared with Channel
        self.max_messages = max_messages  # max messages per pull request
        self.return_immediately = return_immediately
        self.start()  # thread begins pulling as soon as it is constructed

    def callback(self, msg):
        # NOTE(review): appears unused — run() puts messages on the queue
        # directly; confirm before removing.
        self.queue.put(msg, block=True)

    def run(self):
        """Pull messages in an endless loop, blocking when the queue is full."""
        while True:
            logger.info("".join(["Pulling messsage using subscription ",
                                 self.subscription_path]))
            try:
                resp =\
                    self.subscriber.pull(
                        self.subscription_path, self.max_messages,
                        return_immediately=self.return_immediately)
            except (ValueError, DeadlineExceeded):
                # Transient pull failures are retried on the next iteration.
                continue
            if resp.received_messages:
                for msg in resp.received_messages:
                    # Blocks when the queue is full, providing back-pressure.
                    self.queue.put(msg, block=True)
class Message(base.Message):
    """Kombu message built from a Pub/Sub ``ReceivedMessage``.

    The Pub/Sub payload is a JSON-serialized kombu envelope; it is decoded
    here and mapped onto kombu's message attributes.
    """

    def __init__(self, channel, msg, **kwargs):
        body, props = self._translate_message(msg)
        super(Message, self).__init__(
            channel,
            body=body,
            # The Pub/Sub message id doubles as kombu's delivery tag.
            delivery_tag=msg.message.message_id,
            content_type=props.get('content_type'),
            content_encoding=props.get('content_encoding'),
            delivery_info=props.get('delivery_info'),
            properties=props,
            headers=props.get('headers') or {},
            **kwargs)

    def _translate_message(self, raw_message):
        """Decode the JSON envelope into (body, properties-dict)."""
        serialized = loads(raw_message.message.data)
        properties = {
            'headers': serialized['headers'],
            'content_type': serialized['content-type'],
            'reply_to': serialized['properties']['reply_to'],
            'correlation_id': serialized['properties']['correlation_id'],
            'delivery_mode': serialized['properties']['delivery_mode'],
            'delivery_info': serialized['properties']['delivery_info'],
            'content_encoding': serialized['content-encoding']
        }
        return serialized['body'], properties

    def ack(self):
        """Send an acknowledgement of the message consumed."""
        self.channel.basic_ack(self.delivery_tag)
class QoS(virtual.QoS):
    """QoS tracker that maps kombu acks onto Pub/Sub acknowledgements."""

    def __init__(self, channel):
        super(QoS, self).__init__(channel, 1)
        self._channel = channel
        # delivery_tag -> (received message, subscription path) for messages
        # delivered to a consumer but not yet acknowledged.
        self._not_yet_acked = OrderedDict()

    def append(self, delivery_tag, message):
        """Track a delivered-but-unacked message.

        :param delivery_tag: delivery tag (Pub/Sub message id)
        :type delivery_tag: str
        :param message: ``(received_message, subscription_path)`` tuple as
            stored by ``Channel._msg_get`` / ``Channel._concurrent_msg_get``
        """
        self._not_yet_acked[delivery_tag] = message

    def ack(self, delivery_tag):
        """Acknowledge the message to Pub/Sub and stop tracking it.

        :param delivery_tag: delivery tag (Pub/Sub message id)
        :type delivery_tag: str
        """
        message, subscription_path = self._not_yet_acked.pop(delivery_tag)
        self._channel.subscriber.\
            acknowledge(subscription_path, [message.ack_id])
class Channel(virtual.Channel):
    """Kombu virtual channel backed by Google Cloud Pub/Sub.

    Exchanges map to Pub/Sub topics, queues map to subscriptions.  Messages
    with an ETA are routed through Cloud Tasks (which later re-publishes
    them via a Cloud Function) to emulate delayed delivery.
    """
    QoS = QoS
    Message = Message
    TOPIC_PATH = "projects/{}/topics/{}"
    SUBSCRIPTION_NAME = "projects/{}/subscriptions/{}"

    def __init__(self, *args, **kwargs):
        super(Channel, self).__init__(*args, **kwargs)
        # queue name -> fully-qualified subscription path
        self._queue_cache = {}
        # subscription path -> local queue.Queue of pulled messages
        self.temp_cache = {}

    def _get_topic_path(self, exchange):
        # Fully-qualified Pub/Sub topic path for an exchange name.
        return self.TOPIC_PATH.format(self.project_id, exchange)

    def _get_subscription_name(self, subscription):
        # Fully-qualified Pub/Sub subscription path for a queue name.
        return self.SUBSCRIPTION_NAME.format(self.project_id, subscription)

    def _new_queue(self, queue, **kwargs):
        """Resolve a queue name to its subscription path (cached).

        :param queue: queue name
        :type queue: str
        :return: subscription_path
        :rtype: str
        """
        # Celery worker-direct queues embed "hostname@pid"; '@' is not valid
        # in a Pub/Sub subscription id, so it is rewritten to '.'.
        if 'pid' in queue:
            queue = queue.replace("@", ".")
        try:
            return self._queue_cache[queue]
        except KeyError:
            subscription_path =\
                self._get_subscription_name(queue)
            self._queue_cache[queue] = subscription_path
            return subscription_path

    def _get(self, queue):
        """Get a message from queue.

        The message is pulled from PubSub.  To boost consumption, the cache
        size can be adjusted to pull multiple messages at once via
        MAX_MESSAGES.  Raises Empty when nothing is available.

        :param queue: queue name
        :type queue: str
        :return: message
        :rtype: Message
        """
        if queue in self.ignored_queues:
            raise Empty()
        subscription_path = self._new_queue(queue)
        # Dispatches to _msg_get or _concurrent_msg_get depending on the
        # CONCURRENT_PULLS transport option.
        return getattr(self,
                       self._execution_type() + "_msg_get")(subscription_path)

    def _concurrent_msg_get(self, subscription_path):
        # Concurrent mode: Worker threads fill temp_cache; just drain it.
        if not self.temp_cache[subscription_path].empty():
            msg = self.temp_cache[subscription_path].get(block=True)
            self.qos.append(
                msg.message.message_id, (msg, subscription_path))
            return msg
        raise Empty()

    def _msg_get(self, subscription_path):
        # Synchronous mode: pull directly when the local cache is empty.
        if not self.temp_cache[subscription_path].empty():
            return self.temp_cache[subscription_path].get(block=True)
        logger.info("".join([
            "Pulling messsage using subscription ", subscription_path]))
        resp = self.subscriber.pull(
            subscription_path, self.max_messages,
            return_immediately=self.return_immediately)
        if resp.received_messages:
            for msg in resp.received_messages:
                if self.temp_cache[subscription_path].full():
                    break
                self.qos.append(msg.message.message_id,
                                (msg, subscription_path))
                self.temp_cache[subscription_path].put(msg)
            return self.temp_cache[subscription_path].get(block=True)
        raise Empty()

    def queue_declare(self, queue=None, passive=False, *args, **kwargs):
        """Declare a queue (register its subscription path).

        :param queue: queue name; a random name is generated when omitted
        :type queue: str
        :return: declare-ok tuple (name, message count, consumer count)
        :rtype: queue_declare_ok_t
        """
        queue = queue or 'gcp.gen-%s' % uuid()
        # TODO: need to check case when passive is True
        if passive:
            raise ChannelError(
                'NOT FOUND - no queue {} in host {}'.format(
                    queue, self.connection.client.virtual_host or '/'),
                (50, 10), 'Channel.queue_declare', '404')
        self._new_queue(queue, **kwargs)
        return queue_declare_ok_t(queue, self._size(queue), 0)

    def queue_bind(self, *args, **kwargs):
        """Bind a queue to an exchange: create the Pub/Sub subscription on
        the exchange's topic and set up the local message cache.

        :keyword queue: queue name
        :keyword exchange: exchange (topic) name
        """
        subscription_path = self._new_queue(kwargs.get('queue'))
        topic_path = self.state.exchanges[kwargs.get('exchange')]
        try:
            self.subscriber.create_subscription(
                subscription_path, topic_path,
                ack_deadline_seconds=self.ack_deadline_seconds)
            logger.info("".join(["Created subscription: ", subscription_path]))
        except AlreadyExists:
            # Re-binding an existing subscription is a no-op.
            logger.info("".join(["Subscription already exists: ", subscription_path]))
            pass
        queue = Queue(maxsize=self.max_messages)
        self.temp_cache[subscription_path] = queue
        # if concurrent executions then start worker threads
        if self._execution_type() == "_concurrent":
            if kwargs.get('queue') in self.ignored_queues:
                return
            logger.info("".join([
                "Starting worker: ", subscription_path,
                " with queue size: ", str(self.max_messages)]))
            Worker(
                self.subscriber, subscription_path,
                self.max_messages, queue, self.return_immediately)

    def exchange_declare(self, exchange='', **kwargs):
        """Declare a topic in PubSub.

        :param exchange: exchange (topic) name
        :type exchange: str
        """
        to_add = False
        if exchange not in self.state.exchanges:
            logger.info("".join(["Topic: ", exchange, " not found added in state"]))
            topic_path = self._get_topic_path(exchange)
            try:
                logger.info("Creating new topic: " + exchange)
                self.publisher.create_topic(topic_path)
                to_add = True
            except AlreadyExists:
                # Topic already exists server-side; still record it locally.
                to_add = True
            except Exception as e:
                raise ChannelError(
                    '{0} - no exchange {1!r} in vhost {2!r}'.format(
                        e.__str__(),
                        exchange,
                        self.connection.client.virtual_host or '/'),
                    (50, 10), 'Channel.exchange_declare', '404',
                )
            finally:
                logger.info("".join(["adding topic: ", exchange, " to state"]))
                if to_add:
                    self.state.exchanges[exchange] = topic_path

    def basic_publish(self, message, exchange='', routing_key='',
                      mandatory=False, immediate=False, **kwargs):
        """Publish a message to Pub/Sub, or to Cloud Tasks when the message
        carries an ETA (delayed delivery).

        :param message: kombu message envelope (dict)
        :param exchange: topic name
        :type exchange: str
        """
        if loads(message['body'])['eta']:
            return self._create_cloud_task(exchange, message)
        return self._publish(exchange, message, **kwargs)

    def _publish(self, topic, message, **kwargs):
        """Publish the JSON-encoded message to the topic and wait for the
        publish to complete (returns the server-assigned message id)."""
        topic_path =\
            self.publisher.topic_path(self.project_id, topic)
        message = dumps(message).encode('utf-8')
        future = self.publisher.publish(
            topic_path, message, **kwargs)
        return future.result()

    def _create_cloud_task(self, exchange, message):
        """Schedule an ETA message via Cloud Tasks instead of publishing it
        directly."""
        eta = loads(message['body'])['eta']
        task = self._get_task(eta, exchange, message)
        return self.cloud_task.create_task(self.cloud_task_queue_path, task)

    def _get_task(self, eta, exchange, message):
        """Build the Cloud Tasks request dict that will POST the message to
        the configured Cloud Function publisher at the ETA time."""
        parsed_time = parser.parse(eta.strip())
        ts = timestamp_pb2.Timestamp()
        ts.FromDatetime(parsed_time)
        return {
            "http_request": {
                "http_method": tasks_v2.enums.HttpMethod.POST,
                "oidc_token": {
                    "service_account_email": self.service_account_email,
                },
                "headers": {"Content-type": "application/json"},
                "url": self.transport_options.get("CLOUD_FUNCTION_PUBLISHER"),
                "body": dumps({
                    'destination_topic': exchange,
                    'eta': eta,
                    'message': message
                }).encode('utf-8'),
            },
            # Task name is made unique with a fresh uuid per message.
            "name": self.cloud_task_queue_path + "/tasks/" + "_".join(
                [exchange, uuid()]),
            "schedule_time": ts,
        }

    def _execution_type(self):
        # '_concurrent' selects the Worker-thread pull path; '' the direct one.
        if self.transport_options.get("CONCURRENT_PULLS", True):
            return '_concurrent'
        return ''

    @cached_property
    def publisher(self):
        """PubSub Publisher client (uses application-default credentials)."""
        return pubsub_v1.PublisherClient()

    @cached_property
    def subscriber(self):
        """PubSub Subscriber client (uses application-default credentials)."""
        return pubsub_v1.SubscriberClient()

    @cached_property
    def cloud_task(self):
        """Client connection for Cloud Tasks."""
        return tasks_v2.CloudTasksClient()

    @cached_property
    def return_immediately(self):
        """Whether pull requests return immediately instead of waiting."""
        return self.transport_options.get("RETURN_IMMEDIATELY", True)

    @cached_property
    def service_account_email(self):
        """Service account used for the Cloud Tasks OIDC token; falls back
        to the application-default credentials' account."""
        email = self.transport_options.get("SERVICE_ACCOUNT_EMAIL", None)
        if email:
            return email
        creds, _ = google.auth.default()
        return creds.service_account_email

    @cached_property
    def transport_options(self):
        """PubSub transport-specific configuration dict."""
        return self.connection.client.transport_options

    @cached_property
    def project_id(self):
        """GCP project id (transport option, else GCP_PROJECT_ID env var)."""
        if not self.transport_options.get('PROJECT_ID', ''):
            return os.getenv("GCP_PROJECT_ID")
        return self.transport_options.get('PROJECT_ID', '')

    @cached_property
    def max_messages(self):
        """Maximum messages to pull into the local cache per request."""
        return self.transport_options.get('MAX_MESSAGES', 10)

    @cached_property
    def ack_deadline_seconds(self):
        """Deadline for acknowledgement from the time received.

        This is notified to PubSub while subscribing from the client.
        """
        return self.transport_options.get('ACK_DEADLINE_SECONDS', 60)

    @cached_property
    def cloud_task_queue_path(self):
        """Fully-qualified Cloud Tasks queue path for delayed messages."""
        return self.cloud_task.queue_path(
            self.project_id, self.location, self.delayed_queue)

    @cached_property
    def location(self):
        """Cloud Tasks queue location (region)."""
        return self.transport_options.get('QUEUE_LOCATION', None)

    @cached_property
    def delayed_queue(self):
        """Cloud Tasks queue name used for ETA (delayed) messages."""
        return self.transport_options.get('DELAYED_QUEUE', None)

    @cached_property
    def ignored_queues(self):
        """Queue names this channel must never consume from."""
        return self.transport_options.get('IGNORED_QUEUES', [])
class Transport(virtual.Transport):
    """Kombu transport entry point for the Google Cloud Pub/Sub backend."""
    Channel = Channel
    # Broker state (declared exchanges) shared across channels of a process.
    state = virtual.BrokerState()
    driver_type = 'gcp_pubsub'
    driver_name = 'pubsub_v1'

    def __init__(self, *args, **kwargs):
        super(Transport, self).__init__(*args, **kwargs)
| StarcoderdataPython |
9742034 | <gh_stars>0
from django.apps import AppConfig
class DtConfig(AppConfig):
    """Django application configuration for the 'dt' app."""
    name = 'dt'
| StarcoderdataPython |
6542101 | __author__ = '<NAME>'
from renderchan.module import RenderChanModule
import subprocess
import os, sys
from distutils.version import StrictVersion
from xml.etree import ElementTree
class RenderChanOliveModule(RenderChanModule):
    """RenderChan module for Olive video-editor project files (.ove).

    Only dependency analysis is supported; command-line rendering is not
    implemented, and project files from Olive >= 0.2.0 are not supported.
    """

    def __init__(self):
        RenderChanModule.__init__(self)
        self.conf["binary"] = self.findBinary("olive-editor")
        self.conf["packetSize"] = 0
        self.version = StrictVersion('0.1.0')  # default value until detected

    def getInputFormats(self):
        """File extensions this module can read."""
        return ["ove"]

    def getOutputFormats(self):
        """File extensions this module claims to produce."""
        return ["mp4", "png", "avi"]

    def checkRequirements(self):
        """Probe the Olive binary and detect its version.

        Deactivates the module when the binary is missing, fails to run, or
        reports a version >= 0.2.0 (whose CLI/format is unsupported).
        """
        RenderChanModule.checkRequirements(self)
        if self.active:
            # The CLI features depend on the version
            proc = subprocess.Popen([self.conf['binary'], "-v"], stdout=subprocess.PIPE)
            try:
                outs, errs = proc.communicate(timeout=5)
            # BUG FIX: TimeoutExpired was referenced unqualified but only the
            # subprocess module is imported, so a timeout raised NameError
            # instead of being handled.
            except subprocess.TimeoutExpired:
                proc.kill()
                outs, errs = proc.communicate()
            rc = proc.poll()
            if rc == 0:
                line = outs.decode("utf-8")
                if line.startswith("Olive "):
                    # Pre-0.2.0 builds print "Olive <...>" and lack a usable CLI
                    # version string; assume the supported 0.1.0 line.
                    self.version = StrictVersion("0.1.0")
                else:
                    # Get the version from stdout. An example of the output: "0.2.0-19eabf28\n"
                    self.version = line.rstrip().split("-")[0]
                    self.version = StrictVersion(self.version)
                    print("WARNING: Olive version >= 0.2.0 not supported yet.")
                    self.active = False
            else:
                self.active = False
            if self.active == False:
                print("WARNING: Failed to initialize Olive module.")
        return self.active

    def analyze(self, filename):
        """Parse an .ove project file and return its file dependencies.

        :param filename: path to the Olive project file
        :return: dict with a "dependencies" list of absolute paths
        """
        def _decode_callback(matches):
            # NOTE(review): currently unused helper (hex entity decoding);
            # kept for parity with other renderchan modules — confirm before
            # removing.
            id = matches.group(1)
            try:
                return chr(int(id, 16))
            # Narrowed from a bare except: int()/chr() failures are the only
            # expected errors here.
            except (ValueError, OverflowError):
                return id

        def time_to_frames(time, fps):
            # NOTE(review): currently unused helper converting "1h 2m 3s 4f"
            # style strings to a frame count — confirm before removing.
            if time == None:
                result = 0
            else:
                fps = float(fps)
                split = time.split(' ')
                framesCount = 0
                multiplier_map = {'f': 1, 's': fps, 'm': fps*60, 'h': fps*60*60}
                for field in split:
                    framesCount += float(field[0:-1]) * float(multiplier_map[field[-1]])
                result = int(round(framesCount))
            return result

        info = {"dependencies": []}
        f = open(filename, 'rb')
        tree = ElementTree.parse(f)
        root = tree.getroot()
        # Parse dependencies
        dependencies = []
        # TODO: Detect if file version is compatible with installed version of Olive
        if self.version < StrictVersion('0.2.0'):
            media_tag = root.find(".//media")
            if media_tag:
                for footage_tag in media_tag.iter('footage'):
                    dependencies.append(footage_tag.get('url'))
        else:
            # TODO: Add support for parsing Olive 0.2.0 format
            pass
        f.close()
        # Eliminate empty entries
        for i, val in enumerate(dependencies):
            if dependencies[i] != None:
                info["dependencies"].append(dependencies[i])
        # Dependency paths are stored relative to the project file; make them
        # absolute.
        dirname = os.path.dirname(filename)
        for i, val in enumerate(info["dependencies"]):
            fullpath = os.path.abspath(os.path.join(dirname, info["dependencies"][i]))
            info["dependencies"][i] = fullpath
        return info

    def render(self, filename, outputPath, startFrame, endFrame, format, updateCompletion, extraParams={}):
        """Not implemented: Olive has no usable CLI renderer; aborts."""
        # if self.version < StrictVersion('0.2.0'):
        print()
        print("ERROR: Commandline rendering not implemented for Olive. Aborting.", file=sys.stderr)
        print()
        exit(1)
| StarcoderdataPython |
12827626 | from django.core.management import call_command
from django.core.management.base import BaseCommand
from core.cli.mixins import CliInteractionMixin
from redmine import Redmine
class Command(BaseCommand, CliInteractionMixin):
    """Django management command that validates the Redmine instance and
    then runs every individual sync command in dependency order."""
    help = "Syncronizes all data to Redmine instance."

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared Redmine client used for instance validation.
        self.redmine = Redmine()

    def handle(self, *args, **options):
        """Validate the Redmine instance, then run the sync sub-commands."""
        print()
        self.stdout.write('VALIDATING REDMINE INSTANCE')
        print()
        if self.redmine.instance_valid() is False:
            # Report every validation error, then abort the whole sync.
            self.stderr.write(self.style.ERROR('Errors:'))
            for e in self.redmine.instance_errors():
                self.stderr.write(self.style.ERROR(f'- {e}'))
            self.exit()
        # Order matters: trackers/fields/projects must exist before the
        # entities that reference them.
        call_command('sync_trackers')
        call_command('sync_score_field')
        call_command('sync_projects')
        call_command('sync_categories')
        call_command('sync_versions')
        call_command('sync_issues')
| StarcoderdataPython |
4894946 | <filename>tests/tree_xml_parser_note_processor_tests.py
from unittest import TestCase
from mock import patch
from regparser.test_utils.node_accessor import NodeAccessor
from regparser.test_utils.xml_builder import XMLBuilder
from regparser.tree.xml_parser import note_processor
class NoteProcessingTests(TestCase):
    """Tests for the NOTES XML -> node-tree conversion."""

    def test_integration(self):
        """Verify that a NOTE tag is converted into an appropriate tree. We
        also expect no warnings to be emitted"""
        # Build a NOTES element with a heading and nested numbered/lettered
        # paragraphs.
        with XMLBuilder("NOTES") as ctx:
            ctx.HD("Notes:")
            ctx.P("1. 111")
            ctx.P("a. 1a1a1a")
            ctx.P("b. 1b1b1b")
            ctx.P("2. 222")
        matcher = note_processor.NoteMatcher()
        self.assertTrue(matcher.matches(ctx.xml))
        # Patch the paragraph-processor logger so any warning during
        # derivation is detectable (and asserted absent below).
        to_patch = 'regparser.tree.xml_parser.paragraph_processor.logger'
        with patch(to_patch) as logger:
            results = matcher.derive_nodes(ctx.xml)
        self.assertFalse(logger.warning.called)
        self.assertEqual(len(results), 1)
        # Lettered paragraphs nest under their numbered parents.
        tree = NodeAccessor(results[0])
        self.assertEqual(tree['1'].text, '1. 111')
        self.assertEqual(tree['1']['a'].text, 'a. 1a1a1a')
        self.assertEqual(tree['1']['b'].text, 'b. 1b1b1b')
        self.assertEqual(tree['2'].text, '2. 222')
| StarcoderdataPython |
3559777 | <gh_stars>1000+
# The major idea of the overall GNN model explanation
import argparse
import os
import dgl
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from dgl import load_graphs
from models import dummy_gnn_model
from NodeExplainerModule import NodeExplainerModule
from utils_graph import extract_subgraph, visualize_sub_graph
def main(args):
    """Explain one node of a trained GNN: train an edge-mask explainer on the
    node's k-hop computation subgraph and visualize the edge importances."""
    # load an existing model or ask for training a model
    model_path = os.path.join('./', 'dummy_model_{}.pth'.format(args.dataset))
    if os.path.exists(model_path):
        model_stat_dict = th.load(model_path)
    else:
        raise FileExistsError('No Saved Model file. Please train a GNN model first...')
    # load graph, feat, and label
    g_list, label_dict = load_graphs('./'+args.dataset+'.bin')
    graph = g_list[0]
    labels = graph.ndata['label']
    feats = graph.ndata['feat']
    num_classes = max(labels).item() + 1
    feat_dim = feats.shape[1]
    hid_dim = label_dict['hid_dim'].item()
    # create a model and load from state_dict
    dummy_model = dummy_gnn_model(feat_dim, hid_dim, num_classes)
    dummy_model.load_state_dict(model_stat_dict)
    # Choose a node of the target class to be explained and extract its subgraph.
    # Here just pick the first one of the target class.
    target_list = [i for i, e in enumerate(labels) if e == args.target_class]
    n_idx = th.tensor([target_list[0]])
    # Extract the computation graph within k-hop of target node and use it for explainability
    sub_graph, ori_n_idxes, new_n_idx = extract_subgraph(graph, n_idx, hops=args.hop)
    # Sub-graph features.
    sub_feats = feats[ori_n_idxes, :]
    # create an explainer
    explainer = NodeExplainerModule(model=dummy_model,
                                    num_edges=sub_graph.number_of_edges(),
                                    node_feat_dim=feat_dim)
    # define optimizer
    optim = th.optim.Adam(explainer.parameters(), lr=args.lr, weight_decay=args.wd)
    # train the explainer for the given node: the frozen model's one-hot
    # prediction is the target the masked logits must reproduce.
    dummy_model.eval()
    model_logits = dummy_model(sub_graph, sub_feats)
    model_predict = F.one_hot(th.argmax(model_logits, dim=-1), num_classes)
    for epoch in range(args.epochs):
        explainer.train()
        exp_logits = explainer(sub_graph, sub_feats)
        loss = explainer._loss(exp_logits[new_n_idx], model_predict[new_n_idx])
        optim.zero_grad()
        loss.backward()
        optim.step()
    # visualize the importance of edges (sigmoid maps mask logits to [0, 1])
    edge_weights = explainer.edge_mask.sigmoid().detach()
    visualize_sub_graph(sub_graph, edge_weights.numpy(), ori_n_idxes, n_idx)
if __name__ == '__main__':
    # Command-line entry point: parse explainer hyperparameters and run.
    parser = argparse.ArgumentParser(description='Demo of GNN explainer in DGL')
    parser.add_argument('--dataset', type=str, default='syn1',
                        help='The dataset to be explained.')
    parser.add_argument('--target_class', type=int, default='1',
                        help='The class to be explained. In the synthetic 1 dataset, Valid option is from 0 to 4'
                             'Will choose the first node in this class to explain')
    parser.add_argument('--hop', type=int, default='2',
                        help='The hop number of the computation sub-graph. For syn1 and syn2, k=2. For syn3, syn4, and syn5, k=4.')
    parser.add_argument('--epochs', type=int, default=200, help='The number of epochs.')
    parser.add_argument('--lr', type=float, default=0.01, help='The learning rate.')
    parser.add_argument('--wd', type=float, default=0.0, help='Weight decay.')
    args = parser.parse_args()
    print(args)
    main(args)
| StarcoderdataPython |
4870658 | <filename>pyjpboatrace/utils/str2num.py
def str2num(s: str, typ: type, default_val=None):
    ''' string to number whose type is the type given as typ.
        typ must be int, float or complex

        Note: Failure of casting returns default_val
    '''
    if typ not in (int, float, complex):
        raise NotImplementedError(
            f'typ must be int, float or complex, but {typ.__name__} given.'
        )
    try:
        return typ(s)
    # BUG FIX: a non-string input (e.g. None) raises TypeError, not
    # ValueError, which previously escaped and broke the documented
    # "failure returns default_val" contract.
    except (ValueError, TypeError):
        return default_val
| StarcoderdataPython |
1723819 | from email.message import EmailMessage
import logging
from mailbox import MH
from operator import itemgetter
from pathlib import Path
from typing import List, Union
from mailbits import email2dict
import pytest
from outgoing import Sender, from_dict
from outgoing.senders.mailboxes import MHSender
@pytest.mark.parametrize("folder", [None, "work", ["important", "work"]])
def test_mh_construct(
    folder: Union[str, List[str], None], monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    """Constructing an MH sender resolves relative paths against the config
    file's directory and leaves the mailbox unopened."""
    monkeypatch.chdir(tmp_path)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
            "folder": folder,
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    assert isinstance(sender, Sender)
    assert isinstance(sender, MHSender)
    assert sender.dict() == {
        "configpath": tmp_path / "foo.txt",
        "path": tmp_path / "inbox",
        "folder": folder,
    }
    # The underlying mailbox object is only created when the sender is used.
    assert sender._mbox is None
def test_mh_send_no_folder_new_path(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    tmp_path: Path,
) -> None:
    """Sending with no folder configured creates the MH mailbox, stores the
    message at the root, and logs open/add/close events."""
    caplog.set_level(logging.DEBUG, logger="outgoing")
    monkeypatch.chdir(tmp_path)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender as s:
        # The context manager returns the sender itself.
        assert sender is s
        sender.send(test_email1)
    inbox = MH("inbox")
    assert inbox.list_folders() == []
    msgs = list(inbox)
    assert len(msgs) == 1
    assert email2dict(test_email1) == email2dict(msgs[0])
    assert caplog.record_tuples == [
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Opening MH mailbox at {tmp_path/'inbox'}, root folder",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.INFO,
            f"Adding e-mail {test_email1['Subject']!r} to MH mailbox at"
            f" {tmp_path/'inbox'}, root folder",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Closing MH mailbox at {tmp_path/'inbox'}, root folder",
        ),
    ]
def test_mh_send_folder_str_new_path(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    tmp_path: Path,
) -> None:
    """A string ``folder`` creates that subfolder in a fresh mailbox and
    delivers the message there, with matching log output."""
    caplog.set_level(logging.DEBUG, logger="outgoing")
    monkeypatch.chdir(tmp_path)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
            "folder": "work",
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender:
        sender.send(test_email1)
    inbox = MH("inbox")
    assert inbox.list_folders() == ["work"]
    work = inbox.get_folder("work")
    msgs = list(work)
    assert len(msgs) == 1
    assert email2dict(test_email1) == email2dict(msgs[0])
    assert caplog.record_tuples == [
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Opening MH mailbox at {tmp_path/'inbox'}, folder 'work'",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.INFO,
            f"Adding e-mail {test_email1['Subject']!r} to MH mailbox at"
            f" {tmp_path/'inbox'}, folder 'work'",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Closing MH mailbox at {tmp_path/'inbox'}, folder 'work'",
        ),
    ]
def test_mh_send_folder_list_new_path(
    caplog: pytest.LogCaptureFixture,
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    tmp_path: Path,
) -> None:
    """A list ``folder`` creates the nested folder chain in a fresh mailbox
    and delivers the message to the innermost folder."""
    caplog.set_level(logging.DEBUG, logger="outgoing")
    monkeypatch.chdir(tmp_path)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
            "folder": ["important", "work"],
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender:
        sender.send(test_email1)
    inbox = MH("inbox")
    assert inbox.list_folders() == ["important"]
    important = inbox.get_folder("important")
    assert important.list_folders() == ["work"]
    work = important.get_folder("work")
    msgs = list(work)
    assert len(msgs) == 1
    assert email2dict(test_email1) == email2dict(msgs[0])
    assert caplog.record_tuples == [
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Opening MH mailbox at {tmp_path/'inbox'}, folder 'important'/'work'",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.INFO,
            f"Adding e-mail {test_email1['Subject']!r} to MH mailbox at"
            f" {tmp_path/'inbox'}, folder 'important'/'work'",
        ),
        (
            "outgoing.senders.mailboxes",
            logging.DEBUG,
            f"Closing MH mailbox at {tmp_path/'inbox'}, folder 'important'/'work'",
        ),
    ]
def test_mh_send_no_folder_extant_path(
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    test_email2: EmailMessage,
    tmp_path: Path,
) -> None:
    """Sending into an existing mailbox with no folder appends to the root
    without disturbing the pre-existing message."""
    monkeypatch.chdir(tmp_path)
    # Pre-populate the mailbox root with one message.
    inbox = MH("inbox")
    inbox.lock()
    inbox.add(test_email1)
    inbox.unlock()
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender:
        sender.send(test_email2)
    assert inbox.list_folders() == []
    msgs = list(inbox)
    assert len(msgs) == 2
    # Order in the mailbox is not guaranteed; sort by Subject for comparison.
    msgs.sort(key=itemgetter("Subject"))
    assert email2dict(test_email1) == email2dict(msgs[0])
    assert email2dict(test_email2) == email2dict(msgs[1])
def test_mh_send_folder_str_extant_path(
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    test_email2: EmailMessage,
    tmp_path: Path,
) -> None:
    """Sending with a folder into an existing mailbox creates the folder and
    delivers there; the root message is left untouched."""
    monkeypatch.chdir(tmp_path)
    inbox = MH("inbox")
    inbox.add(test_email1)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
            "folder": "work",
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender:
        sender.send(test_email2)
    assert inbox.list_folders() == ["work"]
    work = inbox.get_folder("work")
    msgs = list(work)
    assert len(msgs) == 1
    assert email2dict(test_email2) == email2dict(msgs[0])
def test_mh_send_extant_folder_str_extant_path(
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    test_email2: EmailMessage,
    tmp_path: Path,
) -> None:
    """Sending into an already-existing folder appends alongside the folder's
    pre-existing message."""
    monkeypatch.chdir(tmp_path)
    inbox = MH("inbox")
    # Pre-create the target folder with one message in it.
    inbox.add_folder("work").add(test_email1)
    sender = from_dict(
        {
            "method": "mh",
            "path": "inbox",
            "folder": "work",
        },
        configpath=str(tmp_path / "foo.txt"),
    )
    with sender:
        sender.send(test_email2)
    assert inbox.list_folders() == ["work"]
    work = inbox.get_folder("work")
    msgs = list(work)
    assert len(msgs) == 2
    # Order in the mailbox is not guaranteed; sort by Subject for comparison.
    msgs.sort(key=itemgetter("Subject"))
    assert email2dict(test_email1) == email2dict(msgs[0])
    assert email2dict(test_email2) == email2dict(msgs[1])
def test_mh_send_folder_list_extant_path(
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    test_email2: EmailMessage,
    tmp_path: Path,
) -> None:
    """A folder path given as a list is created as nested folders."""
    monkeypatch.chdir(tmp_path)
    mbox = MH("inbox")
    mbox.add(test_email1)
    cfg = {"method": "mh", "path": "inbox", "folder": ["important", "work"]}
    sender = from_dict(cfg, configpath=str(tmp_path / "foo.txt"))
    with sender:
        sender.send(test_email2)
    assert mbox.list_folders() == ["important"]
    important = mbox.get_folder("important")
    assert important.list_folders() == ["work"]
    delivered = list(important.get_folder("work"))
    assert len(delivered) == 1
    assert email2dict(test_email2) == email2dict(delivered[0])
def test_mh_send_partially_extant_folder_list(
    monkeypatch: pytest.MonkeyPatch,
    test_email1: EmailMessage,
    test_email2: EmailMessage,
    tmp_path: Path,
) -> None:
    """Only the missing tail of a nested folder path is created."""
    monkeypatch.chdir(tmp_path)
    mbox = MH("inbox")
    mbox.add_folder("important").add(test_email1)
    mbox.add_folder("work")
    cfg = {"method": "mh", "path": "inbox", "folder": ["important", "work"]}
    sender = from_dict(cfg, configpath=str(tmp_path / "foo.txt"))
    with sender:
        sender.send(test_email2)
    assert sorted(mbox.list_folders()) == ["important", "work"]
    # The pre-existing top-level "work" folder must stay untouched.
    assert list(mbox.get_folder("work")) == []
    important = mbox.get_folder("important")
    assert important.list_folders() == ["work"]
    delivered = list(important.get_folder("work"))
    assert len(delivered) == 1
    assert email2dict(test_email2) == email2dict(delivered[0])
def test_mh_send_no_context(
    monkeypatch: pytest.MonkeyPatch, test_email1: EmailMessage, tmp_path: Path
) -> None:
    """send() also works without entering the sender's context manager."""
    monkeypatch.chdir(tmp_path)
    cfg = {"method": "mh", "path": "inbox"}
    sender = from_dict(cfg, configpath=str(tmp_path / "foo.txt"))
    sender.send(test_email1)
    mbox = MH("inbox")
    assert mbox.list_folders() == []
    delivered = list(mbox)
    assert len(delivered) == 1
    assert email2dict(test_email1) == email2dict(delivered[0])
def test_mh_close_unopened(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
    """Closing a sender that was never opened raises ValueError."""
    monkeypatch.chdir(tmp_path)
    cfg = {"method": "mh", "path": "inbox"}
    sender = from_dict(cfg, configpath=str(tmp_path / "foo.txt"))
    assert isinstance(sender, MHSender)
    with pytest.raises(ValueError) as excinfo:
        sender.close()
    assert str(excinfo.value) == "Mailbox is not open"
| StarcoderdataPython |
4851939 | <reponame>shenwei0329/rdm-flasky<gh_stars>0
# -*- coding: utf-8 -*-
#
# 生成数据文件,并通过邮件发送
# ============================
# 2019.8.1 @Chengdu
#
from DataHandler import exceltools
import mongodb_class
# Shared MongoDB connection and the next free row index on the current sheet.
mongo_db = mongodb_class.mongoDB()
line_number = 1
def write_title(_book, _titles):
    """Write the column titles into row 0 of *_book*, one title per column."""
    for col, title in enumerate(_titles):
        _book.write((0, col, title))
def do_search(table, search):
    """
    Fetch all documents of a table that match a condition.
    :param table: collection name
    :param search: Mongo query document
    :return: list of matching documents
    """
    cursor = mongo_db.handler(table, "find", search)
    return list(cursor)
def func_jira(_book):
    """
    Write JIRA worklog rows (issues matching "JDWL-") into the current sheet.
    :param _book: excel document wrapper (exceltools.ExcelTools)
    :return:
    """
    global mongo_db, line_number
    mongo_db.connect_db('WORK_LOGS')
    _rec = do_search('worklog', {'issue': {'$regex': "JDWL-"}})
    _l = line_number
    for _r in _rec:
        # Columns: date (drop the T-separated time part), author, comment, time spent.
        _book.write((_l, 0, _r['started'].split('T')[0]))
        _book.write((_l, 1, _r['author']))
        _book.write((_l, 2, _r['comment']))
        _book.write((_l, 3, _r['timeSpent']))
        _l += 1
    # Advance the shared row cursor so the next writer continues below.
    line_number = _l
def func_star_task(_book):
    """
    Write star-task rows (category 嘉定SBU项目) into the current sheet.

    Rows are appended starting at the module-level ``line_number`` cursor,
    which is advanced afterwards (mirroring func_jira) so that any later
    writer on the same sheet does not overwrite these rows.
    :param _book: excel document wrapper (exceltools.ExcelTools)
    :return:
    """
    global mongo_db, line_number
    mongo_db.connect_db('ext_system')
    _rec = do_search('star_task', {u'分类': u"嘉定SBU项目"})
    _l = line_number
    for _r in _rec:
        _book.write((_l, 0, _r[u'完成时间'].split(' ')[0]))
        _book.write((_l, 1, _r[u'责任人']))
        _book.write((_l, 2, _r[u'任务描述']))
        _book.write((_l, 3, u"当天"))
        _l += 1
    # Bug fix: the original never wrote the cursor back, unlike func_jira,
    # leaving line_number stale for subsequent writers on the same sheet.
    line_number = _l
def func_member_jira(_members, _book):
    """
    Write per-member JIRA worklog rows whose project or comment mentions 嘉定.
    :param _members: list of member names
    :param _book: excel document wrapper (exceltools.ExcelTools)
    :return:
    """
    global mongo_db, line_number
    _l = line_number
    mongo_db.connect_db('WORK_LOGS')
    for _member in _members:
        _rec = do_search('worklog', {'author': _member})
        for _r in _rec:
            # Keep only entries related to the 嘉定 project.
            if u"嘉定" in _r["project"] or u"嘉定" in _r["comment"]:
                _book.write((_l, 0, _r['started'].split('T')[0]))
                _book.write((_l, 1, _r['author']))
                _book.write((_l, 2, _r['comment']))
                _book.write((_l, 3, _r['timeSpent']))
                _book.write((_l, 4, _r['project']))
                _l += 1
    # Advance the shared row cursor for the next writer on this sheet.
    line_number = _l
def func_member_star_task(_members, _book):
    """
    Write per-member star-task rows whose description mentions 嘉定 or SBU.
    :param _members: list of member names
    :param _book: excel document wrapper (exceltools.ExcelTools)
    :return:
    """
    global mongo_db, line_number
    mongo_db.connect_db('ext_system')
    _l = line_number
    for _member in _members:
        _rec = do_search('star_task', {u'责任人': _member})
        for _r in _rec:
            if u"嘉定" in _r[u'任务描述'] or "SBU" in _r[u'任务描述']:
                _book.write((_l, 0, _r[u'完成时间'].split(' ')[0]))
                _book.write((_l, 1, _r[u'责任人']))
                _book.write((_l, 2, _r[u'任务描述']))
                _book.write((_l, 3, u"当天"))
                _book.write((_l, 4, u"项目要求"))
                _l += 1
    # Bug fix: keep the shared row cursor in sync (func_member_jira does this),
    # so later writers on the same sheet cannot overwrite these rows.
    line_number = _l
def main():
    """
    Main program: build the daily work-log workbook with two sheets and
    email-ready content (project log and per-member log).
    :return:
    """
    global line_number
    """创建excel文件"""
    # Create the excel file and the project-wide work-log sheet.
    book = exceltools.ExcelTools()
    book.open("pj_work_daily")
    book.add_sheet(u"工作日志")
    write_title(book, [u"日期", u"人员", u"内容", u"工时"])
    func_jira(book)
    func_star_task(book)
    # Per-member sheet: restart the shared row cursor below the title row.
    members = [u"查明", u"孙莎莎", u"王奕骅", u"梁雨", u"武静"]
    book.add_sheet(u"人员日志")
    line_number = 1
    write_title(book, [u"日期", u"人员", u"内容", u"工时", u"项目"])
    func_member_jira(members, book)
    func_member_star_task(members, book)
    book.close()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5019702 | # Copyright 2018 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model module - hosting database models."""
import json
from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, JSON, String, UnicodeText, types
from sqlalchemy.ext.declarative import declarative_base
class BaseRecordRepr:
    """
    Mixin passed to declarative_base so every BaseRecord subclass gets an
    automatic __repr__ listing its data-descriptor attributes (columns).
    """
    def __repr__(self):
        """Return a "ClassName: attr=value ..." string for this object."""
        cls = self.__class__
        parts = []
        for name, attr in cls.__dict__.items():
            # Data descriptors (anything with __set__) are the mapped columns.
            if hasattr(attr, "__set__"):
                parts.append(f"{name}={self.__getattribute__(name)}")
        return f"{cls.__name__}: " + " ".join(parts)
BaseRecord = declarative_base(cls=BaseRecordRepr)
class JSONType(types.TypeDecorator):  # pylint: disable=abstract-method
    """JSON column type that degrades gracefully on non-MySQL backends.

    MySQL gets a native JSON column; every other dialect (e.g. sqlite in
    tests) stores the JSON-serialized text in a UnicodeText column.
    """
    impl = UnicodeText
    def load_dialect_impl(self, dialect):
        """Pick the concrete column type for *dialect*.

        Args:
            dialect (object): SQLAlchemy dialect object

        Returns:
            object: native JSON() descriptor on MySQL, UnicodeText otherwise
        """
        if dialect.name == "mysql":
            return dialect.type_descriptor(JSON())
        return dialect.type_descriptor(self.impl)
    def process_bind_param(self, value, dialect):
        """Serialize *value* to a JSON string for non-MySQL dialects.

        Args:
            value (object): a JSON-dumpable object (or None)
            dialect (object): the dialect object

        Returns:
            the value unchanged on MySQL / None, otherwise its JSON string
        """
        if dialect.name == "mysql" or value is None:
            return value
        return json.dumps(value)
    def process_result_value(self, value, dialect):
        """Deserialize a JSON string fetched from a non-MySQL dialect.

        Args:
            value: stored value (JSON text on non-MySQL dialects, or None)
            dialect (object): the dialect object

        Returns:
            the value unchanged on MySQL / None, otherwise the parsed object
        """
        if dialect.name == "mysql" or value is None:
            return value
        return json.loads(value)
class EventRecord(BaseRecord):
    """Event model.

    Args:
        args (list) : arguments list passed to the BaseRecord constructor
        kwargs (dict) : arguments dict passed to the BaseRecord constructor
    """
    __tablename__ = "event"
    id = Column(Integer, primary_key=True)
    source_type = Column(String(250), nullable=False)
    fingerprint = Column(String(250))
    owner = Column(String(250))
    event_metadata = Column(JSONType())
    data = Column(JSONType())
    # Lifecycle timestamps; only received_at is set automatically.
    received_at = Column(DateTime, default=datetime.utcnow)
    sent_at = Column(DateTime, default=None)
    escalated_at = Column(DateTime, default=None)
    processed_at = Column(DateTime, default=None)
    def __init__(self, *args, **kwargs):
        # Transient (non-column) bookkeeping flags; not persisted.
        self.new = False
        self.owner_email_overridden = False
        super().__init__(*args, **kwargs)
    def update_metadata(self, metadata):
        """Update optional metadata for the event.

        Args:
            metadata (dict): arbitrary metadata for the event
        """
        # Merge into any existing metadata; otherwise adopt the dict as-is.
        if self.event_metadata:
            self.event_metadata.update(metadata)
        else:
            self.event_metadata = metadata
class IgnoreFingerprintRecord(BaseRecord):
    """Acceptedrisk model.
    """
    __tablename__ = "ignore_fingerprint"
    id = Column(Integer, primary_key=True)
    fingerprint = Column(String(250))
    ignore_type = Column(String(50))  # one of the constants declared below
    reported_at = Column(DateTime, default=datetime.utcnow)
    expires_at = Column(DateTime, default=None)  # optional expiry timestamp
    record_metadata = Column(JSONType())
    # Supported ignore_type values.
    SNOOZE = "snooze"
    ACCEPT_RISK = "acceptrisk"
    FALSE_POSITIVE = "falsepositive"
    ACKNOWLEDGE = "acknowledge"
    ESCALATE_MANUALLY = "escalate_manually"
| StarcoderdataPython |
271987 | <filename>plugins/Weather.py
from robot.sdk.AbstractPlugin import AbstractPlugin
import requests
from robot import logging,config
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin):
    """Weather-lookup skill: fetches a 3-day forecast and speaks it aloud."""
    def handle(self, query):
        """Query the HeWeather API for the configured city and read out the forecast."""
        logger.info('命中 <天气> 插件')
        city = config.get('/location')
        url = 'https://free-api.heweather.net/s6/weather/forecast?parameters'
        query_params = {
            "location": city,
            "key": '<KEY>'
        }
        response = requests.get(url, params=query_params)
        response.encoding = 'utf-8'
        try:
            forecasts = response.json()['HeWeather6'][0]['daily_forecast']
            logger.debug(forecasts)
            res = '{}天气'.format(city)
            day_label = ['今天', '明天', '后天']
            for i, forecast in enumerate(forecasts):
                # Pull min/max temperatures and day/night conditions from the JSON payload.
                tmp_min = forecast['tmp_min']
                tmp_max = forecast['tmp_max']
                cond_txt_d = forecast['cond_txt_d']
                cond_txt_n = forecast['cond_txt_n']
                res += '{}:白天{},夜间{},气温{}度到{}度,'.format(day_label[i], cond_txt_d, cond_txt_n, tmp_max, tmp_min)
            self.con.say(res, True)
            logger.info(res)
        except Exception as e:
            logger.error(e)
            self.con.say("天气查询失败了!", True)
    def isValid(self, query):
        """Trigger this skill when the query mentions 天气 (weather)."""
        return '天气' in query
1824502 | <reponame>hrozan/utfpr-final-paper<filename>legacy/smart-object/main.py
import json
import logging
import time
from app.config import get_config, DEVELOPMENT
from app.network import mqtt_client_factory, fetch_broker_config
from app.system import get_system_information
DATA_TOPIC = 'system/data'
def main():
    """Publish system metrics to the MQTT broker every two seconds (never returns)."""
    cfg = get_config()
    print("Starting application")
    if cfg.env == DEVELOPMENT:
        print("Running in development")
        logging.basicConfig(level=logging.DEBUG)
    broker_cfg = fetch_broker_config(cfg)
    mqtt_client = mqtt_client_factory(broker_cfg)
    while True:
        system_info = get_system_information()
        mqtt_client.publish(DATA_TOPIC, json.dumps(system_info))
        logging.info("Published in %s %s", DATA_TOPIC, system_info)
        time.sleep(2)
if __name__ == '__main__':
    # Run until the user interrupts with Ctrl-C, then exit cleanly.
    try:
        main()
    except KeyboardInterrupt:
        logging.info("Exit Gracefully")
        exit(0)
| StarcoderdataPython |
4806972 | <gh_stars>10-100
from django.db import models
from auditable.models import Commentable
class ExclusionAgreement(Commentable):
    """
    Container for a single instance of "Exclusion Agreement"
    """
    # No fields of its own: records attach via ExclusionAgreementRecord's FK.
    class Meta:
        db_table = 'compliance_report_exclusion_agreement'
        db_table_comment = 'Container for a single instance of ' \
                           '"Exclusion Agreement"'
class ExclusionAgreementRecord(Commentable):
    """
    Line items for "Exclusion Agreement".
    """
    # Parent agreement; PROTECT prevents deleting an agreement with records.
    exclusion_agreement = models.ForeignKey(
        ExclusionAgreement,
        related_name='records',
        on_delete=models.PROTECT,
        null=False
    )
    transaction_type = models.ForeignKey(
        'TransactionType',
        on_delete=models.PROTECT,
        null=False
    )
    fuel_type = models.ForeignKey(
        'ApprovedFuel',
        on_delete=models.PROTECT,
        null=False
    )
    transaction_partner = models.CharField(
        max_length=200,
        blank=False,
        null=False,
        db_comment="Legal organization name of the transaction partner. This "
                   "is a free form text field with auto-suggested values from "
                   "existing Organization names."
    )
    postal_address = models.CharField(
        max_length=200,
        blank=False,
        null=False,
        db_comment="Contains the transaction partner address. This is a free "
                   "form text field with auto-suggested values from existing "
                   "Organization addresses."
    )
    # Whole-unit quantities (no decimal places).
    quantity = models.DecimalField(
        blank=False,
        null=False,
        decimal_places=0,
        max_digits=15,
        db_comment="Quantity of fuel purchased or sold."
    )
    quantity_not_sold = models.DecimalField(
        blank=False,
        null=False,
        decimal_places=0,
        max_digits=15,
        db_comment="Quantity of fuel not sold or supplied within the "
                   "Compliance Period."
    )
    class Meta:
        db_table = 'compliance_report_exclusion_agreement_record'
        ordering = ['id']
        db_table_comment = 'Line items for "Exclusion Agreement".'
| StarcoderdataPython |
83011 | import time
import multiprocessing as mp
from multiprocessing import Pool as ProcessPool
import numpy as np
import pandas as pd
from floris.utils.tools import valid_ops as vops
from floris.utils.tools import farm_config as fconfig
from floris.utils.visualization import wflo_eval as vweval
from floris.utils.visualization import wflo_opt as vwopt
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MAIN #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class LayoutPower(object):
    """Evaluate the power production and cost of a wind-farm layout.

    Wake deficits are computed per wind direction (optionally in parallel
    through a process pool), combined with the discretized joint wind
    speed/direction distribution, and converted to power and a cost
    metric (e.g. LCOE).
    """
    def __init__(self, configs, **kwargs):
        self.config = configs
        # self.wtnum = None
        # self.layout = None
        # self.yawed = None
        # self.speed = None
        self.params = configs["param"]
        self.vbins = configs["vbins"]
        self.wbins = configs["wbins"]
        self.turb = configs["turb"]
        self.bins = (self.vbins, self.wbins)
        self.uniform = self.uniform_check(configs["param"])
        self.wdcdf = vops.wind_speed_dist()[1]
        self.windn = configs["wind"]
        self.wmn = configs["wm"]
        self.wm = self.models(configs["wm"], "wm")
        self.wsmn = configs["wsm"]
        self.wsm = self.models(configs["wsm"], "wsm")
        self.timn = configs["tim"]
        self.tim = self.models(configs["tim"], "tim")
        self.costn = configs["cost"]
        self.cost = self.models(configs["cost"], "cost")
        self.wdepth = configs["wdepth"]
        self.pool = ProcessPool(int(mp.cpu_count()))
    def initial(self, layout, **kwargs):
        """Bind a layout and derive the turbine/wind tables used by evaluations."""
        self.layout = layout
        self.wtnum = layout.shape[0]
        self.yawed = kwargs.get("yawed", None)
        if not kwargs.get("params", None):
            self.param = self.params_uniform(self.wtnum)
            self.pow_curve = vops.params_loader(self.param["power_curve"][0]).pow_curve
        else:
            self.param = self.params_nonuniform(kwargs["params"])
        self.speed = (np.min(self.param["v_in"]), np.max(self.param["v_out"]))
        self.v_bin, self.v_point, self.w_bin, self.w_point = \
            self.discretization(self.bins, self.speed)
        self.wind, self.wind_pdf = self.data_load('wind'), self.data_load('pdf').values
        assert self.wind.shape[0] == self.w_point.shape[0]
        self.capacity = self.param["P_rated"].values
    def uniform_check(self, params):
        """Return True when a single parameter set applies to every turbine."""
        if isinstance(params, list):
            return len(params) <= 1
        return True
    def layout_check(self):
        # TODO: validate layout geometry (spacing, boundary).
        pass
    def params_uniform(self, num):
        """Replicate the single turbine parameter row *num* times."""
        # Load the parameter table once instead of twice (the original
        # called params_loader(...).params() for values and columns).
        loaded = vops.params_loader(self.params).params()
        return pd.DataFrame(np.repeat(loaded.values, num, axis=0),
                            columns=loaded.columns)
    def params_nonuniform(self, params):  # TODO
        """Placeholder for per-turbine parameter sets (not implemented)."""
        self.uniform = False
        return None
    def data_load(self, data):
        """Load discretized wind data ('wind' or 'pdf') for this farm."""
        return vops.winds_loader(data, self.windn, self.bins, self.speed)
    def models(self, name, model):
        """Resolve a named model of the given kind (wm/wsm/tim/cost)."""
        return vops.find_and_load_model(name, model)
    def discretization(self, bins, speeds):
        """Discretize the wind speed/direction domain into bins and points."""
        return vops.winds_discretization(bins, speeds)
    def unpack_nonuniform(self):
        # TODO: split a nonuniform solution vector into (layout, params).
        pass
    def plot_layout(self, layout, theta=0, annotate=False):
        """Plot the layout rotated into the wind-direction frame *theta*."""
        return vweval.layout_plot(
            vops.coordinate_transform(layout, theta), annotate)
    def wakes(self, mprocess=False):
        """Compute wake deficits for every wind direction.

        Returns an array of shape (n_speeds, n_turbines, n_directions).
        """
        wd_num, ws_num = self.w_point.shape[0], self.v_point.shape[0]
        if mprocess:
            # One packed argument tuple per wind direction for the pool worker.
            args = list(zip(list(self.w_point), [self.v_point] * wd_num,
                            [self.wm] * wd_num, [self.wsm] * wd_num,
                            [self.tim] * wd_num, [self.turb] * wd_num,
                            [self.param] * wd_num, [self.layout] * wd_num,))
            result = self.pool.map_async(deficits, args); result.wait()
            wt_deficits = np.transpose(np.array(result.get()), (1, 2, 0))
        else:
            wt_deficits = np.zeros((wd_num, ws_num, self.wtnum))
            for i, wd in enumerate(self.w_point):
                wt_deficits[i, :, :] = self.deficits(wd, self.layout)
            wt_deficits = np.transpose(wt_deficits, (1, 2, 0))
        return wt_deficits
    def one_wakes(self, layout, theta, mprocess=False, **kwargs):
        """Compute wake deficits for a single wind direction *theta*."""
        self.initial(layout, **kwargs)
        if mprocess:
            args = list(zip([theta], [self.v_point], [self.wm], [self.wsm],
                            [self.tim], [self.turb], [self.param], [self.layout]))
            result = self.pool.map_async(deficits, args); result.wait()
            return result
        else:
            return self.deficits(theta, self.layout)
    def powers_old(self, deficits, params):
        """Legacy per-turbine power integration (kept for reference)."""
        v_in, v_out, power_curve = params["v_in"], params["v_out"], \
            vops.params_loader(params["power_curve"]).pow_curve
        v_bins = vops.winds_discretization(self.bins, (v_in, v_out))[0]
        v_bins_j_1, v_bins_j, wind_freq_bins = v_bins[:-1], v_bins[1:], self.wind["w_l-1"]
        c_list, k_list = self.wind["c"], self.wind["k"]
        power_cdf_bins = np.zeros(len(self.wind["l-1"]))
        no_wake_power_cdf_bins = np.zeros(len(self.wind["l-1"]))
        for i in range(len(self.wind["l-1"])):
            pr_v_bins = self.wdcdf(v_bins_j, c_list[i], k_list[i]) - self.wdcdf(v_bins_j_1, c_list[i], k_list[i])
            power_bins = np.vectorize(power_curve)(((v_bins_j_1 + v_bins_j) / 2) * (1 - deficits[:, i]))
            no_wake_power_bins = np.vectorize(power_curve)((v_bins_j_1 + v_bins_j) / 2)
            power_cdf_bins[i] = np.dot(power_bins, pr_v_bins)
            no_wake_power_cdf_bins[i] = np.dot(no_wake_power_bins, pr_v_bins)
        return np.array([np.dot(power_cdf_bins, wind_freq_bins),
                         np.dot(no_wake_power_cdf_bins, wind_freq_bins)])
    def powers(self, deficits, params, **kwargs):
        """Expected (waked, no-wake) power of one turbine over the wind rose."""
        pow_curve = vops.params_loader(params["power_curve"]).pow_curve \
            if not self.uniform else self.pow_curve
        wt_power = np.vectorize(pow_curve)(self.v_point[:, None] * (1. - deficits))
        no_wake_wt_power = \
            np.vectorize(pow_curve)(self.v_point[:, None] * np.ones((deficits.shape)))
        wd_powers = np.zeros((2, self.w_point.shape[0]))
        if kwargs.get("wd_output", False):
            # Conditional speed distribution within each direction sector.
            wds_fs = self.wind_pdf / self.wind.values[:, -1][None, :]
            wd_power, no_wake_wd_power = \
                np.sum(wt_power * wds_fs, axis=0), np.sum(no_wake_wt_power * wds_fs, axis=0)
            wd_powers = np.concatenate((wd_power[None, :], no_wake_wd_power[None, :]), axis=0)
        wt_power, no_wake_wt_power = \
            np.sum(wt_power * self.wind_pdf), np.sum(no_wake_wt_power * self.wind_pdf)
        # Bug fix: np.float was removed in NumPy 1.24; use the builtin float.
        return np.array([wt_power, no_wake_wt_power], dtype=float), wd_powers
    def output(self, deficits, **kwargs):
        """Aggregate per-turbine power from the full deficit tensor."""
        assert deficits.shape == (self.v_point.shape[0], self.wtnum, self.w_point.shape[0])
        powers, wd_powers = \
            np.zeros((self.wtnum, 2)), np.zeros((self.wtnum, 2, self.w_point.shape[0]))
        for i in range(self.wtnum):
            powers[i, :], wd_powers[i, :, :] = \
                self.powers(deficits[:, i, :], self.param.iloc[i], **kwargs)
        return powers, np.sum(wd_powers, axis=0).transpose(1, 0)
    def run(self, layout, **kwargs):
        """Evaluate a layout: returns (cost, per-turbine powers, per-direction powers)."""
        self.initial(layout, **kwargs)
        powers, wd_powers = self.output(self.wakes(mprocess=True), **kwargs)
        cost = self.cost(layout, powers, self.capacity, wdepth=self.wdepth, **kwargs)
        return cost, powers, wd_powers
    def test(self, layout, baseline=None, verbose=True, **kwargs):
        """Evaluate *layout* (and optionally a baseline) and report summary stats."""
        start = time.time()
        cost, powers, wd_powers = self.run(layout, **kwargs)
        end = time.time()
        if verbose:
            power, no_wake_power = np.sum(powers, axis=0)
            cf, eff = power * 100 / np.sum(self.capacity), power * 100 / no_wake_power
            print(f"Interactive time: {end - start:.3f} s")
            print(f"Optimal({self.costn}[€/MWh] / Power[MW] / No-wake[MW] / " +
                  f"CF[%] / Eff[%] / Loss[%]):\n ==> {cost:.3f} / {power:.3f} / " +
                  f"{no_wake_power:.3f} / {cf:.2f} / {eff:.2f} / {100. - eff:.2f}\n")
        if baseline is not None:
            bcost, bpowers, _ = self.run(baseline, **kwargs)
            bpower, bno_wake_power = np.sum(bpowers, axis=0)[0], np.sum(bpowers, axis=0)[1]
            bcf, beff = bpower * 100 / np.sum(self.capacity), bpower * 100 / bno_wake_power
            print(f"Baseline({self.costn}[€/MWh] / Power[MW] / No-wake[MW] / " +
                  f"CF[%] / Eff[%] / Loss[%]):\n ==> {bcost:.3f} / {bpower:.3f} / " +
                  f"{bno_wake_power:.3f} / {bcf:.2f} / {beff:.2f} / {100. - beff:.2f}\n")
        if kwargs.get("wd_output", False):
            assert wd_powers.all() != 0.
            vwopt.wd_power_plot(self.w_point, wd_powers, self.capacity, **kwargs)
        if kwargs.get("wt_output", False):
            vwopt.wt_power_plot(powers, self.capacity, **kwargs)
        return cost, powers
    def deficits(self, theta, layout):
        """Wake deficits for wind direction *theta* (serial counterpart of the
        module-level worker)."""
        wt_loc = vops.coordinate_transform(layout, theta)
        wt_index = vops.wind_turbines_sort(wt_loc)
        assert wt_index.shape[0] == wt_loc.shape[0]
        deficits = np.zeros((len(self.v_point), len(wt_index)))
        # Last two table columns carry the accumulated deficit and inflow speed.
        deficit_tab = np.full((len(self.v_point), len(wt_index), len(wt_index) + 2), None)
        turbulence_tab = np.full((len(self.v_point), len(wt_index), len(wt_index) + 2), None)
        v_start = time.time()
        for z, v_i in enumerate(self.v_point):
            deficit_tab[z, 0, -2], deficit_tab[z, 0, -1] = 0., v_i
            if self.tim is not None:
                turbulence_tab[z, 0, -2], turbulence_tab[z, 0, -1] = 0., self.turb
            for i, t in enumerate(wt_index):
                ct_curve = vops.params_loader(self.param.iloc[t]["ct_curve"]).ct_curve
                wake = self.wm(wt_loc[t, :], ct_curve(deficit_tab[z, i, -1]),
                               self.param.iloc[t]["D_r"],
                               self.param.iloc[t]["z_hub"], T_m=self.tim,
                               I_w=turbulence_tab[z, i, -1], I_a=self.turb)
                if i < len(wt_index) - 1:
                    # Losses induced by turbine t on every downstream turbine.
                    for j, wt in enumerate(wt_index[i+1:]):
                        deficit_tab[z, i, i + j + 1], turbulence_tab[z, i, i + j + 1] = \
                            wake.wake_loss(wt_loc[wt, :], self.param.iloc[wt]["D_r"], debug=None)
                    total_deficit = self.wsm(deficit_tab[z, :, :], i + 1, inflow=v_i)
                    if self.tim is not None:
                        turbulence_tab[z, i + 1, -2] = np.max(turbulence_tab[z, :i+1, i+1])
                        turbulence_tab[z, i + 1, -1] = np.sqrt(
                            np.max(turbulence_tab[z, :i+1, i+1])**2 + self.turb**2)
                    deficit_tab[z, i + 1, -2] = total_deficit
                    deficit_tab[z, i + 1, -1] = v_i * (1 - total_deficit)
                else:
                    break
            deficits[z, :] = vops.wt_power_reorder(wt_index, deficit_tab[z, :, -2])
        v_end = time.time()
        print(f"Wind: {theta} | Time: {v_end - v_start}")
        return deficits
def deficits(args):
    """Worker-process entry point: wake deficits for one wind direction.

    Args:
        args: tuple (theta, speeds, wm, wsm, tim, turb, params, layout),
            packed so it can be shipped through multiprocessing.Pool.

    Returns:
        ndarray of shape (n_speeds, n_turbines) with fractional velocity
        deficits, reordered back to the original turbine order.
    """
    theta, speeds, wm, wsm, tim, turb, params, layout = args
    wt_loc = vops.coordinate_transform(layout, theta)
    wt_index = vops.wind_turbines_sort(wt_loc)
    assert wt_index.shape[0] == wt_loc.shape[0]
    deficits = np.zeros((len(speeds), len(wt_index)))
    # Last two columns carry the accumulated deficit and the inflow speed.
    deficit_tab = np.full((len(speeds), len(wt_index), len(wt_index) + 2), None)
    turbulence_tab = np.full((len(speeds), len(wt_index), len(wt_index) + 2), None)
    start = time.time()
    for z, v_i in enumerate(speeds):
        deficit_tab[z, 0, -2], deficit_tab[z, 0, -1] = 0., v_i
        if tim is not None:
            turbulence_tab[z, 0, -2], turbulence_tab[z, 0, -1] = 0., turb
        # Sweep turbines from upstream to downstream, accumulating losses.
        for i, t in enumerate(wt_index):
            ct_curve = vops.params_loader(params.iloc[t]["ct_curve"]).ct_curve
            wake = wm(wt_loc[t, :], ct_curve(deficit_tab[z, i, -1]),
                      params.iloc[t]["D_r"],
                      params.iloc[t]["z_hub"], T_m=tim,
                      I_w=turbulence_tab[z, i, -1], I_a=turb)
            if i < len(wt_index) - 1:
                # Losses induced by turbine t on every downstream turbine.
                for j, wt in enumerate(wt_index[i+1:]):
                    deficit_tab[z, i, i + j + 1], turbulence_tab[z, i, i + j + 1] = \
                        wake.wake_loss(wt_loc[wt, :], params.iloc[wt]["D_r"], debug=None)
                total_deficit = wsm(deficit_tab[z, :, :], i + 1, inflow=v_i)
                if tim is not None:
                    turbulence_tab[z, i + 1, -2] = np.max(turbulence_tab[z, :i+1, i+1])
                    turbulence_tab[z, i + 1, -1] = np.sqrt(
                        np.max(turbulence_tab[z, :i+1, i+1])**2 + turb**2)
                deficit_tab[z, i + 1, -2] = total_deficit
                deficit_tab[z, i + 1, -1] = v_i * (1 - total_deficit)
            else:
                break
        deficits[z, :] = vops.wt_power_reorder(wt_index, deficit_tab[z, :, -2])
    end = time.time()
    # print(f"Wind: {theta} | Time: {end - start:.3f}")
    return deficits
def analysis(path="solution", baseline="horns", result=None, config=None,
             **kwargs):
    """Load an optimization result and evaluate/plot it against a baseline.

    Args:
        path: directory holding result JSON files and plot output.
        baseline: baseline layout name (e.g. "horns") or an explicit layout.
        result: result dict, or the stem of a JSON file under *path*.
        config: evaluation config; defaults to the one stored in the result.
        **kwargs: plotting switches forwarded to LayoutPower.test and vwopt.
    """
    result = result if isinstance(result, dict) else \
        vops.json_load(f"{path}/{result}.json")
    config = config or result['config']
    wf = LayoutPower(config)
    if wf.uniform:
        # Two-stage runs store a history; the final layout is the last entry.
        layout = np.array(result['layout'][-1]) if config['stage'] == 2 \
            else np.array(result['layout'])
        param = None
    else:
        print("NOTE: Nonuniform Wind Farm Configuration")
        layout, param = wf.unpack_nonuniform(result['layout'])
    # print(layout.shape)
    # A flat vector of 2*num coordinates is reshaped to (num, 2).
    if layout.shape[0] == config['num']:
        wt_num = layout.shape[0]
    else:
        wt_num = layout.shape[0] // 2
        layout = layout.reshape((wt_num, 2))
    assert wt_num == config['num'], \
        'WTs number is not matching. Please check!'
    print("\nWind Turbine Num: ", wt_num)
    if baseline in ['horns', ]:
        baseline = vops.params_loader(baseline).baseline(wt_num)
    # Grid-coded GA solutions (stage-1) must be mapped back to coordinates.
    if (config["opt"] == "ga" and config["stage"] != 2) and config["grid"]:
        _, grids = vops.layout2grids([0, 0], [63, 48.89], config["grid"])
        layout = vops.grids2layout(layout, grids)
        layout = layout[np.argsort(layout[:, 1]), :] * 80.
        layout = layout - np.array([0, 589])
    cost, _ = wf.test(layout, baseline, param=param, path=path, **kwargs)
    if cost is not None:
        if kwargs.get("layout_output", False):
            vwopt.wf_layout_plot(layout, baseline, path=path, **kwargs)
        if kwargs.get("curve_output", False):
            vwopt.opt_curve_plot(result, path=path, **kwargs)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MISCELLANEOUS #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def power_debug():
    """Sanity-check the expected-power integrals against closed-form pieces.

    Integrates a piecewise power model over a Weibull(k=2.3, c=10.59) wind
    speed distribution: a linear partial-power segment on [4, 15] m/s and a
    rated segment on [15, 25] m/s, then averages each over a uniform
    360-degree wind rose (which leaves the values unchanged).

    Returns:
        tuple: (partial-power integral, rated-power integral, their sum).
    """
    from scipy import integrate
    # Weibull pdf factor: (k/c)*(v/c)**(k-1)*exp(-(v/c)**k) with k=2.3, c=10.59
    # gives 0.217 * (v/10.59)**1.3 * exp(-(v/10.59)**2.3).
    # (Unused helper functions weibull/linear/directions were removed.)
    a, _ = integrate.quad(
        lambda v: (0.18 * v + 0.73) * 0.217 * (v / 10.59)**1.3 * np.exp(-(v / 10.59) ** 2.3),
        4, 15)
    b, _ = integrate.quad(
        lambda v: 2 * 0.217 * (v / 10.59)**1.3 * np.exp(-(v / 10.59) ** 2.3),
        15, 25)
    # Averaging a constant over the uniform wind rose returns the constant.
    integral_a, _ = integrate.quad(lambda t: (1 / 360) * a, 0, 360)
    integral_b, _ = integrate.quad(lambda t: (1 / 360) * b, 0, 360)
    return integral_a, integral_b, integral_a + integral_b
if __name__ == "__main__":
    # Example configuration for a 25-turbine Horns Rev style case.
    config = {
        "stage":2,
        "opt":['ga', 'pso'],
        "tag":'25',
        # "pop": 40,
        "pop": [20, 20],
        # "maxg": 5,
        "maxg": [20, 20],
        "grid": 5,
        "num": 25,
        "param": "horns",
        "wind": "horns",
        "vbins": 3,
        "wbins": 15,
        "wdepth": "linear_x",
        "cost": "LCOE",
        "turb": 0.077,
        "wm": "Jensen",
        # "wm": "Bastankhah",
        "wsm": "SS",
        "tim": "Frandsen",
        # "tim": None,
    }
    # Baseline layout scaled by the rotor diameter (80 m) and flattened.
    layout = (fconfig.Horns.baseline(25) / 80.).ravel()
    # layout = None
    # path = "output/21_6_30/Jen_49_mos"
    # path = "solution"
    # analysis(path=path,
    #          baseline="horns",
    #          result="eapso_results_49",
    #          # result={'layout': layout},
    #          # config=config,
    #          layout_output=True,
    #          layout_name="layout_49",
    #          curve_output=True,
    #          curve_name="curve_25",
    #          wd_output=False,
    #          wd_name="wds_25",
    #          wt_output=False,
    #          wt_name="wts_25",
    #          )
    # LayoutPower(config).one_wakes(layout, 105., mprocess=True)
| StarcoderdataPython |
8073739 | <filename>Arquitectura de Software/Practica 1 (Calculadora)/classes.py
#! usr/bin/python
from math import sqrt
from math import pow
from math import sin
from math import cos
from math import tan
from math import radians
class CalculadoraBasica():
    """Basic calculator holding two operands (numX, numY)."""
    def __init__(self, numX, numY):
        # Operands are stored on the instance so subclasses can reuse them.
        self.numX = numX
        self.numY = numY
    def Suma(self):
        """Return the sum of the two operands."""
        total = self.numX + self.numY
        return total
    def Resta(self):
        """Return numX minus numY."""
        difference = self.numX - self.numY
        return difference
    def Division(self):
        """Return numX divided by numY (true division)."""
        quotient = self.numX / self.numY
        return quotient
    def Multiplicacion(self):
        """Return the product of the two operands."""
        product = self.numX * self.numY
        return product
class CalculadoraCientifica(CalculadoraBasica):
    """Scientific calculator: roots, powers and trigonometry (degree input)."""
    def __init__(self, numX, numY):
        super().__init__(numX, numY)
    def Raiz(self):
        """Square root of numX."""
        return sqrt(self.numX)
    def Radianes(self):
        """numX (degrees) converted to radians."""
        return radians(self.numX)
    def Potencia(self):
        """numX raised to numY (math.pow, float result)."""
        return pow(self.numX, self.numY)
    def Seno(self):
        """Sine of numX degrees."""
        angulo = radians(self.numX)
        return sin(angulo)
    def Coseno(self):
        """Cosine of numX degrees."""
        angulo = radians(self.numX)
        return cos(angulo)
    def Tangente(self):
        """Tangent of numX degrees."""
        angulo = radians(self.numX)
        return tan(angulo)
class CalculadoraProgramador(CalculadoraBasica):
    """Programmer calculator: renders *resultado* in binary/octal/hex.

    All three conversions return only the digits, without the Python base
    prefix ('0b'/'0o'/'0x'), so the representations are consistent.
    """
    def __init__(self, numX, numY, resultado):
        CalculadoraBasica.__init__(self, numX, numY)
        self.resultado = resultado
    def Binario(self):
        """Binary digits of resultado (no '0b' prefix)."""
        return bin(self.resultado)[2:]
    def Octal(self):
        """Octal digits of resultado (no '0o' prefix).

        Bug fix: the original returned oct()'s raw value, which kept the
        '0o' prefix, unlike Binario and Hexadecimal.
        """
        return oct(self.resultado)[2:]
    def Hexadecimal(self):
        """Hexadecimal digits of resultado (no '0x' prefix)."""
        return hex(self.resultado)[2:]
class CalculadoraFinanciera(CalculadoraBasica):
    """Currency converter using fixed MXN/USD and MXN/EUR snapshot rates."""
    def __init__(self, numX, numY):
        super().__init__(numX, numY)
        # Pesos per unit of foreign currency.
        self.costDolar = 19.634
        self.costEUR = 20.7753
        # Inverse rates: foreign currency per peso.
        self.costPesoDolar = 1 / self.costDolar
        self.costPesoEUR = 1 / self.costEUR
    def DolarToPeso(self):
        """Convert numX dollars to pesos."""
        return self.numX * self.costDolar
    def EurToPeso(self):
        """Convert numX euros to pesos."""
        return self.numX * self.costEUR
    def PesoToDolar(self):
        """Convert numX pesos to dollars."""
        return self.numX * self.costPesoDolar
    def PesoToEur(self):
        """Convert numX pesos to euros."""
        return self.numX * self.costPesoEUR
5159451 | <filename>tests/behavioural/features/steps/collection_exercise_field.py
from behave import given, when
@given("the collection_exercise is set to '{collection_exercise}'")
@when("the collection_exercise is set to '{collection_exercise}'")
def step_impl_the_collection_exercise_is_set_to(context, collection_exercise):
    """Set the outgoing message's collection_exercise field to the given value."""
    context.bdd_helper.message_data['collection_exercise'] = collection_exercise
| StarcoderdataPython |
3513774 | #!/usr/bin/env python3
#
# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved
#
#
from uimnet import utils
from uimnet import algorithms
from uimnet import workers
from omegaconf import OmegaConf
from pathlib import Path
import torch
import torch.distributed as dist
import torch.multiprocessing as tmp
import numpy as np
import os
import argparse
import pickle
import filelock
PREDICTION_CFG = """
output_dir: null # subfolder. Mutable at dispatch
dataset:
name: ImageNat
root: /checkpoint/ishmaelb/data/datasets/ILSVRC2012
equalize_partitions: True
batch_size: 256
seed: 42
experiment:
distributed: True
platform: slurm
## ----- Mutable on the worker during distributed/device setup.
seed: 42 # Workers seed
device: 'cuda:0'
rank: null
local_rank: null
world_size: null
dist_protocol: env
dist_url: null
num_workers: 5
# ------
"""
def parse_arguments():
    """Parse the command-line options for the prediction runner."""
    arg_parser = argparse.ArgumentParser(description='runs prediction state on the model director')
    arg_parser.add_argument('-m', '--model_dir', type=str, required=True)
    arg_parser.add_argument('-c', '--clustering_file', type=str, required=True)
    arg_parser.add_argument('--local_rank', type=int, default=None)
    arg_parser.add_argument('-d', '--distributed', action='store_true')
    arg_parser.add_argument('--dist_protocol', type=str, default='env')
    parsed = arg_parser.parse_args()
    return parsed
def partition_datasets(prediction_cfg, partitions):
    """Build the partitioned train/val datasets described by *prediction_cfg*."""
    dataset_cfg = prediction_cfg.dataset
    return {
        split_name: utils.partition_dataset(
            name=dataset_cfg.name,
            root=dataset_cfg.root,
            split=split_name,
            partitions=partitions,
            equalize_partitions=dataset_cfg.equalize_partitions,
        )
        for split_name in ('train', 'val')
    }
# def run_predictor(prediction_cfg, validation_cfg, Algorithm, train_dataset, val_dataset):
# pass
# def run(model_dir, clustering_file, distributed, ranks=None):
# if ranks is None:
# ranks = list(range(dist.get_world_size()))
# if not distributed:
# pass
# processes = []
# if distributed:
# for rank in ranks:
# p = tmp.Process(target=run_predictor, predictor_args)
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
@utils.timeit
def run_predictor(model_dir, clustering_file, args):
    """Run the prediction worker for a trained model.

    Args:
        model_dir: directory containing the model's train_cfg.yaml.
        clustering_file: pickle with the class partitions (read under a file lock).
        args: parsed CLI namespace (distributed / dist_protocol flags).

    Returns:
        Whatever workers.Predictor returns for the in-partition datasets.
    """
    model_path = Path(model_dir)
    train_cfg = utils.load_cfg(model_path / 'train_cfg.yaml')
    OmegaConf.set_struct(train_cfg, True)
    prediction_cfg = OmegaConf.create(PREDICTION_CFG)
    OmegaConf.set_struct(prediction_cfg, True)
    prediction_cfg.experiment.distributed = args.distributed
    prediction_cfg.experiment.dist_protocol = args.dist_protocol
    if utils.is_distributed():
        # Bug fix: os.environ values must be strings; num_workers is an int
        # and assigning it directly raises TypeError.
        os.environ['OMP_NUM_THREADS'] = str(prediction_cfg.experiment.num_workers)
    # Serialize concurrent readers of the clustering pickle across workers.
    with filelock.FileLock(clustering_file + '.lock'):
        with open(clustering_file, 'rb') as fp:
            clustering = pickle.load(fp)
    all_datasets = partition_datasets(prediction_cfg, partitions=clustering['partitions'])
    Algorithm = utils.load_model_cls(train_cfg)
    predictor = workers.Predictor()
    output = predictor(prediction_cfg, train_cfg, Algorithm=Algorithm,
                       train_dataset=all_datasets['train']['in'],
                       val_dataset=all_datasets['val']['in'])
    return output
if __name__ == '__main__':
    # Parse CLI flags and run the (timed) prediction worker.
    args = parse_arguments()
    predictor_output = run_predictor(args.model_dir, args.clustering_file, args)
| StarcoderdataPython |
8128946 | from mir.io.feature_io_base import *
import numpy as np
class ChromaIO(FeatureIO):
    """Feature I/O for chroma features.

    Reads either a pickled numpy array or a CSV export whose rows are
    ``time,extra,<24 chroma values>`` laid out as two stacked 12-bin halves.
    """

    def read(self, filename, entry):
        """Read chroma data from ``filename`` into an (n_frame, 24) array.

        For CSV input, the 24 trailing values of each row are reshaped to
        (2, 12), the half order is flipped, the result transposed, rotated by
        -3 bins (a 3-semitone shift — presumably aligning the bin origin;
        TODO confirm against the CSV producer), then flattened back to 24.
        Non-CSV files are assumed to contain pickled arrays.
        """
        if filename.endswith('.csv'):
            result = []
            # Use a context manager so the handle is closed even if a row
            # fails to parse (the original leaked the file object).
            with open(filename, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line == '':
                        continue
                    # Skip the first two columns (time and one extra field).
                    arr = np.array(list(map(float, line.split(',')[2:])))
                    arr = arr.reshape((2, 12))[::-1].T
                    arr = np.roll(arr, -3, axis=0).reshape((24))
                    result.append(arr)
            data = np.array(result)
        else:
            data = pickle_read(self, filename)
        return data

    def write(self, data, filename, entry):
        """Persist chroma data as a pickle file."""
        pickle_write(self, data, filename)

    def visualize(self, data, filename, entry, override_sr):
        """Dump chroma frames to a tab-separated text file for inspection.

        Each line is the frame time followed by, for every tuple component,
        a '0' separator (after the first component) and its 12 chroma bins.
        """
        sr = entry.prop.sr
        win_shift = entry.prop.hop_length
        feature_tuple_size = entry.prop.chroma_tuple_size
        features = data
        # Context manager ensures the output file is closed on error.
        with open(filename, 'w') as f:
            for i in range(0, features.shape[0]):
                time = win_shift * i / sr
                f.write(str(time))
                for j in range(0, feature_tuple_size):
                    if j > 0:
                        f.write('\t0')
                    for k in range(0, 12):
                        f.write('\t' + str(features[i][k * feature_tuple_size + j]))
                f.write('\n')

    def pre_assign(self, entry, proxy):
        """Register a lazy placeholder for 'n_frame' until the data is loaded."""
        entry.prop.set('n_frame', LoadingPlaceholder(proxy, entry))

    def post_load(self, data, entry):
        """Record the actual frame count once the data has been read."""
        entry.prop.set('n_frame', data.shape[0])
# repo: bsulman/INTERFACE-model-experiment-synthesis
import CORPSE
from pylab import *
import pandas
# Decomposition parameters for a 5% clay soil. The three-element lists are
# per-substrate-pool values; presumably ordered [fast/labile, slow, dead
# microbe] to match the litterC triples below — TODO confirm against CORPSE.
params_lowclay={
    'vmaxref':[1500,50,600], # Relative maximum enzymatic decomp rates
    'Ea':[37e3,54e3,50e3], # Activation energy
    'kC':[0.01,0.01,0.01], # Michaelis-Menton parameter
    'gas_diffusion_exp':2.5, # Determines suppression of decomp at high soil moisture
    'minMicrobeC':1e-3, # Minimum microbial biomass (fraction of total C)
    'Tmic':0.15, # Microbial lifetime
    'et':0.5, # Fraction of turnover not converted to CO2
    'eup':[0.6,0.05,0.6], # Carbon uptake efficiency
    'tProtected':75.0, # Protected C turnover time (years)
    'protection_rate':[1.0,0.00005,1.0], # Protected carbon formation rate (year-1)
}

# 20% clay: identical except the protected-C formation rate is rescaled by
# CORPSE's clay-protection factor relative to the 5% clay baseline.
params_highclay=params_lowclay.copy()
params_highclay['protection_rate']=array(params_lowclay['protection_rate'])*CORPSE.prot_clay(20.0)/CORPSE.prot_clay(5.0)

# 70% clay variant, rescaled the same way.
params_higherclay=params_lowclay.copy()
params_higherclay['protection_rate']=array(params_lowclay['protection_rate'])*CORPSE.prot_clay(70.0)/CORPSE.prot_clay(5.0)
# Earlier hand-tuned initial states, kept for reference:
# c_lowclay_highquality=CORPSE.soil_carbon_cohort(litterC=[0.0018,0.5535,0.0073],livingMicrobeC=0.025,protectedC=[0.215,0.0,1.14],params=params_lowclay)
# c_highclay_highquality=CORPSE.soil_carbon_cohort(litterC=[0.0018,0.5535,0.0073],livingMicrobeC=0.025,protectedC=[0.4,0.0,2.5],params=params_highclay)
# c_lowclay_lowquality=CORPSE.soil_carbon_cohort(litterC=[0.0022587,0.605026,0.005075],livingMicrobeC=0.012595,protectedC=[0.215,0.0,0.778],params=params_lowclay)
# c_highclay_lowquality=CORPSE.soil_carbon_cohort(litterC=[0.002195,0.5946,0.005646],livingMicrobeC=0.013072,protectedC=[0.408,0.02922,1.8762],params=params_highclay)

# Spun-up initial cohorts (values emitted by the do_spinup branch at the end
# of this script). NOTE(review): the 'higherclay' cohorts start from the same
# state as the 'highclay' ones — presumably pending their own spin-up; confirm.
c_lowclay_highquality = CORPSE.soil_carbon_cohort(litterC=[0.004849,0.3673,0.006732], protectedC=[0.3637,0.001377,0.5049], livingMicrobeC=0.02342)
c_lowclay_lowquality = CORPSE.soil_carbon_cohort(litterC=[0.002171,0.622,0.004328], protectedC=[0.1628,0.002333,0.3246], livingMicrobeC=0.01188)
c_highclay_highquality = CORPSE.soil_carbon_cohort(litterC=[0.004849,0.3673,0.006732], protectedC=[0.7108,0.002691,0.9867], livingMicrobeC=0.02342)
c_highclay_lowquality = CORPSE.soil_carbon_cohort(litterC=[0.002171,0.622,0.004327], protectedC=[0.5831,0.008352,1.162], livingMicrobeC=0.01188)
c_higherclay_highquality = CORPSE.soil_carbon_cohort(litterC=[0.004849,0.3673,0.006732], protectedC=[0.7108,0.002691,0.9867], livingMicrobeC=0.02342)
c_higherclay_lowquality = CORPSE.soil_carbon_cohort(litterC=[0.002171,0.622,0.004327], protectedC=[0.5831,0.008352,1.162], livingMicrobeC=0.01188)

# Attach the clay-dependent parameter sets to each cohort.
c_lowclay_highquality.set_params(params_lowclay)
c_lowclay_lowquality.set_params(params_lowclay)
c_highclay_highquality.set_params(params_highclay)
c_highclay_lowquality.set_params(params_highclay)
c_higherclay_highquality.set_params(params_higherclay)
c_higherclay_lowquality.set_params(params_higherclay)
# When True, only the control run is executed (used to generate the spun-up
# initial states printed at the bottom of this script).
do_spinup=False

# Soil configurations: clay level x litter quality.
configs=[
    'lowclay_highquality',
    'lowclay_lowquality',
    'highclay_highquality',
    'highclay_lowquality',
    'higherclay_highquality',
    'higherclay_lowquality'
]

if do_spinup:
    # Just do control runs to save time
    sims=['control']
else:
    # Full set of manipulation experiments.
    sims=[
        'control',
        'warming_2',
        'warming_5',
        'labile_addition',
        'total_addition_30',
        'total_addition_100',
        'warming_2_total_addition_30',
        'warming_2_labile_addition',
        'litter_removal'
    ]
# One fresh cohort copy per (configuration, simulation) pair so experiments
# cannot contaminate each other's state.
initial_cohorts={
    'lowclay_highquality':c_lowclay_highquality,
    'lowclay_lowquality':c_lowclay_lowquality,
    'highclay_highquality':c_highclay_highquality,
    'highclay_lowquality':c_highclay_lowquality,
    'higherclay_highquality':c_higherclay_highquality,
    'higherclay_lowquality':c_higherclay_lowquality,
}
cohorts=dict()
for conf in configs:
    cohorts[conf]=dict()
for sim in sims:
    for conf in configs:
        # BUG FIX: the original copy-pasted assignments gave
        # 'higherclay_highquality' the c_highclay_highquality cohort and
        # 'highclay_lowquality' the c_higherclay_lowquality cohort, so those
        # two configs ran with the wrong clay parameter sets. Each config now
        # gets a copy of its own initial cohort.
        cohorts[conf][sim]=initial_cohorts[conf].copy()
# 60 simulated years of daily timesteps.
nsteps=365*60
def make_outputs(nsteps):
    """Allocate zeroed output arrays for every simulation in `sims`.

    Returns {sim: {'unprotectedC': (nsteps,3), 'protectedC': (nsteps,3),
                   'microbeC': (nsteps,), 'CO2': (nsteps,)}}.
    """
    return {sim: {'unprotectedC': zeros((nsteps, 3)),
                  'protectedC': zeros((nsteps, 3)),
                  'microbeC': zeros(nsteps),
                  'CO2': zeros(nsteps)}
            for sim in sims}
# Pre-allocated output storage, one set of arrays per configuration.
outputs=dict()
outputs['lowclay_highquality']=make_outputs(nsteps)
outputs['lowclay_lowquality']=make_outputs(nsteps)
outputs['highclay_highquality']=make_outputs(nsteps)
outputs['highclay_lowquality']=make_outputs(nsteps)
outputs['higherclay_highquality']=make_outputs(nsteps)
outputs['higherclay_lowquality']=make_outputs(nsteps)
dt=1.0/365.0 # Timestep: one day, expressed in years
start=365*10 # Treatments begin after a 10-year control period (in steps)

# Constant environmental forcing: 20 degrees C and theta=0.5 soil moisture.
temperature=zeros(nsteps)+273.15+20
theta=zeros(nsteps)+0.5

# Warming treatments: step increase after `start`.
T_warming_2=temperature.copy()
T_warming_2[start:]=T_warming_2[start:]+2.0
T_warming_5=temperature.copy()
T_warming_5[start:]=T_warming_5[start:]+5.0

# Temperature series used by each simulation.
T={
    'control':temperature,
    'warming_2':T_warming_2,
    'warming_5':T_warming_5,
    'labile_addition':temperature,
    'total_addition_30':temperature,
    'total_addition_100':temperature,
    'warming_2_total_addition_30':T_warming_2,
    'warming_2_labile_addition':T_warming_2,
    'litter_removal':temperature
}

# Baseline litter inputs, columns presumably [fast, slow, dead microbe]
# (TODO confirm against CORPSE pool ordering); converted from gC to kgC.
inputs_highquality=column_stack([zeros((nsteps,1))+150.0,zeros((nsteps,1))+350.0,zeros((nsteps,1))+0.0])/1000 # kgC Per year
inputs_lowquality=column_stack([zeros((nsteps,1))+50.0,zeros((nsteps,1))+450.0,zeros((nsteps,1))+0.0])/1000 # kgC Per year

# +30% labile (fast) input only, after the control period.
inputs_highquality_labile_addition=inputs_highquality.copy()
inputs_highquality_labile_addition[start:,0]=inputs_highquality_labile_addition[start:,0]*1.3
inputs_lowquality_labile_addition=inputs_lowquality.copy()
inputs_lowquality_labile_addition[start:,0]=inputs_lowquality_labile_addition[start:,0]*1.3

# +30% and +100% total input treatments.
inputs_highquality_total_addition_30=inputs_highquality.copy()
inputs_highquality_total_addition_30[start:,:]=inputs_highquality_total_addition_30[start:,:]*1.3
inputs_highquality_total_addition_100=inputs_highquality.copy()
inputs_highquality_total_addition_100[start:,:]=inputs_highquality_total_addition_100[start:,:]*2.0
inputs_lowquality_total_addition_30=inputs_lowquality.copy()
inputs_lowquality_total_addition_30[start:,:]=inputs_lowquality_total_addition_30[start:,:]*1.3
inputs_lowquality_total_addition_100=inputs_lowquality.copy()
inputs_lowquality_total_addition_100[start:,:]=inputs_lowquality_total_addition_100[start:,:]*2.0

# Litter removal: all inputs cut to zero after the control period.
inputs_highquality_litter_removal=inputs_highquality.copy()
inputs_highquality_litter_removal[start:,:]=0.0
inputs_lowquality_litter_removal=inputs_lowquality.copy()
inputs_lowquality_litter_removal[start:,:]=0.0
# Map every (configuration, simulation) pair to its litter-input time series.
# All configurations sharing a litter quality use the same forcing arrays, so
# build one sim->array table per quality and give each configuration its own
# shallow copy (the underlying arrays are shared, as in the original layout).
_inputs_by_quality={
    'highquality':{
        'control':inputs_highquality,
        'warming_2':inputs_highquality,
        'warming_5':inputs_highquality,
        'labile_addition':inputs_highquality_labile_addition,
        'total_addition_30':inputs_highquality_total_addition_30,
        'total_addition_100':inputs_highquality_total_addition_100,
        'warming_2_total_addition_30':inputs_highquality_total_addition_30,
        'warming_2_labile_addition':inputs_highquality_labile_addition,
        'litter_removal':inputs_highquality_litter_removal,
    },
    'lowquality':{
        'control':inputs_lowquality,
        'warming_2':inputs_lowquality,
        'warming_5':inputs_lowquality,
        'labile_addition':inputs_lowquality_labile_addition,
        'total_addition_30':inputs_lowquality_total_addition_30,
        'total_addition_100':inputs_lowquality_total_addition_100,
        'warming_2_total_addition_30':inputs_lowquality_total_addition_30,
        'warming_2_labile_addition':inputs_lowquality_labile_addition,
        'litter_removal':inputs_lowquality_litter_removal,
    },
}
inputs={}
for _conf in ['lowclay_highquality','highclay_highquality','higherclay_highquality',
              'lowclay_lowquality','highclay_lowquality','higherclay_lowquality']:
    # conf names are '<clay>_<quality>'; the second part selects the table.
    inputs[_conf]=dict(_inputs_by_quality[_conf.split('_')[1]])
# Run model for all configs and sims: one independent daily time loop per
# (configuration, simulation) pair.
for conf in configs:
    for sim in sims:
        print 'Starting ',conf,sim
        for step in xrange(nsteps):
            if (step*dt)%10 ==0:
                # NOTE(review): float modulo — intended as a once-per-decade
                # progress message, but may not fire exactly on multiples.
                print 'Year: %d'% (step*dt)
            # Advance the cohort one step under this simulation's forcing.
            # NOTE(review): `output` is assigned but never used here.
            output=cohorts[conf][sim].update(T[sim][step],theta[step],dt)
            cohorts[conf][sim].check_validity()
            # Record pool states after the update, before adding new litter.
            outputs[conf][sim]['unprotectedC'][step,:]=cohorts[conf][sim].litterC
            outputs[conf][sim]['protectedC'][step]=cohorts[conf][sim].protectedC.sum()
            outputs[conf][sim]['microbeC'][step]=cohorts[conf][sim].livingMicrobeC
            outputs[conf][sim]['CO2'][step]=cohorts[conf][sim].CO2
            # Add this day's litter input (annual rate scaled by dt).
            cohorts[conf][sim].add_carbon(inputs[conf][sim][step,:]*dt)
# Time axis in years.
t=arange(nsteps)/365.0

# Per-simulation line style, color, and legend label for the plots below.
plotstyles={
    'control':'-',
    'warming_2':'--',
    'warming_5':'--',
    'labile_addition':':',
    'total_addition_30':'-.',
    'total_addition_100':'-.',
    'warming_2_total_addition_30':'-.',
    'warming_2_labile_addition':':',
    'litter_removal':'-.'
}
plotcolors={
    'control':'k',
    'warming_2':[0.8,0.5,0.0],
    'warming_5':[1.0,0.0,0.0],
    'labile_addition':'g',
    'total_addition_30':[0.0,0.8,0.0],
    'total_addition_100':[0.0,0.7,0.6],
    'warming_2_total_addition_30':'m',
    'warming_2_labile_addition':'m',
    'litter_removal':'b'
}
plotlegends={
    'control':'Control',
    'warming_2':'+2$^\circ$ C',
    'warming_5':'+5$^\circ$ C',
    'labile_addition':'+30% Labile',
    'total_addition_30':'+30% Total',
    'total_addition_100':'+100% Total',
    'warming_2_total_addition_30':r'+30% total, +2$^\circ$ C',
    'warming_2_labile_addition':r'+30% labile, +2$^\circ$ C',
    'litter_removal':'No litter'
}
def plot_results(mode='total'):
    """Plot a 2x3 grid of C-pool time series, one panel per soil configuration.

    mode: 'total', 'protected', or 'unprotected' selects which pool (sum) to
    plot; raises ValueError otherwise. Relies on the module-level `outputs`,
    `configs`, `sims`, `t`, and the plotting dicts, plus pylab figure state.
    """
    for plotnum,conf in enumerate(configs):
        subplot(2,3,plotnum+1)
        for sim in sims:
            if mode=='total':
                y=outputs[conf][sim]['microbeC']+\
                    outputs[conf][sim]['unprotectedC'].sum(axis=1)+\
                    outputs[conf][sim]['protectedC'].sum(axis=1)
            elif mode=='protected':
                y=outputs[conf][sim]['protectedC'].sum(axis=1)
            elif mode=='unprotected':
                y=outputs[conf][sim]['unprotectedC'].sum(axis=1)
            else:
                raise ValueError('Invalid plotting mode')
            h=plot(t,y,c=plotcolors[sim],ls=plotstyles[sim],label=plotlegends[sim],lw=2.0)
        title(conf.replace('_',', ').title()+' litter')
        ylabel('Carbon pools (kgC/m$^2$)')
        xlabel('Time (years)')
        if plotnum==2:
            # Draw the legend only on the third panel to avoid clutter.
            leg=legend(fontsize='small',loc='best')
            leg.get_frame().set_alpha(0.5)
    subplots_adjust(hspace=0.25)
    figtext(0.5,0.95,mode.capitalize()+' C',fontsize=20,ha='center')
    draw()
# Draw the three summary figures (total, protected, unprotected C).
figure(1);clf()
plot_results('total')
figure(2);clf()
plot_results('protected')
figure(3);clf()
plot_results('unprotected')
show()
def flatten_output(output):
    """Flatten one simulation's output dict into a DataFrame indexed by day.

    Columns: the three unprotected pools, total protected C, CO2, and
    microbial biomass.
    """
    days = arange(len(output['CO2']))
    frame = pandas.DataFrame(index=days)
    unprot = output['unprotectedC']
    for column, values in [('unprotectedC_fast', unprot[:, 0]),
                           ('unprotectedC_slow', unprot[:, 1]),
                           ('unprotectedC_deadmic', unprot[:, 2]),
                           ('protectedC', output['protectedC'].sum(axis=1)),
                           ('CO2', output['CO2']),
                           ('microbeC', output['microbeC'])]:
        frame[column] = values
    return frame
def flatten_output_all(output):
    """Combine every (config, experiment) run into one multi-indexed DataFrame.

    Index levels: Day, clay, quality, Experiment (sorted).
    """
    frames = []
    for conf, experiments in output.items():
        clay, quality = conf.split('_')
        for expt, sim_output in experiments.items():
            frame = flatten_output(sim_output)
            frame['clay'] = clay
            frame['quality'] = quality
            frame['Experiment'] = expt
            frame['Day'] = arange(len(frame))
            frames.append(frame)
    combined = pandas.concat(frames)
    combined.set_index(['Day', 'clay', 'quality', 'Experiment'], inplace=True)
    combined.sort_index(inplace=True)
    return combined
def save_output_csv(output,directory=None):
    """Write one CSV file per (configuration, experiment) run.

    Files are named 'CORPSE-output-<config>-<experiment>.csv' and written to
    `directory` when given, otherwise the working directory.
    """
    # Hoisted out of the inner loop (the original re-imported per iteration).
    import os.path
    for conf in output.keys():
        for expt in output[conf].keys():
            df=flatten_output(output[conf][expt])
            fname='CORPSE-output-%s-%s.csv'%(conf,expt)
            if directory is not None:
                fname=os.path.join(directory,fname)
            df.to_csv(fname,index_label='Day')
def save_output_pickle(output,directory=None):
    """Pickle the combined multi-index DataFrame of all runs to disk."""
    combined=flatten_output_all(output)
    fname='CORPSE-output-python-dataframe.pik'
    if directory is not None:
        import os.path
        fname=os.path.join(directory,fname)
    combined.to_pickle(fname)
def steady_state_protected(protected,output):
    """Steady-state protected C implied by one timestep's fluxes.

    NOTE(review): relies on the module-level timestep `dt`. Production is
    converted to an annual flux and divided by the fractional turnover rate.
    """
    production_per_year=output['protected_produced']/dt
    fractional_turnover=output['protected_turnover_rate']/protected
    return production_per_year/fractional_turnover
if do_spinup:
    # Report pool drift over the spin-up run for each configuration, to judge
    # whether the control run has reached equilibrium.
    for conf in configs:
        sim='control'
        total=outputs[conf][sim]['microbeC']+outputs[conf][sim]['unprotectedC'].sum(axis=1)+outputs[conf][sim]['protectedC'].sum(axis=1)
        print conf
        print 'Total C change: %1.3f (%1.2f%%)'%(total[-1]-total[0],(total[-1]-total[0])/total[0]*100)
        unprot=outputs[conf][sim]['unprotectedC'].sum(axis=1)
        print 'Unprotected C change: %1.3f (%1.2f%%)'%(unprot[-1]-unprot[0],(unprot[-1]-unprot[0])/unprot[0]*100)
        prot=outputs[conf][sim]['protectedC'].sum(axis=1)
        print 'Protected C change: %1.3f (%1.2f%%)'%(prot[-1]-prot[0],(prot[-1]-prot[0])/prot[0]*100)
    # Emit ready-to-paste initialization code for spun-up cohorts (these lines
    # are what the c_* constructors near the top of this script came from).
    print 'To make spun up cohorts:'
    for conf in configs:
        sim='control'
        c=cohorts[conf][sim].copy()
        o=c.update(temperature[-1],theta[-1],dt)
        p=steady_state_protected(c.protectedC,o)
        s='soil_carbon_cohort(litterC=[%1.4g,%1.4g,%1.4g], protectedC=[%1.4g,%1.4g,%1.4g], livingMicrobeC=%1.4g)'\
            %(c.litterC[0],c.litterC[1],c.litterC[2],
              p[0],p[1],p[2],
              c.livingMicrobeC)
        print 'c_%s = CORPSE.%s'%(conf,s)
| StarcoderdataPython |
"""Observationally-based results and scaling relations
"""
import numpy as np
__all__ = ["lbol_from_5100ang_runnoe2012", "lbol_from_3000ang_runnoe2012",
           "lbol_from_1450ang_runnoe2012", "lbol_from_2to10kev_all_runnoe2012",
           "lbol_from_2to10kev_RL_runnoe2012", "lbol_from_2to10kev_RQ_runnoe2012",
           # The inverse (L_bol -> band luminosity) relations are public
           # functions but were missing from the export list; include them so
           # `from module import *` exposes the whole public API.
           "lum5100_from_lbol_runnoe2012", "lum3000_from_lbol_runnoe2012",
           "lum1450_from_lbol_runnoe2012"]
def _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=1.0):
"""
log(L_iso) = alpha + beta * log10(lambda * L_lambda / lum0)
L_bol = fiso * L_iso
"""
liso = alpha + beta*np.log10(lam_lum_lam / lum0)
lbol = fiso * (10**liso)
return lbol
def _dist_pars(arg, num):
"""If `arg` is tuple (2,), draw `num` points from normal distribution with those parameters.
"""
if isinstance(arg, tuple):
if len(arg) != 2:
raise ValueError("`arg` must be a tuple of (mean, std)!")
arg = np.random.normal(*arg, size=num)
return arg
def _lbol_to_lband__pow_law(lbol, alpha, beta, lum0=1.0, fiso=1.0):
    """Invert the power-law bolometric correction: return lambda*L_lambda.

    log10(L_iso) = alpha + beta * log10(lambda * L_lambda / lum0), with
    L_iso = L_bol / fiso. Tuple-valued alpha/beta are drawn from normal
    distributions (one sample per element of `lbol`) via `_dist_pars`.
    """
    log_liso = np.log10(lbol / fiso)
    num_draws = np.size(lbol)
    alpha = _dist_pars(alpha, num_draws)
    beta = _dist_pars(beta, num_draws)
    return lum0 * np.power(10, (log_liso - alpha) / beta)
# Runnoe+2012 [1201.5155] - Updating quasar bolometric luminosity corrections
# https://ui.adsabs.harvard.edu/abs/2012MNRAS.422..478R/abstract
# ------------------------------------------------------------------------------------------------
def lbol_from_5100ang_runnoe2012(lam_lum_lam, scatter=False):
    """Bolometric luminosity from the 5100A band luminosity (lambda*L_lambda).

    Runnoe+2012 [1201.5155], Eq. 11 & 13:
        log(Liso) = (4.89 +- 1.66) + (0.91 +- 0.04) log(5100*L5100)
    With `scatter` True, (mean, std) tuples are forwarded so the helper draws
    the coefficients from normal distributions.
    """
    if scatter:
        alpha, beta = (4.89, 1.66), (0.91, 0.04)
    else:
        alpha, beta = 4.89, 0.91
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
def lum5100_from_lbol_runnoe2012(lbol, scatter=False):
    """5100A band luminosity (lambda*L_lambda) implied by a bolometric luminosity.

    Inverts Runnoe+2012 [1201.5155], Eq. 11 & 13:
        log(Liso) = (4.89 +- 1.66) + (0.91 +- 0.04) log(5100*L5100)
    With `scatter` True, (mean, std) tuples are forwarded so the helper draws
    the coefficients from normal distributions.
    """
    if scatter:
        alpha, beta = (4.89, 1.66), (0.91, 0.04)
    else:
        alpha, beta = 4.89, 0.91
    return _lbol_to_lband__pow_law(lbol, alpha, beta, lum0=1.0, fiso=0.75)
def lum3000_from_lbol_runnoe2012(lbol):
    """3000A band luminosity (lambda*L_lambda) implied by a bolometric luminosity.

    Inverts Runnoe+2012 [1201.5155], Eq. 10 & 13:
        log(Liso) = (1.85 +- 1.27) + (0.98 +- 0.03) log(3000*L3000)
    """
    alpha, beta = 1.85, 0.98
    return _lbol_to_lband__pow_law(lbol, alpha, beta, lum0=1.0, fiso=0.75)
def lum1450_from_lbol_runnoe2012(lbol):
    """1450A band luminosity (lambda*L_lambda) implied by a bolometric luminosity.

    Inverts Runnoe+2012 [1201.5155], Eq. 9 & 13:
        log(Liso) = (4.74 +- 1.00) + (0.91 +- 0.02) log(1450*L1450)
    """
    alpha, beta = 4.74, 0.91
    return _lbol_to_lband__pow_law(lbol, alpha, beta, lum0=1.0, fiso=0.75)
def lbol_from_3000ang_runnoe2012(lam_lum_lam):
    """Bolometric luminosity from the 3000A band luminosity (lambda*L_lambda).

    Runnoe+2012 [1201.5155], Eq. 10 & 13:
        log(Liso) = (1.85 +- 1.27) + (0.98 +- 0.03) log(3000*L3000)
    """
    alpha, beta = 1.85, 0.98
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
def lbol_from_1450ang_runnoe2012(lam_lum_lam):
    """Bolometric luminosity from the 1450A band luminosity (lambda*L_lambda).

    Runnoe+2012 [1201.5155], Eq. 9 & 13:
        log(Liso) = (4.74 +- 1.00) + (0.91 +- 0.02) log(1450*L1450)
    """
    alpha, beta = 4.74, 0.91
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
def lbol_from_2to10kev_all_runnoe2012(lam_lum_lam):
    """Bolometric luminosity from the 2-10 keV X-ray luminosity (full quasar sample).

    Runnoe+2012 [1201.5155], Table 5:
        log(Liso) = (25.14 +- 1.93) + (0.47 +- 0.043) log(L2-10keV)
    """
    alpha, beta = 25.14, 0.47
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
def lbol_from_2to10kev_RL_runnoe2012(lam_lum_lam):
    """Bolometric luminosity from the 2-10 keV luminosity for radio-loud quasars.

    Runnoe+2012 [1201.5155], Eq. 14:
        log(Liso,RL) = (23.04 +- 3.60) + (0.52 +- 0.080) log(L2-10keV)
    """
    alpha, beta = 23.04, 0.52
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
def lbol_from_2to10kev_RQ_runnoe2012(lam_lum_lam):
    """Bolometric luminosity from the 2-10 keV luminosity for radio-quiet quasars.

    Runnoe+2012 [1201.5155], Eq. 15:
        log(Liso,RQ) = (33.06 +- 3.17) + (0.29 +- 0.072) log(L2-10keV)
    """
    alpha, beta = 33.06, 0.29
    return _lband_to_lbol__pow_law(lam_lum_lam, alpha, beta, lum0=1.0, fiso=0.75)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.