content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
"""
Playground for downloading GenBank files, wrapping in bowtie2, sCLIP/SOR prep
"""
from Bio import Entrez
import csv
import re
import requests
import subprocess
import os
from Bio import SeqIO
import shutil
from urllib.error import HTTPError
import time
import inv_config as config
# TODO Remove .gbk files at the end that don't have SOR/sCLIP to save disc space?
# my ncbi info - please don't share haha. You can get your own easily by logging in to NCBI and requesting an API Key
Entrez.email = config.email
Entrez.api_key = config.api_key
def ascp(accession_num, save_path=None):
    """Download the FASTQ files for an ENA/SRA run accession via Aspera ascp.

    Queries ENA's file report for the run's FASTQ FTP URLs, then fetches each
    file with ascp into ``save_path``.

    :param accession_num: ENA/SRA run accession (e.g. an ERR/SRR/DRR id).
    :param save_path: destination directory; defaults to the CWD at call time.
    :return: True once downloads were attempted for both mates, False if
        fewer than two FASTQ links were found (single-end or no submission).
    """
    if save_path is None:
        # Resolve at call time; a default of os.getcwd() in the signature
        # would be evaluated once at import and frozen.
        save_path = os.getcwd()
    url = 'https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession={0}&result=read_run&fields=fastq_ftp'.format(
        accession_num)
    # Send the request to ENA
    print("Requesting ENA for FASTQ FTP link for accession run {0}...".format(accession_num))
    r = requests.get(url)
    # from the text of the request, grab the fastq link(s)
    fastq_finder = re.compile('ftp.*?fastq.gz')
    print("FASTQ FTP links found:")
    fastq_links = fastq_finder.findall(r.text)
    if len(fastq_links) < 2:
        print("Insufficient links found! Please check accession number or if the accession has submitted FASTQ files.")
        return False
    for link in fastq_links:
        print(link)
    # Alright, now for each link, build an ascp command
    # Modify as needed, but should be default
    ascp_openssh_file = config.ascp_ssh_key
    print("Retrieving files by ascp...")
    for link in fastq_links:
        # build the ASCP file path (drop the 'ftp.sra.ebi.ac.uk/' prefix)
        ascp_path = 'era-fasp@fasp.sra.ebi.ac.uk:/' + link[18:]
        # build the ASCP command
        cmd = 'ascp -QT -l300M -P33001 -i "{0}" {1} {2}'.format(ascp_openssh_file, ascp_path, save_path)
        # check=True makes a non-zero ascp exit raise CalledProcessError;
        # without it the except clause below could never fire.
        try:
            subprocess.run(cmd, shell=True, check=True)
        except subprocess.CalledProcessError as err:
            print("Error:", err)
    return True
# uses bowtie2-build to make a reference index
def bowtie2_build(ref, ind):
    """Build a bowtie2 index named ``ind`` from the FASTA reference ``ref``."""
    build_cmd = ['bowtie2-build', ref, ind]
    # bowtie2-build is chatty; merge stderr into stdout and discard both.
    subprocess.run(build_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
# align using bowtie2
# note bowtie2 uses stderr for output, oddly enough
# Use local mode to capture clipping events
def bowtie2(ind, fq1, fq2, sam, use_threads=4):
    """Align a paired-end read set against index ``ind`` using bowtie2 --local.

    :return: True on success, False when bowtie2 exits non-zero.
    """
    align_cmd = [
        'bowtie2', '-x', ind,
        '-1', fq1, '-2', fq2,
        '-S', sam,
        '-p', str(use_threads),
        '--local',
    ]
    # Very rarely a poorly formatted FASTQ file makes bowtie2 fail; report
    # that as a soft failure rather than crashing the pipeline.
    try:
        subprocess.run(align_cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("Bowtie2 error! {0}".format(e))
        return False
    return True
def get_file_len(a):
    """Return the number of lines in the text file at path ``a``.

    Streams the file instead of materializing every line with readlines(),
    which matters for the large SAM-derived CSV intermediates counted here.
    """
    with open(a, 'r') as b:
        return sum(1 for _ in b)
# get SOR files using awk
def dump_sor(reads, outfile):
    """Write SOR (same-orientation read) records from a SAM stream to a CSV.

    :param reads: open SAM file handle (body lines only); passed as awk's
        stdin, so it must be a real file object with a fileno.
    :param outfile: destination CSV path (overwritten if present).
    """
    headers = 'colors,POS,CIGAR,TLEN\n'
    # Keep lines whose FLAG matches the same-orientation values AND whose
    # TLEN is non-zero.  The previous command juxtaposed the two conditions
    # without '&&'; awk treats that as string concatenation, so the filter
    # effectively matched almost every line.
    # NOTE(review): '$9 !~ /0/' also drops any TLEN merely containing a 0
    # digit (e.g. 105) -- confirm whether '$9 != 0' was the real intent.
    cmd = "awk 'BEGIN { OFS = \",\" } $2 ~ /113|177|65|129/ && $9 !~ /0/ {print $2, $4, $6, $9}'"
    with open(outfile, 'w') as o:
        # write the CSV header first and flush so awk's output lands after it
        o.write(headers)
        o.flush()
        subprocess.run(cmd, stdin=reads, shell=True, stdout=o)
    return
# get sCLIP reads using awk
def dump_sclip(reads, outfile):
    """Write soft-clipped-at-start (sCLIP) reads from a SAM stream to a CSV.

    :param reads: open SAM file handle (body lines only); used as awk's
        stdin, so it must be a real file object with a fileno.
    :param outfile: destination CSV path (overwritten if present).
    """
    headers = 'colors,POS,CIGAR,TLEN\n'
    # First awk pattern skips ({next;}) pairs whose soft clip is on the wrong
    # end for their orientation (flags 147/83 with a leading 1-2 digit clip,
    # or 99/163 with a trailing clip); of the rest, reads whose CIGAR starts
    # with a 1-2 digit soft clip are printed.
    # NOTE(review): the flag tests are regex matches on $2, so /147|83/ also
    # matches other flag values containing those digit runs -- confirm.
    cmd = "awk 'BEGIN {OFS=\",\"} ($2 ~ /147|83/ && $6 ~ /^..?S/) || ($2 ~ /99|163/ && $6 ~ /S$/) {next;} $6 ~ /^..?S/ {print $2, $4, $6, $9 }'"
    with open(outfile, 'w') as o:
        # write the CSV header first and flush so awk's output lands after it
        o.write(headers)
        o.flush()
        # maybe using rb will allow us to properly read samtools bam
        subprocess.run(cmd, stdin=reads, shell=True, stdout=o)
    return
# bam, sort, and index using samtools
def bamify(sam_file, bam_file, use_threads=8):
    """Convert a SAM file into a sorted, indexed BAM via samtools.

    Writes an intermediate 'tmp.bam' in the CWD and removes it afterwards.
    NOTE(review): the fixed tmp.bam name means concurrent runs in the same
    directory would clobber each other -- confirm single-process usage.
    """
    # first, convert sam to bam
    print("Convering to BAM...")
    subprocess.run(['samtools', 'view', '-u', '-b', sam_file, '-o', 'tmp.bam', '-@', str(use_threads)])
    # then, sort the bam file
    print("Sorting...")
    subprocess.run(['samtools', 'sort', 'tmp.bam', '-o', bam_file, '-@', str(use_threads)],
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    # now, index the bam file
    print("Indexing...")
    subprocess.run(['samtools', 'index', bam_file])
    os.remove('tmp.bam')
    return
# Extract reads by accession
# Keep in memory to pass to awk commands
def extract_reads(acc, bam_file, sor_file, sclip_file, use_threads=8):
    """Extract the SOR and sCLIP read tables for one reference accession.

    Pulls the reads mapped to ``acc.1`` out of ``bam_file`` into a temporary
    SAM, then feeds that SAM through dump_sor/dump_sclip to produce two CSVs.
    """
    print("Extracting reads from {0}...".format(acc))
    # Unfortunately, assumes .1 for accession version...could alternatively re-do the grab so it maintains version
    with open('tmp.sam', 'w') as o:
        subprocess.run(['samtools', 'view', bam_file, acc+'.1', '-@', str(use_threads)], stdout=o, encoding='utf-8')
    # re-open the temp SAM for each pass so awk reads it from the start
    with open('tmp.sam', 'r') as i:
        #print("Extracting SOR reads...")
        dump_sor(i, sor_file)
    with open('tmp.sam', 'r') as i:
        #print("Extracting sCLIP reads...")
        dump_sclip(i, sclip_file)
    os.remove('tmp.sam')
    return
### MAIN ###
# Firstly, load up the data table
# All paths and thresholds come from inv_config so runs are reproducible.
data_file = config.data_file          # TSV table of biosamples to process
acc_save_path = config.acc_save_path  # per-accession GenBank files
fasta_save_path = config.fasta_save_path
run_save_path = config.run_save_path  # downloaded FASTQ run files
sam_save_path = config.sam_save_path
sor_save_path = config.sor_save_path
sclip_save_path = config.sclip_save_path
script_path = config.script_path      # downstream detection script
acc_list_path = config.acc_list_path  # list of accessions passing thresholds
use_threads = config.use_threads
sor_read_threshold = config.sor_read_threshold
sclip_read_threshold = 100 # Usually not a problem.
max_error = 10 # Timeout error threshold
with open(data_file, 'r') as f:
    reader = csv.DictReader(f, delimiter='\t')
    # One row per biosample: download reads, fetch references, align with
    # bowtie2, extract SOR/sCLIP tables, then hand off to the detection
    # script and clean up.
    for row in reader:
        my_paths = [acc_save_path, fasta_save_path, run_save_path, sam_save_path, sor_save_path, sclip_save_path]
        for path in my_paths:
            if not os.path.exists(path):
                os.mkdir(path)
        acc_with_genes = [] # List of accessions we want to run SOR mapping for
        good_acc = [] # List of accession numbers we ultimately want to pass to detect_inversion_clusters
        # Now, we want a couple things: The SRA Accession, the Biosample Accession, and the list of NUCCORE Accessions
        sra_accession = row['SRA']
        biosample_accession = row['Biosample']
        nuccore_accessions = row['RefSeq Accessions'].split(',')
        print("Now processing: {0}".format(biosample_accession))
        # First, we need to bowtie2 the reads and reference together. So let's first grab the reads:
        code = ascp(sra_accession, save_path=run_save_path)
        if not code:
            print("ASCP error! Skipping...")
        else:
            # Then, let's grab the accessions using Entrez Efetch - we want separate gbk files and a combined FASTA
            # Sometimes there are literally too many accessions, and we get an HTTP 414 error. Break it up by 50?
            batch_size = 50
            for i in range(0, len(nuccore_accessions), batch_size):
                end = min(len(nuccore_accessions), i + batch_size)
                print('Retrieving gbk records {0} to {1}...'.format(i, end))
                current_acc = nuccore_accessions[i:end]
                nuccore_acc_query = ','.join(current_acc)
                num_attempts = 1
                # Retry transient server-side (5xx) failures with a delay.
                # NOTE(review): if every attempt fails with 5xx the loop exits
                # with `handle` never assigned, so the SeqIO.parse below raises
                # NameError -- consider re-raising once retries are exhausted.
                while num_attempts < max_error:
                    try:
                        handle = Entrez.efetch(db='nuccore', id=nuccore_acc_query, rettype='gbwithparts', retmode='text')
                        num_attempts = max_error + 1  # sentinel: success, leave loop
                    except HTTPError as err:
                        if 500 <= err.code <= 599:
                            print("Received error from server: {0}".format(err.code))
                            print("Attempt {0} of {1}".format(num_attempts, max_error))
                            num_attempts += 1
                            time.sleep(15)
                        else:
                            raise
                for record in SeqIO.parse(handle, format='gb'):
                    name = os.path.join(acc_save_path, record.name + '.gb')
                    # Verify CDS info. If none, exclude from further analysis
                    elements = record.features
                    num_cds = 0
                    for element in elements:
                        if element.type == "CDS":
                            num_cds += 1
                    if num_cds == 0:
                        print("No gene data detected for {0}. Removing from analysis...".format(record.name))
                    else:
                        acc_with_genes.append(record.name)
                        with open(name, 'w') as out_handle:
                            SeqIO.write(record, out_handle, "gb")
                handle.close()
                print("{0} accessions with gene data detected.".format(len(acc_with_genes)))
                print("Retrieving fasta records {0} to {1}...".format(i, end))
                fasta_output = os.path.join(fasta_save_path, biosample_accession + '.fasta')
                fasta_records = []
                with Entrez.efetch(db='nuccore', id=nuccore_acc_query, rettype='fasta', retmode='text') as handle:
                    for record in SeqIO.parse(handle, format='fasta'):
                        fasta_records.append(record)
                # Now write all the fasta to a single combined reference record.
                # NOTE(review): opened in 'w' mode inside the batch loop, so
                # each 50-accession batch overwrites the previous batch's
                # records -- confirm append mode (or writing once after the
                # loop) wasn't intended for >50 accessions.
                with open(fasta_output, 'w') as out_handle:
                    SeqIO.write(fasta_records, out_handle, "fasta")
            # Now let's use bowtie2 to align the reads
            ref_path = os.path.join(fasta_save_path, biosample_accession + '.fasta')
            f1 = os.path.join(run_save_path, sra_accession + '_1.fastq.gz')
            f2 = os.path.join(run_save_path, sra_accession + '_2.fastq.gz')
            sam_output = os.path.join(sam_save_path, biosample_accession + '.sam')
            print("Aligning {0} to read set {1} using bowtie2...".format(ref_path, sra_accession))
            bowtie2_build(ref_path, ind='INDEX')
            code = bowtie2(ind='INDEX', fq1=f1, fq2=f2, sam=sam_output, use_threads=use_threads)
            if not code:
                print("Bowtie2 encountered an error! Skipping {0}...".format(biosample_accession))
            else:
                bam_output = os.path.join(sam_save_path, biosample_accession + '.bam')
                # print("Indexing and sorting using SAMtools...")
                bamify(sam_output, bam_output, use_threads=use_threads)
                # Now, for each accession, let's extract the reads
                for acc in acc_with_genes:
                    sor_file = os.path.join(sor_save_path, acc + '_sor.csv')
                    sclip_file = os.path.join(sclip_save_path, acc + '_sclip.csv')
                    # Extract reads for the accession
                    extract_reads(acc, bam_output, sor_file, sclip_file, use_threads=use_threads)
                    # Check to make sure the SOR and sCLIP files aren't empty. For ones that aren't add to accessions_list.txt
                    sor_lines = get_file_len(sor_file)
                    sclip_lines = get_file_len(sclip_file)
                    if (sor_lines >= sor_read_threshold) and (sclip_lines >= sclip_read_threshold):
                        good_acc.append(acc)
                    else:
                        print("{0} has insufficient SOR/sCLIP reads! Excluding from analysis. (SOR={1}, sCLIP={2})".format(acc, sor_lines, sclip_lines))
                # Create a list of accessions with actual SOR/sCLIP data to feed to detect_inversion_clusters
                with open(acc_list_path, 'w') as acc_list:
                    for acc in good_acc:
                        acc_list.write(acc+'\n')
                # Now, with everything in place, run the detect inversions script, placing output in a Biosample folder
                # Also check to make sure SOR and sCLIP have data in them first!
                print("Executing detection script...")
                subprocess.run(['python3', '{0}'.format(script_path), biosample_accession])
        # Now remove all the data we no longer need to save hard disk space.
        print("Cleaning SAM/BAM files for {0}...".format(biosample_accession))
        shutil.rmtree(sam_save_path, ignore_errors=True)
        print("Cleaning Run files for {0}...".format(sra_accession))
        shutil.rmtree(run_save_path, ignore_errors=True)
        print("Cleaning sCLIP files for {0}...".format(biosample_accession))
        shutil.rmtree(sclip_save_path, ignore_errors=True)
        print("Cleaning SOR files for {0}...".format(biosample_accession))
        shutil.rmtree(sor_save_path, ignore_errors=True)
        print("Cleaning Entrez FASTA files for {0}...".format(biosample_accession))
        shutil.rmtree(fasta_save_path, ignore_errors=True)
print("Done!")
|
nilq/baby-python
|
python
|
from __future__ import division
import os
import math
import scipy.misc
import numpy as np
import argparse
from glob import glob
from pose_evaluation_utils import mat2euler, dump_pose_seq_TUM
# Command-line interface: dataset location, output location, and which
# KITTI odometry sequence / snippet length to export.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, help="path to kitti odometry dataset")
parser.add_argument("--output_dir", type=str, help="path to output pose snippets")
parser.add_argument("--seq_id", type=int, default=9, help="sequence id to generate groundtruth pose snippets")
parser.add_argument("--seq_length", type=int, default=5, help="sequence length of pose snippets")
args = parser.parse_args()
def is_valid_sample(frames, tgt_idx, seq_length):
    """Check whether a snippet centered at ``tgt_idx`` fits entirely inside
    the frame list and stays within a single drive.

    Each entry of ``frames`` is a '<drive> <frame>' string; the snippet spans
    ``(seq_length - 1) / 2`` frames on either side of the target.
    """
    half_span = int((seq_length - 1) / 2)
    lo = tgt_idx - half_span
    hi = tgt_idx + half_span
    # Reject snippets that would run off either end of the sequence.
    if lo < 0 or hi >= len(frames):
        return False
    drive_of = lambda idx: frames[idx].split(' ')[0]
    # Valid only when both edges belong to the same drive as the target.
    return drive_of(lo) == drive_of(tgt_idx) == drive_of(hi)
def main():
    """Dump ground-truth TUM-format pose snippets for one KITTI odometry sequence."""
    pose_gt_dir = args.dataset_dir + 'poses/'
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    seq_dir = os.path.join(args.dataset_dir, 'sequences', '%.2d' % args.seq_id)
    img_dir = os.path.join(seq_dir, 'image_2')
    # one ground-truth pose / timestamp per camera frame
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (args.seq_id, n) for n in range(N)]
    with open(args.dataset_dir + 'sequences/%.2d/times.txt' % args.seq_id, 'r') as f:
        times = f.readlines()
    times = np.array([float(s[:-1]) for s in times])
    with open(pose_gt_dir + '%.2d.txt' % args.seq_id, 'r') as f:
        poses = f.readlines()
    poses_gt = []
    for pose in poses:
        # each line is a flattened 3x4 [R|t] matrix
        pose = np.array([float(s) for s in pose[:-1].split(' ')]).reshape((3,4))
        # invert the rigid transform: R' = R^-1, t' = -R^-1 t
        rot = np.linalg.inv(pose[:,:3])
        tran = -np.dot(rot, pose[:,3].transpose())
        # mat2euler returns angles in z, y, x order; stored as [t, rx, ry, rz]
        rz, ry, rx = mat2euler(rot)
        poses_gt.append(tran.tolist() + [rx, ry, rz])
    poses_gt = np.array(poses_gt)
    max_src_offset = (args.seq_length - 1)//2
    for tgt_idx in range(N):
        if not is_valid_sample(test_frames, tgt_idx, args.seq_length):
            continue
        if tgt_idx % 100 == 0:
            print('Progress: %d/%d' % (tgt_idx, N))
        # snippet spans [tgt - offset, tgt + offset] inclusive
        pred_poses = poses_gt[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
        curr_times = times[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
        out_file = args.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
        dump_pose_seq_TUM(out_file, pred_poses, curr_times)
# NOTE(review): runs on import; consider an `if __name__ == '__main__':` guard.
main()
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
# Plot average 100NN query time against data dimensionality from timings.txt.
with open('timings.txt','r') as inp:
    inp.readline()  # skip the header line before numeric parsing
    times = np.loadtxt(inp, delimiter=',')
print(times.shape)
# row indices to plot; dimension plotted is 2 + index (i.e. 2, 3, 5, 10, 15, 20)
selected = list([0,1,3,8,13,18])
# plt.plot((2+np.array(range(19))),times[:,0],'r^-',label="Best first search algorithm")
# plt.plot((2+np.array(range(19))),times[:,1],'bd-',label="Sequential scan algorithm")
# column 7 holds the LSH timings at 99% distance precision
plt.plot((2+np.array(range(19)))[selected],times[:,7][selected],'bd-',label="Locality Sensitive Hashing (99% dist prec)")
# plt.plot((2+np.array(range(19)))[selected],times[:,1][selected],'bd-',label="Sequential scan algorithm")
plt.title("Average Query Time vs Dimension")
plt.xlabel('Dimension')
plt.ylabel('Average Time for 100NN query(in sec)')
# plt.ylim([0,0.01])
plt.legend()
plt.grid()
plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# with open('timings.txt','r') as inp:
# inp.readline()
# times = np.loadtxt(inp, delimiter=',')
# print(times.shape)
# selected = list([0,1,3,8,13,18])
# # plt.plot((2+np.array(range(19))),times[:,0],'r^-',label="Best first search algorithm")
# # plt.plot((2+np.array(range(19))),times[:,1],'bd-',label="Sequential scan algorithm")
# # plt.plot((2+np.array(range(19)))[selected],(times[:,1]/times[:,7])[selected],'r^-',label="Speedup")
# # plt.plot((2+np.array(range(19)))[selected],times[:,7][selected],'bd-',label="LSH (95% dist_prec)")
# plt.title("Average query time ratio(seq_scan/lsh(99% dist_prec)) vs Dimension")
# plt.xlabel('Dimension')
# plt.ylabel('Ratio of Average times for 100NN query(in sec)')
# # plt.ylim([0,0.01])
# plt.legend()
# plt.grid()
# plt.show()
|
nilq/baby-python
|
python
|
from pessoa import Pessoa
class Aluno(Pessoa):
    """A student: a Pessoa with an RM number, a class id and a list of grades."""

    def __init__(self, rm, turma_id, rg, nome):
        super().__init__(rg, nome)
        self._rm = rm
        self._turma_id = turma_id
        self._notas = []

    def media(self):
        """Arithmetic mean of the recorded grades, or None when there are none."""
        if not self._notas:
            return None
        return sum(self._notas) / len(self._notas)

    def insere_nota(self, nota):
        """Append one grade to the student's record."""
        self._notas.append(nota)

    def __str__(self):
        # to_string-style text representation of the student
        return f'RM: {self._rm} - Nome: {self._nome}'
|
nilq/baby-python
|
python
|
import numpy as np
import torch
class FeaturesLinear(torch.nn.Module):
    """First-order (linear) term over categorical fields.

    Each field's local category index is shifted by its field offset into a
    single flat weight table; the per-sample output is the sum of the looked
    up weights plus a global bias.
    """

    def __init__(self, field_dims, output_dim=1):
        super().__init__()
        self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
        self.bias = torch.nn.Parameter(torch.zeros((output_dim,)))
        # cumulative start index of every field within the flat table
        self.offsets = np.cumsum((0, *field_dims))[:-1].astype(np.int64)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        flat_idx = x + x.new_tensor(self.offsets).unsqueeze(0)
        return self.fc(flat_idx).sum(dim=1) + self.bias
class FeaturesEmbedding(torch.nn.Module):
    """Embeds each categorical field into a shared ``embed_dim`` space.

    All fields share one flat embedding table; ``offsets`` maps each field's
    local category index to its row in the flat table.
    """

    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
        # int64 (not int32): torch embedding indices are int64, and this
        # matches the dtype FeaturesLinear uses for the same offsets.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        torch.nn.init.xavier_uniform_(self.embedding.weight.data)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :return: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return self.embedding(x)
class MultiHotEmbedding(torch.nn.Module):
    """Pools a multi-hot encoded field into one embedding vector.

    The multi-hot vector is matrix-multiplied with a learned table, i.e. the
    (weighted) sum of the embeddings of all active categories.
    """

    def __init__(self, multi_hotencoding_size, embed_dim):
        super().__init__()
        self.embed_dim = embed_dim
        table = torch.zeros([multi_hotencoding_size, embed_dim], dtype=torch.float32)
        self.emb_w = torch.nn.Parameter(table)
        torch.nn.init.xavier_uniform_(self.emb_w)

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, multi_hotencoding_size)``
        :return: Float tensor of size ``(batch_size, 1, embed_dim)``
        """
        pooled = torch.matmul(x, self.emb_w)
        return pooled.reshape(-1, 1, self.embed_dim)
class FactorizationMachine(torch.nn.Module):
    """Second-order FM interaction term: 0.5 * ((sum_f v_f)^2 - sum_f v_f^2).

    With ``reduce_sum`` the embed dimension is summed out, yielding one
    scalar interaction score per sample.
    """

    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
        """
        field_sum = x.sum(dim=1)
        interaction = field_sum * field_sum - (x * x).sum(dim=1)
        if self.reduce_sum:
            interaction = interaction.sum(dim=1, keepdim=True)
        return 0.5 * interaction
class MultiLayerPerceptron(torch.nn.Module):
    """Stack of Linear -> BatchNorm -> ReLU -> Dropout blocks.

    Optionally ends with a final Linear projection to a single logit.
    """

    def __init__(self, input_dim, embed_dims, dropout, output_layer=True):
        super().__init__()
        blocks = []
        prev_dim = input_dim
        for hidden_dim in embed_dims:
            blocks.extend([
                torch.nn.Linear(prev_dim, hidden_dim),
                torch.nn.BatchNorm1d(hidden_dim),
                torch.nn.ReLU(),
                torch.nn.Dropout(p=dropout),
            ])
            prev_dim = hidden_dim
        if output_layer:
            # final projection down to one logit
            blocks.append(torch.nn.Linear(prev_dim, 1))
        self.mlp = torch.nn.Sequential(*blocks)

    def forward(self, x):
        """
        :param x: Float tensor of size ``(batch_size, embed_dim)``
        """
        return self.mlp(x)
# class DeepFactorizationMachineModel(torch.nn.Module):
# """
# A pytorch implementation of DeepFM.
# Reference:
# H Guo, et al. DeepFM: A Factorization-Machine based Neural Network for CTR Prediction, 2017.
# """
# def __init__(self, field_dims, embed_dim, mlp_dims, dropout, device):
# super().__init__()
# self.linear = FeaturesLinear(field_dims)
# self.fm = FactorizationMachine(reduce_sum=True)
# self.embedding = FeaturesEmbedding(field_dims, embed_dim)
# self.embed_output_dim = len(field_dims) * embed_dim
# self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
# self.to(device)
# def forward(self, x):
# """
# :param x: Long tensor of size ``(batch_size, num_fields)``
# """
# embed_x = self.embedding(x) # [batch_size, num_fields, emb_size] <-[batch_size, num_fields]
# x = self.linear(x) + self.fm(embed_x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
# return torch.sigmoid(x.squeeze(1))
class DeepFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of DeepFM with an extra multi-hot (genres) input.
    Reference:
        H Guo, et al. DeepFM: A Factorization-Machine based Neural Network for CTR Prediction, 2017.
    """

    def __init__(self, field_dims, multi_hot_size, embed_dim, mlp_dims, dropout, device):
        super().__init__()
        self.linear = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.multi_embedding = MultiHotEmbedding(multi_hot_size, embed_dim)
        # +1 field: the pooled multi-hot embedding is appended to the fields
        self.embed_output_dim = (len(field_dims) + 1) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout)
        self.to(device)

    def forward(self, x, genres):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        :param genres: Float tensor of size ``(batch_size, multi_hot_size)``
        """
        field_emb = self.embedding(x)             # (batch, num_fields, embed_dim)
        genre_emb = self.multi_embedding(genres)  # (batch, 1, embed_dim)
        all_emb = torch.cat([field_emb, genre_emb], dim=1)
        logits = self.linear(x) + self.fm(all_emb) + self.mlp(all_emb.view(-1, self.embed_output_dim))
        return torch.sigmoid(logits.squeeze(1))
|
nilq/baby-python
|
python
|
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum base exception handling.
Includes decorator for re-raising Solum-type exceptions.
"""
import collections
import functools
import sys
import uuid
from keystoneclient import exceptions as keystone_exceptions
from oslo_config import cfg
import pecan
import six
import wsme
from solum.common import safe_utils
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Config options controlling how exception message formatting errors behave.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal')
]
def list_opts():
    """Yield (group, options) pairs for oslo.config option discovery."""
    yield None, exc_log_opts
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions.

    It logs the exception as well as optionally sending
    it to the notification system.
    """
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception as e:
                # save_and_reraise_exception re-raises the original error
                # after the (best-effort) notification below is emitted.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        payload = dict(exception=e,
                                       private=dict(args=call_dict)
                                       )
                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR
                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__
                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)
        return functools.wraps(f)(wrapped)
    return inner
# Generic message returned to clients in place of internal error details;
# the correlation id lets operators find the real error in the server logs.
OBFUSCATED_MSG = _('Your request could not be handled '
                   'because of a problem in the server. '
                   'Error Correlation id is: %s')
def wrap_controller_exception(func, func_server_error, func_client_error):
    """This decorator wraps controllers methods to handle exceptions:

    - if an unhandled Exception or a SolumException with an error code >=500
    is catched, raise a http 5xx ClientSideError and correlates it with a log
    message
    - if a SolumException is catched and its error code is <500, raise a http
    4xx and logs the excp in debug mode
    """
    @functools.wraps(func)
    def wrapped(*args, **kw):
        try:
            return func(*args, **kw)
        except Exception as excp:
            LOG.error(excp)
            # default to 500 unless the exception declares its own HTTP code
            http_error_code = 500
            if hasattr(excp, 'code'):
                http_error_code = excp.code
            if http_error_code >= 500:
                # log the error message with its associated
                # correlation id
                log_correlation_id = str(uuid.uuid4())
                LOG.error("%s:%s", log_correlation_id, str(excp))
                # raise a client error with an obfuscated message
                func_server_error(log_correlation_id, http_error_code)
            else:
                # raise a client error the original message
                func_client_error(excp, http_error_code)
    return wrapped
def wrap_wsme_controller_exception(func):
    """This decorator wraps wsme controllers to handle exceptions."""
    def _func_server_error(log_correlation_id, status_code):
        # 5xx: hide internal details behind the correlation id
        raise wsme.exc.ClientSideError(
            six.text_type(OBFUSCATED_MSG % log_correlation_id), status_code)
    def _func_client_error(excp, status_code):
        # 4xx: the original message is safe to show the caller
        raise wsme.exc.ClientSideError(six.text_type(excp), status_code)
    return wrap_controller_exception(func,
                                     _func_server_error,
                                     _func_client_error)
def wrap_pecan_controller_exception(func):
    """This decorator wraps pecan controllers to handle exceptions."""
    def _func_server_error(log_correlation_id, status_code):
        # 5xx: return only the obfuscated message with the correlation id
        pecan.response.status = status_code
        pecan.response.text = six.text_type(OBFUSCATED_MSG %
                                            log_correlation_id)
        # message body for errors is just a plain text message
        # The following code is functionally equivalent to calling:
        #
        # pecan.override_template(None, "text/plain")
        #
        # We do it this way to work around a bug in our unit-test framework
        # in which the mocked request object isn't properly mocked in the pecan
        # core module (gilbert.plz@oracle.com)
        pecan.request.pecan['override_template'] = None
        pecan.request.pecan['override_content_type'] = 'text/plain'
    def _func_client_error(excp, status_code):
        # 4xx: the original message is safe to show the caller
        pecan.response.status = status_code
        pecan.response.text = six.text_type(excp)
        # The following code is functionally equivalent to calling:
        #
        # pecan.override_template(None, "text/plain")
        #
        # We do it this way to work around a bug in our unit-test framework
        # in which the mocked request object isn't properly mocked in the pecan
        # core module (gilbert.plz@oracle.com)
        pecan.request.pecan['override_template'] = None
        pecan.request.pecan['override_content_type'] = 'text/plain'
    return wrap_controller_exception(func,
                                     _func_server_error,
                                     _func_client_error)
def wrap_wsme_pecan_controller_exception(func):
    """Error handling for controllers decorated with wsmeext.pecan.wsexpose:

    Controllers wrapped with wsme_pecan.wsexpose don't throw
    exceptions but handle them internally. We need to intercept
    the response and mask potentially sensitive information.
    """
    @functools.wraps(func)
    def wrapped(*args, **kw):
        ret = func(*args, **kw)
        # NOTE(review): collections.Mapping was removed in Python 3.10;
        # collections.abc.Mapping is the modern spelling -- confirm the
        # supported interpreter range (six suggests py2/py3) before changing.
        ismapping = isinstance(ret, collections.Mapping)
        if (pecan.response.status_code >= 500 and ismapping):
            log_correlation_id = str(uuid.uuid4())
            LOG.error("%s:%s", log_correlation_id, ret.get("faultstring",
                                                           "Unknown Error"))
            # replace the real fault text with the obfuscated message
            ret['faultstring'] = six.text_type(OBFUSCATED_MSG %
                                               log_correlation_id)
        return ret
    return wrapped
def wrap_keystone_exception(func):
    """Wrap keystone exceptions and throw Solum specific exceptions."""
    @functools.wraps(func)
    def wrapped(*args, **kw):
        try:
            return func(*args, **kw)
        except keystone_exceptions.AuthorizationFailure:
            raise AuthorizationFailure(
                client=func.__name__, message="reason: %s" % sys.exc_info()[1])
        except keystone_exceptions.ClientException:
            # any other keystone client failure is also surfaced as an
            # AuthorizationFailure with the underlying error appended
            raise AuthorizationFailure(
                client=func.__name__,
                message="unexpected keystone client error occurred: %s"
                % sys.exc_info()[1])
    return wrapped
@six.python_2_unicode_compatible
class SolumException(Exception):
    """Base Solum Exception

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.

    NOTE(review): instantiating SolumException directly fails (the base
    defines no msg_fmt) -- confirm that is intended.
    """
    # Fallback text; normally replaced by msg_fmt % kwargs in __init__.
    message = _("An unknown exception occurred.")
    # Default HTTP status code mapped to this exception family.
    code = 500
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        if CONF.fatal_exception_format_errors:
            assert isinstance(self.msg_fmt, six.text_type)
        try:
            self.message = self.msg_fmt % kwargs
        except KeyError:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'),
                          extra=dict(
                              private=dict(
                                  msg=self.msg_fmt,
                                  args=kwargs
                              )
                          )
                          )
            if CONF.fatal_exception_format_errors:
                raise
    def __str__(self):
        return self.message
# Concrete Solum exceptions.  ``code`` overrides map each family onto the
# HTTP status returned by the API layer (default is 500 from SolumException).
class ResourceLimitExceeded(SolumException):
    msg_fmt = _("Resource limit exceeded. Reason: %(reason)s")
class BadRequest(SolumException):
    msg_fmt = _("The request is malformed. Reason: %(reason)s")
    code = 400
class ObjectNotFound(SolumException):
    msg_fmt = _("The %(name)s %(id)s could not be found.")
class ObjectNotUnique(SolumException):
    msg_fmt = _("The %(name)s already exists.")
class RequestForbidden(SolumException):
    msg_fmt = _("The request is forbidden. Reason: %(reason)s")
    code = 403
class ResourceNotFound(ObjectNotFound):
    msg_fmt = _("The %(name)s resource %(id)s could not be found.")
    code = 404
class ResourceExists(ObjectNotUnique):
    msg_fmt = _("The %(name)s resource already exists.")
    code = 409
class ResourceStillReferenced(SolumException):
    msg_fmt = _("The %(name)s resource cannot be deleted because one or more"
                " resources reference it.")
    code = 409
class UnsupportedMediaType(SolumException):
    msg_fmt = _("\'%(name)s\' is not a supported media type for the %(method)s"
                " method of this resource")
    code = 415
class Unprocessable(SolumException):
    msg_fmt = _("Server is incapable of processing the specified request.")
    code = 422
class PlanStillReferenced(ResourceStillReferenced):
    msg_fmt = _("Plan %(name)s cannot be deleted because one or more"
                " Assemblies reference it.")
class LPStillReferenced(ResourceStillReferenced):
    msg_fmt = _("Languagepack %(name)s cannot be deleted because one or more"
                " applications reference it.")
# NOTE(review): shadows the NotImplemented builtin within this module's
# namespace -- confirm callers always reference it via the module.
class NotImplemented(SolumException):
    msg_fmt = _("The requested operation is not implemented.")
    code = 501
class AuthorizationFailure(SolumException):
    msg_fmt = _("%(client)s connection failed. %(message)s")
# NOTE(review): the two classes below inherit plain Exception, so
# SolumException.__init__ never formats their msg_fmt.
class InvalidObjectSizeError(Exception):
    msg_fmt = _("Invalid object size.")
class MaxRetryReached(Exception):
    msg_fmt = _("Maximum retries has been reached.")
|
nilq/baby-python
|
python
|
import fileReader
import inputHandler
import fileHandler
import os
# Module-level collaborators shared by all the functions below.
inputStuff = inputHandler.inputHandler()
fileStuff = fileHandler.FileHandler()
def getMode():
    """Dispatch on the user-selected mode: manual file prompt or automatic
    batch processing driven by a file."""
    mode = inputStuff.determineAutoOrManual()
    if mode == 'man':
        getFileInfo()
        loadAudioFile()
    elif mode =='auto':
        # auto mode: FileReader drives the whole run from the named file
        fileProcesser = fileReader.FileReader()
        fileProcesser.processFile(inputStuff.getFileName())
def getFileInfo():
    """Prompt for a filename until an existing file is given, then record it.

    NOTE(review): retries via recursion, so pathological input could exhaust
    the stack -- a loop would be sturdier.
    """
    filename = inputStuff.getFileName()
    if os.path.isfile(filename):
        fileStuff.setFileName(filename)
    else:
        print('file not found, please try again.')
        getFileInfo()
def loadAudioFile():
    """Load the selected file as audio, re-prompting until a loadable file is
    chosen, then split it at the user-provided times.

    The previous version prompted for a new filename after a failed load but
    never retried loadFile(), then fell through to splitFile() on the file
    that had not been loaded.
    """
    while True:
        try:
            fileStuff.loadFile()
            break
        except Exception:
            print('error: not an audio file')
            # pick a different file, then loop back and try loading it
            getFileInfo()
    fileStuff.splitFile(inputStuff.getSplitTimes())
# Entry point: start the interactive mode selection.
getMode()
|
nilq/baby-python
|
python
|
from pybullet_utils import bullet_client
import math
class QuadrupedPoseInterpolator(object):
  """Interpolates base pose and 12 joint angles between two motion-capture
  keyframes, and derives finite-difference velocities."""

  def __init__(self):
    pass

  def ComputeLinVel(self, posStart, posEnd, deltaTime):
    """Finite-difference linear velocity between two 3D positions."""
    return [(posEnd[i] - posStart[i]) / deltaTime for i in range(3)]

  def ComputeAngVel(self, ornStart, ornEnd, deltaTime, bullet_client):
    """Angular velocity (world frame) between two xyzw quaternions."""
    dorn = bullet_client.getDifferenceQuaternion(ornStart, ornEnd)
    axis, angle = bullet_client.getAxisAngleFromQuaternion(dorn)
    return [(axis[i] * angle) / deltaTime for i in range(3)]

  def ComputeAngVelRel(self, ornStart, ornEnd, deltaTime, bullet_client):
    """Angular velocity expressed relative to the starting orientation."""
    # conjugate of the start quaternion (unit quaternion inverse)
    ornStartConjugate = [-ornStart[0], -ornStart[1], -ornStart[2], ornStart[3]]
    pos_diff, q_diff = bullet_client.multiplyTransforms([0, 0, 0], ornStartConjugate,
                                                        [0, 0, 0], ornEnd)
    axis, angle = bullet_client.getAxisAngleFromQuaternion(q_diff)
    return [(axis[i] * angle) / deltaTime for i in range(3)]

  def Slerp(self, frameFraction, frameData, frameDataNext, bullet_client):
    """Blend two keyframes at ``frameFraction`` in [0, 1].

    Stores the interpolated base pose/velocities on self and returns the
    interpolated joint positions and velocities (12 joints).
    """
    keyFrameDuration = frameData[0]
    lerp = lambda a, b: a + frameFraction * (b - a)
    startPos = [frameData[1], frameData[2], frameData[3]]
    endPos = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
    self._basePos = [lerp(s, e) for s, e in zip(startPos, endPos)]
    self._baseLinVel = self.ComputeLinVel(startPos, endPos, keyFrameDuration)
    # reorder the stored w-first quaternion (indices 4..7) into xyzw
    startOrn = [frameData[5], frameData[6], frameData[7], frameData[4]]
    endOrn = [frameDataNext[5], frameDataNext[6], frameDataNext[7], frameDataNext[4]]
    self._baseOrn = bullet_client.getQuaternionSlerp(startOrn, endOrn, frameFraction)
    self._baseAngVel = self.ComputeAngVel(startOrn, endOrn, keyFrameDuration, bullet_client)
    jointPositions = []
    jointVelocities = []
    # 12 joint angles start at record index 8
    for offset in range(12):
      a = frameData[8 + offset]
      b = frameDataNext[8 + offset]
      jointPositions.append(lerp(a, b))
      jointVelocities.append((b - a) / keyFrameDuration)
    self._jointPositions = jointPositions
    self._jointVelocities = jointVelocities
    return jointPositions, jointVelocities
|
nilq/baby-python
|
python
|
from typing import List
from extraction.event_schema import EventSchema
from extraction.predict_parser.predict_parser import Metric
from extraction.predict_parser.tree_predict_parser import TreePredictParser
# Registry mapping a decoding-format name to the parser class that handles it;
# both tree-based formats share TreePredictParser.
decoding_format_dict = {
    'tree': TreePredictParser,
    'treespan': TreePredictParser,
}
def get_predict_parser(format_name):
    """Return the parser class registered for *format_name* (KeyError if unknown)."""
    parser_class = decoding_format_dict[format_name]
    return parser_class
def eval_pred(predict_parser, gold_list, pred_list, text_list=None, raw_list=None):
    """Decode gold/pred sequences with *predict_parser* and score relation F1.

    Returns a dict merging the 'relation-' prefixed F1 scores with the
    decode counters (e.g. well-formedness statistics).
    """
    well_formed_list, counter = predict_parser.decode(
        gold_list, pred_list, text_list, raw_list)
    metric = Metric()
    for instance in well_formed_list:
        metric.count_instance(instance['gold_relation'],
                              instance['pred_relation'],
                              verbose=False)
    scores = {}
    scores.update(metric.compute_f1(prefix='relation-'))
    scores.update(counter)
    return scores
def eval_pred_with_decoding(gold_list, pred_list, text_list=None, raw_list=None):
    """Score relation F1 directly on the provided gold/pred lists.

    Unlike eval_pred, no decoding step is performed; text_list/raw_list are
    accepted for signature parity but unused.
    """
    metric = Metric()
    metric.count_instance(gold_list, pred_list, verbose=False)
    scores = {}
    scores.update(metric.compute_f1(prefix='relation-'))
    return scores
def get_extract_metrics(pred_lns: List[str], tgt_lns: List[str], label_constraint: EventSchema, decoding_format='tree'):
    """Compute relation-extraction F1 metrics for predicted vs. target lines.

    Note: the constructed parser is not used for scoring -- evaluation goes
    straight through ``eval_pred_with_decoding`` on the raw lines.  The
    lookup/instantiation is kept so an unknown ``decoding_format`` still
    fails fast (KeyError), but the dead local binding was removed.
    """
    # Validates `decoding_format` and the schema; result intentionally unused.
    get_predict_parser(format_name=decoding_format)(label_constraint=label_constraint)
    return eval_pred_with_decoding(
        gold_list=tgt_lns,
        pred_list=pred_lns
    )
|
nilq/baby-python
|
python
|
import os

# Advent-of-Code day 3 (power consumption): per bit column, gamma takes the
# majority bit and epsilon the minority; print gamma * epsilon.
# Bug fix: the input path used a hard-coded "\\" separator, which only works
# on Windows -- os.path.join is portable and identical on Windows.
filename = os.path.join(os.path.dirname(__file__), "input")
with open(filename) as file:
    ones_per_column = []   # count of '1' bits seen in each column
    total_lines = 0
    for line in file:
        bits = list(line.rstrip())
        if total_lines == 0:
            # Size the counters from the first line (all lines share a width).
            ones_per_column = [0] * len(bits)
        for i, bit in enumerate(bits):
            if bit == '1':
                ones_per_column[i] += 1
        total_lines += 1
    gamma_bits = []
    epsilon_bits = []
    for count in ones_per_column:
        # Majority of ones -> gamma bit 1, epsilon bit 0 (and vice versa).
        if count > total_lines / 2:
            gamma_bits.append('1')
            epsilon_bits.append('0')
        else:
            gamma_bits.append('0')
            epsilon_bits.append('1')
    gamma = int(''.join(gamma_bits), 2)
    epsilon = int(''.join(epsilon_bits), 2)
    print(gamma * epsilon)
|
nilq/baby-python
|
python
|
def solve(input):
    """Sum, over blank-line-separated groups, the number of distinct letters.

    Each group contributes the popcount of a 26-bit mask with one bit per
    letter 'a'-'z' that appears anywhere in the group.
    """
    total = 0
    for group in input.split("\n\n"):
        seen = 0
        for ch in group:
            if ch.isalpha():
                # Set bit (ord(ch) - ord('a')) for this letter.
                seen |= 1 << (ord(ch) - ord("a"))
        # Popcount of the mask = distinct letters in the group.
        total += bin(seen).count("1")
    return total
|
nilq/baby-python
|
python
|
# https://www.hackerrank.com/challenges/three-month-preparation-kit-jack-goes-to-rapture/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getCost' function below.
#
# The function accepts WEIGHTED_INTEGER_GRAPH g as parameter.
#
#
# For the weighted graph, <name>:
#
# 1. The number of nodes is <name>_nodes.
# 2. The number of edges is <name>_edges.
# 3. An edge exists between <name>_from[i] and <name>_to[i]. The weight of the edge is <name>_weight[i].
#
#
def getCost(n, g_from, g_to, g_weight):
    """Minimum-bottleneck cost between node 1 and node n (1-indexed input).

    Kruskal-style: add edges in increasing weight order into a union-find
    (negative values = rank, non-negative = parent index) and return the
    weight of the edge that first connects the endpoints; 0 if they start
    connected, 'NO PATH EXISTS' if they never do.
    """
    parent = [-1] * n

    def root(v):
        # Path-compressing find.
        if parent[v] < 0:
            return v
        r = root(parent[v])
        parent[v] = r
        return r

    # Convert endpoints to 0-based and sort by (weight, u, v).
    edge_list = sorted(
        (w, u - 1, v - 1) for w, u, v in zip(g_weight, g_from, g_to)
    )
    if root(0) == root(n - 1):
        return 0
    for w, u, v in edge_list:
        ru = root(u)
        rv = root(v)
        if ru != rv:
            # Union by rank: equal ranks deepen rv, then attach shallower root.
            if parent[ru] == parent[rv]:
                parent[rv] -= 1
            if parent[ru] > parent[rv]:
                parent[ru] = rv
            else:
                parent[rv] = ru
            if root(0) == root(n - 1):
                return w
    return 'NO PATH EXISTS'
if __name__ == '__main__':
    # Read "nodes edges", then one "u v w" triple per edge (1-indexed nodes).
    g_nodes, g_edges = map(int, input().rstrip().split())
    g_from = []
    g_to = []
    g_weight = []
    for _ in range(g_edges):
        u, v, w = map(int, input().rstrip().split())
        g_from.append(u)
        g_to.append(v)
        g_weight.append(w)
    print(getCost(g_nodes, g_from, g_to, g_weight))
|
nilq/baby-python
|
python
|
from typing import List, Callable, Optional, Dict, Set
from tqdm import tqdm
from src.data.objects.frame import Frame
from src.data.objects.stack import Stack
from src.data.readers.annotation_reader import AnnotationLoader
class LineModifiedClassifier:
    """Scores candidate users by authorship of the lines appearing in a stack.

    For each frame, the author of the frame's line (per VCS annotation) gets
    weight_fn(stack_ts, line_ts) credit, provided the author is a known user
    and the modification predates the stack report.
    """

    def __init__(self, user_ids: Set[int], annotation_loader: AnnotationLoader,
                 weight_fn: Callable[[int, int], float], top_k_frames: Optional[int] = None):
        self._user_ids = user_ids
        self._annotation_loader = annotation_loader
        self._weight_fn = weight_fn
        # None means "use every frame"; list slicing handles that for free.
        self._top_k_frames = top_k_frames

    def _frame_scores(self, frame: Frame, stack_ts: int) -> Dict[int, float]:
        """Per-user score contribution of a single frame."""
        scores = {user_id: 0 for user_id in self._user_ids}
        raw = frame.raw_frame
        annotation = self._annotation_loader(raw.commit_hash, raw.file_name)
        # Guard against missing annotations and out-of-range line numbers.
        if annotation and raw.line_num and raw.line_num - 1 < len(annotation):
            author = annotation.author[raw.line_num - 1]
            modified_ts = annotation.ts[raw.line_num - 1]
            # Only credit known users whose change predates the stack.
            if author in self._user_ids and modified_ts <= stack_ts:
                scores[author] += self._weight_fn(stack_ts, modified_ts)
        return scores

    def _stack_scores(self, stack: Stack) -> Dict[int, float]:
        """Sum frame scores over the (optionally truncated) frame list."""
        totals = {user_id: 0 for user_id in self._user_ids}
        for frame in stack.frames[:self._top_k_frames]:
            per_frame = self._frame_scores(frame, stack.ts)
            for user_id, score in per_frame.items():
                totals[user_id] += score
        return totals

    def predict(self, stacks: List[Stack]) -> List[Dict[int, float]]:
        """Score every stack (tqdm progress bar over the input)."""
        return [self._stack_scores(stack) for stack in tqdm(stacks)]
|
nilq/baby-python
|
python
|
from scrapy import cmdline

# Name of the spider to run (must match the spider's `name` attribute).
name = 'douban_movie_top250'

# Equivalent to running `scrapy crawl douban_movie_top250` from the shell.
cmdline.execute('scrapy crawl {}'.format(name).split())
|
nilq/baby-python
|
python
|
# you += hash(pubkey || index) to both the private scalar and public point
# <tacotime> [02:35:38] so to get priv_i and pub_i
# <tacotime> [02:36:06] priv_i = (priv + hash) mod N
# <tacotime> [02:37:17] pub_i = (pub + scalarbasemult(hash))
import MiniNero
import PaperWallet

# Fresh wallet: spend/view secrets, public keys, address, word list, checksum.
sk, vk, pk, pvk, addr, wl, cks = PaperWallet.keysBoth()

print("making keychain")
for i in range(1, 600):
    index = MiniNero.intToHex(i)
    # Per-index tweak: H(pub || index).
    tweak = MiniNero.cn_fast_hash(pk + index)
    # priv_i = priv + tweak (mod group order); pub_i = pub + tweak * G.
    sk1 = MiniNero.sc_add_keys(sk, tweak)
    pk1 = MiniNero.addKeys(pk, MiniNero.scalarmultBase(tweak))
    # The derived public key must match the one recomputed from the derived secret.
    pk1_check = MiniNero.publicFromSecret(sk1)
    print("Check", pk1 == pk1_check)
    print(sk1)
|
nilq/baby-python
|
python
|
class OperationFailed(Exception):
    """Raised when a requested operation could not be carried out."""
class ValidationFailed(Exception):
    """Raised when input data fails validation."""
|
nilq/baby-python
|
python
|
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
def suma(a: float, b: float) -> float:
    """Return the sum of a and b."""
    return a + b
def resta(a: float, b: float) -> float:
    """Return the difference a - b."""
    return a - b
def multiplicacion(a: float, b: float) -> float:
    """Return the product of a and b."""
    return a * b
def division(a: float, b: float) -> float:
    """Return a divided by b.

    Raises:
        ZeroDivisionError: if ``b`` is zero.
    """
    # Bug fix: the original raised for every b <= 0, wrongly rejecting
    # perfectly valid negative divisors; only b == 0 is undefined.
    if b == 0:
        raise ZeroDivisionError
    return a / b
|
nilq/baby-python
|
python
|
class ArtistCollection():
    """
    Matplotlib collections can't handle Text.
    This is a barebones collection for text objects
    that supports removing and making (in)visible
    """

    def __init__(self, artistlist):
        """
        Pass in a list of matplotlib.text.Text objects
        (or possibly any matplotlib Artist will work)
        """
        self.artistlist = artistlist

    def remove(self):
        """Remove every wrapped artist from its figure."""
        for artist in self.artistlist:
            artist.remove()

    def add_to_axes(self, ax):
        """Attach every wrapped artist to the given axes."""
        for artist in self.artistlist:
            ax.add_artist(artist)

    def get_visible(self):
        """Return True only if every wrapped artist is visible (True when empty)."""
        # List comprehension (not a generator) so every artist is queried,
        # matching the original full scan with no short-circuit.
        return all([artist.get_visible() for artist in self.artistlist])

    def set_visible(self, visible=True):
        """Set the visibility flag on every wrapped artist."""
        for artist in self.artistlist:
            artist.set_visible(visible)
|
nilq/baby-python
|
python
|
from qemuvolume import QEMUVolume
from ..tools import log_check_call
class VirtualHardDisk(QEMUVolume):
    """Fixed-size VHD volume (qemu 'vpc' format) suitable for Azure."""

    extension = 'vhd'
    qemu_format = 'vpc'
    ovf_uri = 'http://go.microsoft.com/fwlink/?LinkId=137171'

    def _before_create(self, e):
        # Azure requires the image size to be a multiple of 1 MiB, and VHDs
        # are dynamic by default, so force a fixed-size image (subformat=fixed).
        self.image_path = e.image_path
        vol_size = '{0}M'.format(str(self.size.bytes.get_qty_in('MiB')))
        log_check_call(['qemu-img', 'create', '-o', 'subformat=fixed',
                        '-f', self.qemu_format, self.image_path, vol_size])

    def get_uuid(self):
        """Lazily generate and cache a UUID for this volume."""
        if not hasattr(self, 'uuid'):
            import uuid
            self.uuid = uuid.uuid4()
        return self.uuid
|
nilq/baby-python
|
python
|
import pytest
import numpy
import os
import spacy
from spacy.matcher import Matcher
from spacy.attrs import ORTH, LOWER, ENT_IOB, ENT_TYPE
from spacy.attrs import ORTH, TAG, LOWER, IS_ALPHA, FLAG63
from spacy.symbols import DATE, LOC
def test_overlap_issue118(EN):
    '''Test a bug that arose from having overlapping matches'''
    doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
    ORG = doc.vocab.strings['ORG']
    # Two patterns that overlap on the same tokens: 'celtics' alone (10-11)
    # and 'boston celtics' (9-11), both tagged ORG under one entity key.
    matcher = Matcher(EN.vocab,
                      {'BostonCeltics':
                           ('ORG', {},
                            [
                                [{LOWER: 'celtics'}],
                                [{LOWER: 'boston'}, {LOWER: 'celtics'}],
                            ]
                            )
                       }
                      )
    assert len(list(doc.ents)) == 0
    # matcher(doc) yields (ent_id, ent_type, start, end); drop the id.
    matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
    assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
    # Assigning only the first (longer) match must stick as the sole entity.
    doc.ents = matches[:1]
    ents = list(doc.ents)
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11
def test_overlap_issue242():
    '''Test overlapping multi-word phrases.'''
    # 'food safety' (tokens 3-5) and 'safety standards' (4-6) overlap on token 4.
    patterns = [
        [{LOWER: 'food'}, {LOWER: 'safety'}],
        [{LOWER: 'safety'}, {LOWER: 'standards'}],
    ]
    # Allow overriding the model location via the SPACY_DATA env var.
    if os.environ.get('SPACY_DATA'):
        data_dir = os.environ.get('SPACY_DATA')
    else:
        data_dir = None
    nlp = spacy.en.English(path=data_dir, tagger=False, parser=False, entity=False)
    nlp.matcher = Matcher(nlp.vocab)
    nlp.matcher.add('FOOD', 'FOOD', {}, patterns)
    doc = nlp.tokenizer(u'There are different food safety standards in different countries.')
    # matcher yields (ent_id, ent_type, start, end); keep type and span.
    matches = [(ent_type, start, end) for ent_id, ent_type, start, end in nlp.matcher(doc)]
    # Appending overlapping spans to doc.ents must not raise.
    doc.ents += tuple(matches)
    food_safety, safety_standards = matches
    assert food_safety[1] == 3
    assert food_safety[2] == 5
    assert safety_standards[1] == 4
    assert safety_standards[2] == 6
def test_overlap_reorder(EN):
    '''Test order dependence'''
    # Same as test_overlap_issue118 but with the pattern order reversed:
    # the match results must not depend on pattern registration order.
    doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
    ORG = doc.vocab.strings['ORG']
    matcher = Matcher(EN.vocab,
                      {'BostonCeltics':
                           ('ORG', {},
                            [
                                [{LOWER: 'boston'}, {LOWER: 'celtics'}],
                                [{LOWER: 'celtics'}],
                            ]
                            )
                       }
                      )
    assert len(list(doc.ents)) == 0
    matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
    # Expected spans identical to the original pattern order.
    assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
    doc.ents = matches[:1]
    ents = list(doc.ents)
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11
def test_overlap_prefix(EN):
    '''Test order dependence'''
    # One pattern is a strict prefix of the other ('boston' vs 'boston celtics');
    # both should match, and assigning the longer span must win.
    doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
    ORG = doc.vocab.strings['ORG']
    matcher = Matcher(EN.vocab,
                      {'BostonCeltics':
                           ('ORG', {},
                            [
                                [{LOWER: 'boston'}],
                                [{LOWER: 'boston'}, {LOWER: 'celtics'}],
                            ]
                            )
                       }
                      )
    assert len(list(doc.ents)) == 0
    matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
    # Keep only the longer match (9-11); the prefix match (9-10) is dropped.
    doc.ents = matches[1:]
    assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
    ents = list(doc.ents)
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11
def test_overlap_prefix_reorder(EN):
    '''Test order dependence'''
    # Prefix-overlap case with pattern registration order reversed; match
    # output order must stay (shorter span first) regardless.
    doc = EN.tokenizer(u'how many points did lebron james score against the boston celtics last night')
    ORG = doc.vocab.strings['ORG']
    matcher = Matcher(EN.vocab,
                      {'BostonCeltics':
                           ('ORG', {},
                            [
                                [{LOWER: 'boston'}, {LOWER: 'celtics'}],
                                [{LOWER: 'boston'}],
                            ]
                            )
                       }
                      )
    assert len(list(doc.ents)) == 0
    matches = [(ent_type, start, end) for ent_id, ent_type, start, end in matcher(doc)]
    # Append only the longer match via += (exercises the tuple-append path).
    doc.ents += tuple(matches)[1:]
    assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
    ents = doc.ents
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11
# @pytest.mark.models
# def test_ner_interaction(EN):
# EN.matcher.add('LAX_Airport', 'AIRPORT', {}, [[{ORTH: 'LAX'}]])
# EN.matcher.add('SFO_Airport', 'AIRPORT', {}, [[{ORTH: 'SFO'}]])
# doc = EN(u'get me a flight from SFO to LAX leaving 20 December and arriving on January 5th')
# ents = [(ent.label_, ent.text) for ent in doc.ents]
# assert ents[0] == ('AIRPORT', 'SFO')
# assert ents[1] == ('AIRPORT', 'LAX')
# assert ents[2] == ('DATE', '20 December')
# assert ents[3] == ('DATE', 'January 5th')
# @pytest.mark.models
# def test_ner_interaction(EN):
# # ensure that matcher doesn't overwrite annotations set by the NER model
# doc = EN.tokenizer.tokens_from_list(u'get me a flight from SFO to LAX leaving 20 December and arriving on January 5th'.split(' '))
# EN.tagger(doc)
# columns = [ENT_IOB, ENT_TYPE]
# values = numpy.ndarray(shape=(len(doc),len(columns)), dtype='int32')
# # IOB values are 0=missing, 1=I, 2=O, 3=B
# iobs = [2,2,2,2,2,3,2,3,2,3,1,2,2,2,3,1]
# types = [0,0,0,0,0,LOC,0,LOC,0,DATE,DATE,0,0,0,DATE,DATE]
# values[:] = zip(iobs,types)
# doc.from_array(columns,values)
# assert doc[5].ent_type_ == 'LOC'
# assert doc[7].ent_type_ == 'LOC'
# assert doc[9].ent_type_ == 'DATE'
# assert doc[10].ent_type_ == 'DATE'
# assert doc[14].ent_type_ == 'DATE'
# assert doc[15].ent_type_ == 'DATE'
# EN.matcher.add('LAX_Airport', 'AIRPORT', {}, [[{ORTH: 'LAX'}]])
# EN.matcher.add('SFO_Airport', 'AIRPORT', {}, [[{ORTH: 'SFO'}]])
# EN.matcher(doc)
# assert doc[5].ent_type_ != 'AIRPORT'
# assert doc[7].ent_type_ != 'AIRPORT'
# assert doc[5].ent_type_ == 'LOC'
# assert doc[7].ent_type_ == 'LOC'
# assert doc[9].ent_type_ == 'DATE'
# assert doc[10].ent_type_ == 'DATE'
# assert doc[14].ent_type_ == 'DATE'
# assert doc[15].ent_type_ == 'DATE'
|
nilq/baby-python
|
python
|
# 3rd party imports
import stellar_base.utils
from stellar_base.exceptions import *
from stellar_base.keypair import Keypair
from stellar_base.address import Address
# Stellar MEMO_TEXT payloads are capped at 28 bytes by the protocol.
STELLAR_MEMO_TEXT_MAX_BYTES = 28
def is_address_valid(address):
    """
    Checks whether *address* is a well-formed Stellar account address.
    Formatting only -- existence on the Stellar network is not checked.

    :param str address: address to be evaluated.
    :return: True if the given address decodes correctly, False otherwise.
    :rtype: bool
    """
    if address is None:
        return False
    try:
        stellar_base.utils.decode_check('account', address)
    except DecodeError:
        return False
    return True
def is_seed_valid(key):
    """
    Checks whether *key* is a well-formed Stellar seed.

    :param str key: Seed to be evaluated.
    :return: True if the seed decodes correctly, False otherwise.
    :rtype: bool
    """
    if key is None:
        return False
    try:
        stellar_base.utils.decode_check('seed', key)
    except DecodeError:
        return False
    return True
def is_transaction_text_memo_valid(memo):
    """
    Checks if a given Stellar transaction text memo is valid. To be valid the
    text memo can only have, at most, 28 bytes when UTF-8 encoded.

    :param str memo: Text memo to be evaluated.
    :return: Returns true if the given text memo is valid and false otherwise.
    :rtype: bool
    """
    if memo is None:
        return False
    # Bug fix: MEMO_TEXT is limited to 28 *bytes*, but len(memo) counts
    # characters and under-counts multi-byte UTF-8 text; measure the
    # encoded length instead.
    return len(memo.encode('utf-8')) <= STELLAR_MEMO_TEXT_MAX_BYTES
def is_seed_matching_address(seed, address):
    """
    Checks whether the address derived from *seed* equals *address*.

    :param str seed: Seed to be evaluated.
    :param str address: Address to be evaluated.
    :return: True if the seed's address matches, False otherwise
        (including when either input is malformed).
    :rtype: bool
    """
    if not (is_seed_valid(seed) and is_address_valid(address)):
        return False
    keypair = Keypair.from_seed(seed=seed)
    return keypair.address().decode() == address
def is_account_existent(address):
    """
    Checks if a given Stellar address exists in the network. It assumes that
    the address parameter received is a valid address string.

    :param str address: address to be evaluated.
    :return: Returns true if the given address exists in the network and false otherwise.
    :rtype: bool
    """
    # Idiom fix: return the boolean expression directly instead of
    # `True if ... else False`.
    return get_address_details_from_network(address) is not None
def get_address_details_from_network(address):
    """
    Queries the Stellar network for the details of the specified account.

    :param str address: address to be evaluated.
    :return: On success, a Stellar Address object with the information fetched
        from the network; on failure, None.
    :rtype: Address or None
    """
    if not is_address_valid(address):
        print('Trying to get information of an invalid address.')
        return None
    try:
        account = Address(address=address)
        account.get()  # Pull the latest account state from Horizon.
    except AccountNotExistError:
        print('The specified account does not exist.')
        return None
    except HorizonError:
        print('A connection error occurred (Please check your Internet connection).')
        return None
    return account
|
nilq/baby-python
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, ms_function, context
import mindspore.common.dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
class ControlNet(nn.Cell):
    """Toy cell whose construct() branches on a tensor comparison (a+b vs x)."""

    def inner_function_1(self, a, b):
        # Sum branch.
        return a + b

    def inner_function_2(self, a, b):
        # Difference branch.
        return a - b

    def construct(self, x):
        lhs = Tensor(np.array(4), mstype.int32)
        rhs = Tensor(np.array(5), mstype.int32)
        # lhs + rhs == 9; pick the add branch when it exceeds the input.
        if lhs + rhs > x:
            return self.inner_function_1(lhs, rhs)
        return self.inner_function_2(lhs, rhs)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_fallback_control_sink_tensor():
    """
    Feature: Fallback feature: support define Tensor in Class construct.
    Description: Fallback feature: support define Tensor in Class construct.
    Expectation: Fallback feature: support define Tensor in Class construct.
    """
    # a + b = 9 > x = 1, so construct() takes the inner_function_1 (a + b) branch.
    x = Tensor(np.array(1), mstype.int32)
    net = ControlNet()
    output = net(x)
    output_expect = Tensor(9, mstype.int32)
    assert output == output_expect
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_tensor_list():
    """
    Feature: Fallback feature
    Description: support Basic method of Tensor list.
    Expectation: No exception.
    """
    @ms_function
    def np_tensor_list():
        # Build a Tensor list, iterate it, and append a derived Tensor --
        # exercises list ops inside a graph-compiled function.
        a = Tensor(np.array(4), mstype.int32)
        b = Tensor(np.array(5), mstype.int32)
        c = Tensor(np.array(6), mstype.int32)
        tensor_list = [a, b]
        for tensor in tensor_list:
            print(tensor)
        tensor_list.append(tensor_list[-1] + c)
        return tensor_list
    tensor_list = np_tensor_list()
    print("tensor_list:", tensor_list)
    # [a, b] plus the appended (b + c) element.
    assert len(tensor_list) == 3
|
nilq/baby-python
|
python
|
"""Converts ECMWF levels into heights"""
import numpy as np
def readLevels(file_name='supra/Supracenter/level_conversion_ECMWF_37.txt', header=2):
    """ Read the ECMWF level-conversion table for use with convLevels().

    Arguments:
        file_name: [string] name of conversion file, .txt
        header: [int] number of header lines in the .txt file to skip

    Returns:
        data: [ndarray] contents of the level conversion file, one row per level
    """
    with open(file_name) as f:

        # Skip the header lines.
        for _ in range(header):
            next(f)

        # Parse each remaining line into a row of floats.  Accumulating rows
        # in a list and converting once replaces the original per-line
        # np.vstack (quadratic copying) and the dummy zero row it required.
        rows = [[float(entry) for entry in line.split()] for line in f]

    return np.array(rows)
def convLevels(typ=1):
    """ HELPER FUNCTION: Converts levels from ECMWF data into geopotential or geometric heights.
    see https://www.ecmwf.int/en/forecasts/documentation-and-support/137-model-levels
    The conversion is done with a .txt file with the contents of that table.

    Arguments:
        typ: [int] 0 - levels to geopotential heights, 1 - levels to geometric heights

    Returns:
        data: [list] list of converted heights
    """
    table = readLevels()
    # Column 1 holds geopotential heights, column 2 geometric heights.
    column = 1 if typ == 0 else 2
    return table[:, column]
|
nilq/baby-python
|
python
|
#
# Copyright 2018 herd-mdl contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/bin/env python
import json
import logging
import boto3
from botocore.exceptions import ClientError
from botocore.vendored import requests
# Status strings CloudFormation expects in the custom-resource response body.
SUCCESS = "SUCCESS"
FAILED = "FAILED"
# Root logger with a timestamped stream handler at INFO level.
# NOTE(review): the module-level name `handler` is later shadowed by the
# Lambda entry point `def handler(...)` -- harmless because the variable is
# not used after setup, but worth renaming for clarity.
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s %(levelname)-8s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Lambda function 'script' which creates/destroys an EC2 Keypair with a user-specified name. It also stores the
# private key material as a 'SecureString' parameter in SSM's parameter store. This is packaged to work with an AWS
# 'CustomResource'. Further reading here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template
# -custom-resources-lambda.html
# Entry-point of script which is invoked by the Lambda function
def handler(event, context):
    """Lambda entry point for the EC2-keypair CloudFormation custom resource.

    On Create: ensures the keypair-name SSM parameter and the keypair itself
    exist, storing the private key material as a SecureString.  On Delete:
    removes both.  On Update: no-op.  Every path ends by signaling
    CloudFormation via `send`.
    """
    logger.info('Request: Event: \n {}'.format(event))
    logger.info('Request: Context: \n {}'.format(context))
    # Get the keypair name as defined in the Resource properties and convert to lowercase for consistency
    keypair_name = construct_keypair_name(event)
    keypair_ssm_key_name = str(event['ResourceProperties']['KeypairSsmKeyName'])
    physical_resource_id = str(event['LogicalResourceId']) + '-' + keypair_name
    # On stack-create, does the following things:
    # 1. Checks if a keypair with the specified name already exists, if it does- skips to step #4.
    # 2. Creates the keypair with the given name.
    # 3. Stores the keypair material in SSM as an encrypted value.
    # 4. Signals CloudFormation that the process is complete.
    if event['RequestType'] == 'Create':
        if not ssm_parameter_exists(keypair_ssm_key_name, event, context, physical_resource_id):
            logger.info(
                'Attempting to create new SSM parameter to store keypair name: \'{}\''.format(keypair_ssm_key_name))
            description = 'keypair name'
            put_parameter_in_ssm(keypair_ssm_key_name, description, keypair_name, 'String', event, context,
                                 physical_resource_id)
        else:
            logger.warning('SSM parameter for key pair name already exists, will not create a new one.')
        if not keypair_exists(keypair_name, event, context, physical_resource_id):
            # Bug fix: the format string was missing its opening quote escape
            # ('\{}\'' logged a stray backslash); now quotes the name properly.
            logger.info('Attempting to create a new keypair: \'{}\''.format(keypair_name))
            keypair_material = create_key_pair(keypair_name, event, context,
                                               physical_resource_id)
            description = 'private key material'
            put_parameter_in_ssm(keypair_name, description, keypair_material, 'SecureString', event, context,
                                 physical_resource_id)
            response_data = construct_response_message(
                'Created new keypair: \'{}\' and stored in parameter store.'.format(
                    keypair_name))
            send(event, context, SUCCESS, response_data, physical_resource_id)
        else:
            response_data = construct_response_message(
                'Keypair: \'{}\' already exists, nothing to do.'.format(keypair_name))
            send(event, context, SUCCESS, response_data, physical_resource_id)
    # On stack-delete, do the following things:
    # 1. Checks if a keypair with the specified name already exists, if it does- deletes it.
    # 2. Checks if an SSM parameter exists with the specified name, if it does- deletes it.
    # 3. Signals CloudFormation that the process is complete.
    elif event['RequestType'] == 'Delete':
        message = ''
        if keypair_exists(keypair_name, event, context, physical_resource_id):
            logger.info('Attempting to delete the keypair')
            delete_key_pair(keypair_name, event, context, physical_resource_id)
            message += 'Deleted keypair: \'{}\''.format(keypair_name)
            if ssm_parameter_exists(keypair_name, event, context, physical_resource_id):
                delete_key_pair_parameter_key(keypair_name, event, context,
                                              physical_resource_id)
                message += '\nDeleted parameter with key: \'{}\' from SSM.'.format(
                    keypair_name)
            response_data = construct_response_message(message)
        else:
            response_data = construct_response_message(
                'Keypair: \'{}\' and parameter: \'{}\' do not exist. Nothing '
                'to delete.'.format(keypair_name, keypair_name))
        send(event, context, SUCCESS, response_data, physical_resource_id)
    # On stack-update, does nothing and simply exits.
    elif event['RequestType'] == 'Update':
        logger.info('Nothing to update')
        response_data = construct_response_message('Nothing to update')
        send(event, context, SUCCESS, response_data, physical_resource_id)
def construct_keypair_name(event):
    """Build the keypair name 'app_<instance>_<environment>' in lower case.

    Instance name and environment come from the custom resource's
    ResourceProperties in the CloudFormation event.
    """
    props = event['ResourceProperties']
    parts = ['app', str(props['MDLInstanceName']), str(props['Environment'])]
    # Lower-case the keypair name for consistency.
    return '_'.join(parts).lower()
# Function to check if a keypair exists with the specified name. Returns a Boolean.
def keypair_exists(keypair_name, event, context, physical_resource_id):
    """Return True if an EC2 keypair named *keypair_name* exists.

    Returns False when EC2 reports InvalidKeyPair.NotFound; on any other
    ClientError it signals FAILED to CloudFormation and returns False.
    NOTE(review): if describe_key_pairs succeeds but the HTTP-status/length
    check fails, the function falls through and implicitly returns None
    (falsy) -- callers treat that the same as False, but it is implicit.
    """
    logger.info(
        'Checking if a keypair already exists with the specified name: \'{}\''.format(
            keypair_name))
    try:
        ec2 = boto3.client('ec2')
        # describe_key_pairs raises ClientError when the name is unknown.
        response = ec2.describe_key_pairs(
            KeyNames=[
                keypair_name
            ]
        )
        if response['ResponseMetadata']['HTTPStatusCode'] == 200 and len(response['KeyPairs']) == 1:
            logger.warning("KeyPair: \'{}\' found.".format(keypair_name))
            return True
    except ClientError as e:
        if e.response['Error']['Code'] == 'InvalidKeyPair.NotFound':
            logger.info('KeyPair: \'{}\' not found.'.format(keypair_name))
            return False
        else:
            logger.error('Unexpected error: {}'.format(e))
            response_data = construct_response_message(
                'Unexpected error while trying to \'describe\' the Keypair: \'{}\'. Exception: {}'
                .format(keypair_name, e))
            send(event, context, FAILED, response_data, physical_resource_id)
            return False
# Function to check if a key-value pair exists in SSM with the specified name. Returns a Boolean.
def ssm_parameter_exists(key_name, event, context, physical_resource_id):
    """Return True if an SSM parameter named *key_name* exists.

    A 400 ClientError is interpreted as "not found" (False); any other
    ClientError signals FAILED to CloudFormation and returns False.
    NOTE(review): a successful call with a non-200 status falls through and
    implicitly returns None (falsy).
    """
    logger.info(
        'Checking if a parameter exists in SSM with the specified name: \'{}\''.format(
            key_name))
    try:
        ssm = boto3.client('ssm')
        response = ssm.get_parameter(
            Name=key_name
        )
        if response['ResponseMetadata']['HTTPStatusCode'] == 200:
            logger.info(
                'Found parameter with key name: \'{}\' in SSM.'.format(key_name))
            return True
    except ClientError as e:
        # SSM returns HTTP 400 (ParameterNotFound) for a missing parameter.
        if e.response['ResponseMetadata']['HTTPStatusCode'] == 400:
            logger.info(
                'Parameter with key: \'{}\' not found in SSM.'.format(key_name))
            return False
        else:
            logger.error('Unexpected error: {}'.format(e))
            response_data = construct_response_message(
                'Unexpected error while trying to get parameter: \'{}\'. Exception: {}'.format(
                    key_name, e))
            send(event, context, FAILED, response_data, physical_resource_id)
            return False
# Creates an EC2 keypair with the specified name
def create_key_pair(keypair_name, event, context, physical_resource_id):
    """Create an EC2 keypair named *keypair_name* and return its private-key material.

    On ClientError, signals FAILED to CloudFormation and implicitly
    returns None.
    """
    try:
        ec2 = boto3.resource('ec2')
        logging.info(
            'Attempting to create a keypair with name: {}'.format(keypair_name))
        response = ec2.create_key_pair(
            KeyName=keypair_name,
            DryRun=False
        )
        # key_material is the PEM-encoded private key (only returned at creation).
        return response.key_material
    except ClientError as e:
        logger.error(
            'Could not create keypair with name: \'{}\'. Exception: {}'.format(
                keypair_name, e))
        response_data = construct_response_message(
            'Unexpected error while trying to create keypair with given name: \'{}\'. Exception: {}'
            .format(keypair_name, e))
        send(event, context, FAILED, response_data, physical_resource_id)
# Stores a specified key-material with a given name in SSM.
def put_parameter_in_ssm(key_name, description, material, value_type, event, context, physical_resource_id):
    """Store *material* in SSM under *key_name* with Type=*value_type*, overwriting.

    Returns the SSM response on success; on ClientError, signals FAILED to
    CloudFormation and implicitly returns None.
    """
    logger.info('Attempting to put parameter in SSM with name: \'{}\'.'.format(
        key_name))
    try:
        ssm = boto3.client('ssm')
        response = ssm.put_parameter(
            Name=key_name,
            Description=description,
            Value=material,
            Type=value_type,
            Overwrite=True
        )
        return response
    except ClientError as e:
        logger.error(
            'Could not store key material in SSM with key: \'{}\'. Exception: {}'.format(
                key_name, e))
        # Bug fix: error message said 'pur' instead of 'put'.
        response_data = construct_response_message(
            'Unexpected error while trying to \'put\' parameter in SSM with given name: \'{}\'. Exception: {}'
            .format(key_name, e))
        send(event, context, FAILED, response_data, physical_resource_id)
# Deletes a parameter from SSM of a given name
def delete_key_pair_parameter_key(keypair_name, event, context,
                                  physical_resource_id):
    """Delete the SSM parameter named *keypair_name*.

    Returns True on success; on ClientError, signals FAILED to
    CloudFormation and returns False.
    """
    logger.info(
        'Attempting to delete the key with name: \'{}\''.format(keypair_name))
    try:
        ssm = boto3.client('ssm')
        ssm.delete_parameter(
            Name=keypair_name
        )
        return True
    except ClientError as e:
        logger.error('Could not delete key: \'{}\' from SSM. Exception: {}'.format(
            keypair_name, e))
        response_data = construct_response_message(
            'Unexpected error while trying to \'delete\' parameter from SSM with given name: \'{}\'. Exception: {}'
            .format(keypair_name, e))
        send(event, context, FAILED, response_data, physical_resource_id)
        return False
# Deletes a keypair of a given name
def delete_key_pair(keypair_name, event, context, physical_resource_id):
    """Delete the EC2 keypair named *keypair_name*.

    Returns True on success; on ClientError, signals FAILED to
    CloudFormation and returns False.
    """
    logger.info(
        'Attempting to delete the keypair with name: \'{}\''.format(keypair_name))
    try:
        ec2 = boto3.client('ec2')
        # delete_key_pair is idempotent: deleting a missing keypair succeeds.
        ec2.delete_key_pair(
            KeyName=keypair_name
        )
        return True
    except ClientError as e:
        logger.error(
            'Could not delete keypair with name: \'{}\'. Exception: {}'.format(
                keypair_name, e))
        response_data = construct_response_message(
            'Unexpected error while trying to \'delete\' keypair with given name: \'{}\'. Exception: {}'
            .format(keypair_name, e))
        send(event, context, FAILED, response_data, physical_resource_id)
        return False
# Function to construct a formatted response message to send to CloudFormation while signaling it
def construct_response_message(message):
    """Wrap *message* in the response-data dict CloudFormation expects."""
    return dict(Message=message)
# Function to signal CloudFormation.
def send(event, context, response_status, response_data, physical_resource_id):
    """PUT the custom-resource result back to CloudFormation's ResponseURL.

    Builds the JSON body the custom-resource protocol expects and sends it to
    the pre-signed S3 URL from the request event; failures are only logged.
    """
    response_url = event['ResponseURL']
    logger.debug('ResponseURL: {}'.format(response_url))
    responseBody = {'Status': response_status,
                    'Reason': 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
                    'PhysicalResourceId': physical_resource_id or context.log_stream_name,
                    'StackId': event['StackId'],
                    'RequestId': event['RequestId'],
                    'LogicalResourceId': event['LogicalResourceId'],
                    'NoEcho': 'false',
                    'Data': response_data}
    json_response_body = json.dumps(responseBody)
    logger.debug("Response body: {}".format(json_response_body))
    # Empty content-type is required by the pre-signed S3 PUT URL.
    headers = {
        'content-type': '',
        'content-length': str(len(json_response_body))
    }
    try:
        response = requests.put(response_url,
                                data=json_response_body,
                                headers=headers)
        # NOTE(review): message says "Status code" but logs response.reason
        # (the textual status phrase); confirm whether status_code was intended.
        logger.info("Status code: {}".format(response.reason))
    except Exception as e:
        logger.error("Send failed: {}".format(str(e)))
|
nilq/baby-python
|
python
|
import torch.optim
import torch.nn as nn
class AgentControl:
    """Actor-Critic helper: n-step return bootstrapping, memory reordering
    and gradient updates for the actor and critic networks."""

    def __init__(self, hyperparameters):
        # Discount factor for the n-step returns.
        self.gamma = hyperparameters['gamma']
        self.device = 'cpu'  # 'cuda' if torch.cuda.is_available() else 'cpu'
        self.loss = nn.MSELoss()

    def get_rewards(self, old_rewards, new_states, critic_nn):
        """Return discounted n-step returns ordered last step first.

        The critic's (detached) value of the newest state bootstraps the
        backward discounted sum.
        """
        last = len(old_rewards) - 1
        bootstrap = critic_nn(
            torch.tensor(new_states[last], dtype=torch.float64).to(self.device)).detach()
        running = bootstrap.item()
        returns = []
        for step in range(last, -1, -1):
            running = old_rewards[step] + self.gamma * running
            returns.append(running)
        return returns

    def get_states_actions_entropies(self, st, ac, en):
        """Return copies of the memory lists reversed (last step first) so
        they line up with the returns produced by get_rewards."""
        return list(reversed(st)), list(reversed(ac)), list(reversed(en))

    def update_critic(self, rewards, states, entropies, critic_nn, critic_optim):
        """Run one optimizer step on the critic; return the scalar loss."""
        target = torch.tensor(rewards, dtype=torch.float64).to(self.device)
        batch = torch.tensor(states, dtype=torch.float64).to(self.device)
        # squeeze(-1) flattens the (n, 1) critic output into an n-vector.
        value = critic_nn(batch).squeeze(-1)
        total = self.loss(target, value)
        # Entropy term is built from a fresh, detached tensor, so it shifts
        # the reported loss value without contributing gradients.
        total = total + torch.mean(
            torch.tensor(entropies, dtype=torch.float64).to(self.device).detach())
        # Zero accumulated gradients before backprop.
        critic_optim.zero_grad()
        total.backward()
        critic_optim.step()
        return total.item()

    def estimate_advantage(self, rewards, states, critic_nn):
        """Advantage = bootstrapped return minus the critic's estimate."""
        target = torch.tensor(rewards, dtype=torch.float64).to(self.device)
        batch = torch.tensor(states, dtype=torch.float64).to(self.device)
        value = critic_nn(batch).squeeze(-1)
        return (target - value).detach()

    def update_actor(self, states, actions, entropies, advantage, actor_nn, actor_optim):
        """Run one policy-gradient step on the actor; return the scalar loss."""
        batch = torch.tensor(states, dtype=torch.float64).to(self.device)
        probs = actor_nn(batch)
        # Keep only the probability of the action actually taken per step.
        taken = probs[range(probs.shape[0]), actions]
        # Policy-gradient loss: -mean(log pi(a|s) * advantage).
        objective = -torch.mean(torch.log(taken) * advantage)
        # Detached entropy term, as in update_critic.
        objective = objective + torch.mean(
            torch.tensor(entropies, dtype=torch.float64).to(self.device).detach())
        actor_optim.zero_grad()
        objective.backward()
        actor_optim.step()
        return objective.item()
|
nilq/baby-python
|
python
|
"""Support for TPLink HS100/HS110/HS200 smart switch."""
import logging
import time
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W, ATTR_TODAY_ENERGY_KWH, SwitchDevice)
from homeassistant.const import ATTR_VOLTAGE
import homeassistant.helpers.device_registry as dr
from . import CONF_SWITCH, DOMAIN as TPLINK_DOMAIN
# Home Assistant platform constants for the TP-Link switch platform.
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
# Keys for the extra emeter values exposed through device_state_attributes.
ATTR_TOTAL_ENERGY_KWH = 'total_energy_kwh'
ATTR_CURRENT_A = 'current_a'
async def async_setup_platform(hass, config, add_entities,
                               discovery_info=None):
    """Deprecated platform-style entry point; only emits a warning."""
    _LOGGER.warning('Loading as a platform is no longer supported, '
                    'convert to use the tplink component.')
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Create a SmartPlugSwitch entity for every switch the component found."""
    entities = [SmartPlugSwitch(plug)
                for plug in hass.data[TPLINK_DOMAIN][CONF_SWITCH]]
    # True -> request an immediate first update for each entity.
    async_add_entities(entities, True)
    return True
class SmartPlugSwitch(SwitchDevice):
    """Representation of a TPLink Smart Plug switch."""

    def __init__(self, smartplug):
        """Initialize the switch."""
        self.smartplug = smartplug
        # sys_info is fetched lazily on the first update() and then cached.
        self._sysinfo = None
        self._state = None
        self._available = False
        # Set up emeter cache
        self._emeter_params = {}

    @property
    def unique_id(self):
        """Return a unique ID (the device MAC address)."""
        return self._sysinfo["mac"]

    @property
    def name(self):
        """Return the name of the Smart Plug."""
        return self._sysinfo["alias"]

    @property
    def device_info(self):
        """Return information about the device."""
        return {
            "name": self.name,
            "model": self._sysinfo["model"],
            "manufacturer": 'TP-Link',
            "connections": {
                (dr.CONNECTION_NETWORK_MAC, self._sysinfo["mac"])
            },
            "sw_version": self._sysinfo["sw_ver"],
        }

    @property
    def available(self) -> bool:
        """Return if switch is available."""
        return self._available

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self.smartplug.turn_on()

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self.smartplug.turn_off()

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device (cached emeter values)."""
        return self._emeter_params

    def update(self):
        """Update the TP-Link switch's state and emeter readings."""
        # Local import defers the pyHS100 dependency until update time.
        from pyHS100 import SmartDeviceException
        try:
            if not self._sysinfo:
                self._sysinfo = self.smartplug.sys_info
            self._state = self.smartplug.state == \
                self.smartplug.SWITCH_STATE_ON
            if self.smartplug.has_emeter:
                emeter_readings = self.smartplug.get_emeter_realtime()
                self._emeter_params[ATTR_CURRENT_POWER_W] \
                    = "{:.2f}".format(emeter_readings["power"])
                self._emeter_params[ATTR_TOTAL_ENERGY_KWH] \
                    = "{:.3f}".format(emeter_readings["total"])
                self._emeter_params[ATTR_VOLTAGE] \
                    = "{:.1f}".format(emeter_readings["voltage"])
                self._emeter_params[ATTR_CURRENT_A] \
                    = "{:.2f}".format(emeter_readings["current"])
                emeter_statics = self.smartplug.get_emeter_daily()
                try:
                    # "%d" (zero-padded day of month) instead of "%e":
                    # "%e" is a glibc extension not supported by strftime
                    # on Windows; int() strips the zero padding, so the
                    # looked-up day key is unchanged.
                    self._emeter_params[ATTR_TODAY_ENERGY_KWH] \
                        = "{:.3f}".format(
                            emeter_statics[int(time.strftime("%d"))])
                except KeyError:
                    # Device returned no daily history
                    pass
            self._available = True
        except (SmartDeviceException, OSError) as ex:
            # Only warn on the transition to unavailable to avoid log spam.
            if self._available:
                _LOGGER.warning("Could not read state for %s: %s",
                                self.smartplug.host, ex)
            self._available = False
|
nilq/baby-python
|
python
|
from .Util import *
|
nilq/baby-python
|
python
|
'''This file defines user interfaces to sDNA tools and how to convert inputs to config'''
##This file (and this file only) is released under the MIT license
##
##The MIT License (MIT)
##
##Copyright (c) 2015 Cardiff University
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
##THE SOFTWARE.
def metric_dropdown(name, label, include_match_analytical=False):
    """Return the input-spec tuple for the standard analysis-metric dropdown.

    include_match_analytical is accepted for interface compatibility but is
    currently unused (the option list is fixed).
    """
    options = ["EUCLIDEAN", "ANGULAR", "CUSTOM", "CYCLE",
               "CYCLE_ROUNDTRIP", "EUCLIDEAN_ANGULAR"]
    return (name, label, "Text", options, options[0], True)
# when this changes, add it to geodesics etc
def weighting_options():
    """Return the three input-spec tuples shared by weighted sDNA tools."""
    weighting = ("weighting", "Weighting", "Text",
                 ["Link", "Length", "Polyline"], "Link", True)
    origin = ("origweight", "Origin weight", "Field",
              ("Numeric", "input"), "", False)
    destination = ("destweight", "Destination weight", "Field",
                   ("Numeric", "input"), "", False)
    return [weighting, origin, destination]
def weighting_config(args):
    """Render the weighting options from *args* as an sDNA config fragment."""
    fragment = "origweight=%(origweight)s;destweight=%(destweight)s;weight_type=%(weighting)s"
    return fragment % args
def radius_options(include_banded=True, include_cont=True):
    """Return input-spec tuples for radius selection, optionally including
    the banded-radius and continuous-space toggles."""
    specs = [("radii", "Radii (in units of source data projection)",
              "Text", None, "n", True)]
    if include_banded:
        specs.append(("bandedradii", "Banded radius", "Bool", None, False, False))
    if include_cont:
        specs.append(("cont", "Continuous Space", "Bool", None, False, False))
    return specs
def radius_config(args):
    """Render the radius options from *args* as an sDNA config fragment.

    Uses dict.get instead of dict.has_key so the function runs on both
    Python 2 and Python 3 (has_key was removed in Python 3).
    """
    retval = ";radii=%(radii)s;" % args
    # The toggle keys are optional: radius_options() only emits them when
    # the corresponding option was requested, so tolerate missing keys.
    if args.get("bandedradii"):
        retval += "bandedradii;"
    if args.get("cont"):
        retval += "cont;"
    return retval
def quote(x):
    """Wrap *x* in double quotes (for config strings passed on a command line)."""
    return '"{0}"'.format(x)
class sDNAIntegral(object):
    """UI wrapper describing the sDNA Integral tool and its command syntax."""
    alias = "Integral Analysis"
    desc = ("<p>sDNA Integral is the core analysis tool of sDNA. It computes several flow, accessibility, severance and efficiency measures on networks.\n"
            "<p>For full details, see the sDNA documentation.\n")
    category = "Analysis"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output features", "OFC", None, "", True),
            ("betweenness", "Compute betweenness", "Bool", None, True, False),
            ("bidir", "Betweenness is bidirectional", "Bool", None, False, False),
            ("junctions", "Compute junction counts", "Bool", None, False, False),
            ("hull", "Compute convex hull statistics", "Bool", None, False, False),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
        ]
        tail = [
            ("zonefiles", "Zone table input csv files", "MultiInFile", "csv", "", False),
            ("odfile", "Origin Destination Matrix input file", "InFile", "csv", "", False),
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("intermediates", "Intermediate link filter (field name or expression)", "Text", None, "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        return head + radius_options() + weighting_options() + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral invocation."""
        config = ("start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "metric=%(analmet)s;"
                  "custommetric=%(custommetric)s;disable=%(disable)s;intermediates=%(intermediates)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args
        config += weighting_config(args) + radius_config(args)
        # betweenness/junctions/hull are on by default in the engine, so the
        # GUI booleans map to inverted "no..." flags; bidir maps directly.
        if not args["betweenness"]:
            config += ";nobetweenness"
        if not args["junctions"]:
            config += ";nojunctions"
        if not args["hull"]:
            config += ";nohull"
        if args["bidir"]:
            config += ";bidir"
        tables = ""
        if args["odfile"] != "":
            config += ";odmatrix"
            tables += args["odfile"]
        syntax = {"command": "sdnaintegral",
                  "inputs": {"net": args["input"], "tables": tables},
                  "outputs": {"net": args["output"]}}
        if args["zonefiles"] != "":
            syntax["inputs"]["tables"] = ",".join([tables] + args["zonefiles"].split(";"))
        syntax["config"] = quote(config)
        return syntax
class sDNAIntegralFromOD(object):
    """UI wrapper for Integral Analysis driven by an external OD matrix."""
    alias = "Integral from OD Matrix (assignment model)"
    desc = "<p>Runs Integral Analysis from a pre-specified Origin-Destination Matrix, allowing import from other models.\n"
    category = "Analysis"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        return [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("odfile", "Origin Destination Matrix input file", "InFile", "csv", "", True),
            ("output", "Output features", "OFC", None, "", True),
            ("bidir", "Betweenness is bidirectional", "Bool", None, False, False),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("intermediates", "Intermediate link filter (field name or expression)", "Text", None, "", False),
            ("zonedist", "Zone weight distribution expression", "Text", None, "euc", True),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral invocation."""
        config = ("odmatrix;zonedist=%(zonedist)s;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "metric=%(analmet)s;nojunctions;nohull;radii=n;"
                  "custommetric=%(custommetric)s;disable=%(disable)s;intermediates=%(intermediates)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args
        if args["bidir"]:
            config += ";bidir"
        return {"command": "sdnaintegral",
                "inputs": {"net": args["input"], "tables": args["odfile"]},
                "outputs": {"net": args["output"]},
                "config": quote(config)}
class sDNASkim(object):
    """UI wrapper for the skim-matrix export mode of sdnaintegral."""
    alias = "Skim Matrix"
    desc = "<p>Captures mean distance between zones as a skim matrix for input into external modelling tools.\n"
    category = "Analysis"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output Skim Matrix File", "OutFile", "csv", "", True),
            ("skimorigzone", "Origin zone field", "Field", ("String", "input"), "", True),
            ("skimdestzone", "Destination zone field", "Field", ("String", "input"), "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
        ]
        tail = [
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("odfile", "Origin Destination Matrix input file", "InFile", "csv", "", False),
            ("zonefiles", "Zone table input csv files", "MultiInFile", "csv", "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        return head + weighting_options() + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral skim invocation."""
        config = ("outputskim;skipzeroweightorigins;skimorigzone=%(skimorigzone)s;skimdestzone=%(skimdestzone)s;start_gs=%(start_gs)s;end_gs=%(end_gs)s;radii=n;nobetweenness;nojunctions;nohull;nonetdata;"
                  "metric=%(analmet)s;"
                  "custommetric=%(custommetric)s;disable=%(disable)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args
        config += weighting_config(args)
        tables = ""
        if args["odfile"] != "":
            config += ";odmatrix"
            tables += args["odfile"]
        syntax = {"command": "sdnaintegral",
                  "inputs": {"net": args["input"], "tables": tables},
                  "outputs": {"skim": args["output"]}}
        if args["zonefiles"] != "":
            syntax["inputs"]["tables"] = ",".join([tables] + args["zonefiles"].split(";"))
        syntax["config"] = quote(config)
        return syntax
class sDNAGeodesics(object):
    """UI wrapper that exports the geodesics computed by sdnaintegral."""
    alias = "Geodesics"
    desc = "<p>Outputs the geodesics (shortest paths) used by sDNA Integral analysis."
    category = "Analysis geometry"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output geodesic polyline features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            ("origins", "Origin IDs (leave blank for all)", "Text", None, "", False),
            ("destinations", "Destination IDs (leave blank for all)", "Text", None, "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
        ]
        tables = [
            ("odfile", "Origin Destination Matrix input file", "InFile", "csv", "", False),
            ("zonefiles", "Zone table input csv files", "MultiInFile", "csv", "", False),
        ]
        tail = [
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("intermediates", "Intermediate link filter (field name or expression)", "Text", None, "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        return head + weighting_options() + tables + radius_options() + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral geodesics invocation."""
        config = ("start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "metric=%(analmet)s;"
                  "custommetric=%(custommetric)s;"
                  "nonetdata;outputgeodesics;"
                  "origins=%(origins)s;destinations=%(destinations)s;disable=%(disable)s;oneway=%(oneway)s;intermediates=%(intermediates)s;"
                  "%(advanced)s;") % args
        config += weighting_config(args) + radius_config(args)
        tables = ""
        if args["odfile"] != "":
            config += ";odmatrix"
            tables += args["odfile"]
        syntax = {"command": "sdnaintegral",
                  "inputs": {"net": args["input"], "tables": tables},
                  "outputs": {"geodesics": args["output"]}}
        if args["zonefiles"] != "":
            syntax["inputs"]["tables"] = ",".join([tables] + args["zonefiles"].split(";"))
        syntax["config"] = quote(config)
        return syntax
class sDNAHulls(object):
    """UI wrapper that exports the convex hulls computed by sdnaintegral."""
    alias = "Convex Hulls"
    desc = "<p>Outputs the convex hulls of network radii used in sDNA Integral analysis."
    category = "Analysis geometry"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output hull polygon features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
        ]
        tail = [
            ("origins", "Origin IDs (leave blank for all)", "Text", None, "", False),
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        # Banded radii are not offered for hull output.
        return head + radius_options(False) + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral hulls invocation."""
        config = ("nobetweenness;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "nonetdata;outputhulls;"
                  "origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args + radius_config(args)
        return {"command": "sdnaintegral",
                "inputs": {"net": args["input"]},
                "outputs": {"hulls": args["output"]},
                "config": quote(config)}
class sDNANetRadii(object):
    """UI wrapper that exports the network radii used by sdnaintegral."""
    alias = "Network Radii"
    desc = "<p>Outputs the network radii used in sDNA Integral analysis."
    category = "Analysis geometry"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output net radius multipolyline features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
        ]
        tail = [
            ("origins", "Origin IDs (leave blank for all)", "Text", None, "", False),
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        return head + radius_options() + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral netradii invocation."""
        config = ("nobetweenness;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "nonetdata;outputnetradii;"
                  "origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args + radius_config(args)
        return {"command": "sdnaintegral",
                "inputs": {"net": args["input"]},
                "outputs": {"netradii": args["output"]},
                "config": quote(config)}
class sDNAAccessibilityMap(object):
    """UI wrapper for origin-specific accessibility map output."""
    alias = "Specific Origin Accessibility Maps"
    desc = "<p>Outputs accessibility maps for specific origins."
    category = "Analysis"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        return [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output polyline features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            ("origins", "Origin IDs (leave blank for all)", "Text", None, "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
            ("disable", "Disable lines (field name or expression)", "Text", None, "", False),
            ("oneway", "One way restrictions", "Field", ("Numeric", "input"), "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral destinations invocation."""
        config = ("start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "metric=%(analmet)s;"
                  "custommetric=%(custommetric)s;"
                  "nonetdata;outputdestinations;"
                  "origins=%(origins)s;disable=%(disable)s;oneway=%(oneway)s;"
                  "%(advanced)s;") % args
        return {"command": "sdnaintegral",
                "inputs": {"net": args["input"]},
                "outputs": {"destinations": args["output"]},
                "config": quote(config)}
class sDNAPrepare(object):
    """UI wrapper for the sdnaprepare network-repair tool."""
    alias = "Prepare Network"
    desc = \
"""<p>Prepares spatial networks for analysis by checking and optionally repairing various kinds of error.
<p><b>Note that sDNA Prepare Network only provides a small subset of the functions needed for network preparation.</b> Other free tools, combined with a good understanding of the subject, can fill the gap. <b>Reading the Network Preparation chapter of the sDNA Manual is strongly advised.</b>
<p>The errors fixed by Prepare Network are:
<ul>
<li><b>endpoint near misses</b> (XY and Z tolerance specify how close a near miss)
<li><b>duplicate lines</b>
<li><b>traffic islands</b> (requires traffic island field set to 0 for no island and 1 for island).  Traffic island lines are straightened; if doing so creates duplicate lines then these are removed.
<li><b>split links</b><i>. Note that fixing split links is no longer necessary as of sDNA 3.0 so this is not done by default</i>
<li><b>isolated systems</b>
</ul>
<p>Optionally, numeric data can be preserved through a prepare operation by providing the desired field names.
"""
    category = "Preparation"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        return [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output polyline features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            ("action", "Action", "Text", ["DETECT", "REPAIR"], "REPAIR", True),
            ("nearmisses", "Endpoint near misses", "Bool", None, True, True),
            ("trafficislands", "Traffic islands", "Bool", None, False, True),
            ("duplicates", "Duplicate polylines", "Bool", None, True, True),
            ("isolated", "Isolated systems", "Bool", None, True, True),
            ("splitlinks", "Split links", "Bool", None, False, True),
            ("tifield", "Traffic island field", "Field", ("Numeric", "input"), "", False),
            ("preserve_absolute", "Absolute data to preserve (numeric field names separated by commas)", "Text", None, "", False),
            ("preserve_unitlength", "Unit length data to preserve (numeric field names separated by commas)", "Text", None, "", False),
            ("preserve_text", "Text data to preserve (text field names separated by commas)", "Text", None, "", False),
            ("xytol", "Custom XY Tolerance", "Text", None, "", False),
            ("ztol", "Custom Z Tolerance", "Text", None, "", False),
        ]

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaprepare invocation."""
        # Names of the enabled repair toggles, in the engine-expected order.
        enabled = [name for name in
                   ["nearmisses", "duplicates", "isolated", "trafficislands", "splitlinks"]
                   if args[name]]
        args["boolstring"] = ";".join(enabled)
        config = ("start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "xytol=%(xytol)s;ztol=%(ztol)s;"
                  "action=%(action)s;"
                  "%(boolstring)s;"
                  "island=%(tifield)s;"
                  "data_absolute=%(preserve_absolute)s;data_unitlength=%(preserve_unitlength)s;data_text=%(preserve_text)s") % args
        return {"command": "sdnaprepare",
                "inputs": {"net": args["input"]},
                "outputs": {"net": args["output"], "errors": args["output"]},
                "config": quote(config)}
class sDNALineMeasures(object):
    """UI wrapper for per-link measures (linkonly mode of sdnaintegral)."""
    alias = "Individual Line Measures"
    desc = \
"""<p>Outputs connectivity, bearing, euclidean, angular and hybrid metrics for individual links.
<p>Connectivity output is useful for checking errors."""
    category = "Preparation"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        head = [
            ("input", "Input polyline features", "FC", "Polyline", "", True),
            ("output", "Output features", "OFC", None, "", True),
            ("start_gs", "Start grade separation", "Field", ("Numeric", "input"), "", False),
            ("end_gs", "End grade separation", "Field", ("Numeric", "input"), "", False),
            metric_dropdown("analmet", "Routing and analysis metric"),
        ]
        tail = [
            ("custommetric", "Custom metric field", "Field", ("Numeric", "input"), "", False),
            ("zonefiles", "Zone table input csv files", "MultiInFile", "csv", "", False),
            ("advanced", "Advanced config", "Text", None, "", False),
        ]
        return head + weighting_options() + tail

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnaintegral linkonly invocation."""
        config = ("linkonly;start_gs=%(start_gs)s;end_gs=%(end_gs)s;"
                  "metric=%(analmet)s;"
                  "custommetric=%(custommetric)s;"
                  "%(advanced)s;") % args + weighting_config(args)
        syntax = {"command": "sdnaintegral",
                  "inputs": {"net": args["input"]},
                  "outputs": {"net": args["output"]},
                  "config": quote(config)}
        if args["zonefiles"] != "":
            syntax["inputs"]["tables"] = ",".join(args["zonefiles"].split(";"))
        return syntax
class sDNALearn(object):
    """UI wrapper for the sdnalearn model-calibration tool."""
    alias = "Learn"
    desc = \
"""<p>Uses measured data to calibrate an sDNA model ready for prediction.  Proposed models are tested using cross-validation.  The available models are
<ul>
<li>Single best variable - performs bivariate regression of target against all variables and picks single predictor with best cross-validated fit</li>
<li>Multiple variables - Regularized multivariate lassoo regression</li>
<li>All variables - Regularized multivariate ridge regression</li>
</ul>
<p>Optionally, variables to use and transform can be specified using regular expressions (regex).  These follow the Python regex syntax.  The equivalent to a wildcard is
<pre>.*</pre>
<p>thus for example to test Betweenness variables (from sDNA Integral) over all radii you could specify
<pre>Bt.*</pre>
<p>This would match Bt1000, Bt2000, Bt300c, etc.
<p>Optionally, the best model can be saved as a model file to be used by sDNA Predict.
<p>Weighting lambda weights data points by y^lambda/y.  Setting to 1 implies unweighted regression.  Setting to around 0.7 can improve GEH statistic.
<p>Regression lambda if set should specify min,max regularization parameter for multivariate regression.
"""
    category = "Calibration"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        return [
            ("input", "Input features", "FC", None, "", True),
            ("output", "Output model file", "OutFile", "csv", "", False),
            ("resids", "Output residual features", "OFC", None, "", False),
            ("target", "Target variable", "Field", ("Numeric", "input"), "", True),
            ("predictors", "Predictor variables", "MultiField", ("Numeric", "input"), "", False),
            ("regex", "Predictor variable regex", "Text", None, "", False),
            ("algorithm", "Learning algorithm", "Text",
             ["Single_best_variable", "Multiple_variables", "All_variables"],
             "Single_best_variable", True),
            ("intercept", "Use intercept in multivariate models", "Bool", None, False, False),
            ("bctarget", "Box-Cox transform target variable", "Bool", None, False, False),
            ("bcregex", "Regex for predictor variables to Box-Cox transform", "Text", None, "", False),
            ("weightlambda", "Weighting lambda", "Text", None, "1", False),
            ("nfolds", "Number of folds for cross-validation", "Text", None, "7", True),
            ("reps", "Number of repetitions for bootstrapping", "Text", None, "50", True),
            ("gehtime", "Time interval for measurements (in hours, for computing GEH)", "Text", None, "1", True),
            ("reglambda", "Regularization lambda min,max", "Text", None, "", False),
        ]

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnalearn command line."""
        flags = ("--target %(target)s --mode %(algorithm)s --nfolds %(nfolds)s "
                 "--weightlambda %(weightlambda)s --reps %(reps)s --gehtime %(gehtime)s "
                 "--bcregex \"%(bcregex)s\"") % args
        # Optional switches, appended only when the user supplied them.
        if args["predictors"]:
            flags += ' --vars "' + args["predictors"] + '"'
        if args["regex"]:
            flags += ' --varregex "' + args["regex"] + '"'
        if args["bctarget"]:
            flags += " --boxcoxtarget"
        if args["intercept"]:
            flags += " --intercept"
        if args["reglambda"].strip() != "":
            flags += " --reglambda " + args["reglambda"]
        return {"command": "sdnalearn",
                "inputs": {"calibfile": args["input"]},
                "outputs": {"modelout": args["output"], "resids": args["resids"]},
                "config": flags}
class sDNAPredict(object):
    """UI wrapper for the sdnapredict tool."""
    alias = "Predict"
    desc = "<p>Uses a model file created by sDNA Learn to predict unknown data."
    category = "Calibration"

    def getInputSpec(self):
        """Declare inputs as (name, label, type, detail, default, required) tuples."""
        return [
            ("input", "Input features", "FC", None, "", True),
            ("output", "Output features", "OFC", None, "", True),
            ("predvar", "Prediction variable name", "Text", None, "prediction", True),
            ("modelfile", "Model file from sDNA Learn", "InFile", "csv", "", True),
        ]

    def getSyntax(self, args):
        """Map the GUI *args* dict onto an sdnapredict command line."""
        return {"command": "sdnapredict",
                "inputs": {"infile": args["input"]},
                "outputs": {"outfile": args["output"]},
                "config": '--predvarname %(predvar)s --modelfile "%(modelfile)s"' % args}
def get_tools():
    """Return the classes of all published sDNA tools, in menu order."""
    return [sDNAIntegral, sDNASkim, sDNAIntegralFromOD, sDNAGeodesics,
            sDNAHulls, sDNANetRadii, sDNAAccessibilityMap, sDNAPrepare,
            sDNALineMeasures, sDNALearn, sDNAPredict]
|
nilq/baby-python
|
python
|
"""
This module contains helpers for the XGBoost python wrapper: https://xgboost.readthedocs.io/en/latest/python/index.html
The largest part of the module consists of helper classes which make
using a validation set to select the number of trees transparent.
"""
import logging
logger = logging.getLogger(__name__)
import joblib
import numpy as np
import pathlib
import sklearn.exceptions
import sklearn.metrics
import sklearn.preprocessing
import tempfile
import xgboost as xgb
import toolz.dicttoolz
import pyllars.shell_utils as shell_utils
import pyllars.validation_utils as validation_utils
from typing import Optional
def xgbooster_predict_proba(
        booster:xgb.Booster,
        d_x:xgb.DMatrix) -> np.ndarray:
    """ Simulate the `predict_proba` interface from sklearn

    This function will only work as expected if `booster` has been
    training using the `binary:logistic` loss.

    Parameters
    ----------
    booster : xgboost.Booster
        The trained booster

    d_x : xgboost.DMatrix
        The dataset

    Returns
    -------
    y_proba_pred : numpy.ndarray
        The probabilistic predictions. The shape of the array
        is (n_row, 2): column 0 holds the negative-class probability,
        column 1 the positive-class score from the booster.
    """
    positive = booster.predict(d_x)
    probabilities = np.zeros((d_x.num_row(), 2))
    probabilities[:, 0] = 1 - positive
    probabilities[:, 1] = positive
    return probabilities
def xgbooster_to_json(booster:xgb.Booster) -> str:
    """ Get the JSON representation of `booster`

    Parameters
    ----------
    booster : xgboost.Booster
        The trained booster

    Returns
    -------
    booster_json : str
        The json string
    """
    import os
    fd, fname = tempfile.mkstemp(suffix=".json")
    # mkstemp returns an open descriptor; close it immediately so it is
    # not leaked (the booster writes via the file name, not the fd).
    os.close(fd)
    booster.save_model(fname)
    with open(fname) as b_f:
        booster_json = b_f.readlines()
    shell_utils.remove_file(fname)
    # save_model emits single-line JSON, so the first line is the document.
    booster_json = booster_json[0]
    return booster_json
def xgbooster_from_json(booster_json:str) -> xgb.Booster:
    """ Create a booster based on the json string

    Parameters
    ----------
    booster_json : str
        The json string

    Returns
    -------
    booster : xgboost.Booster
        The trained booster
    """
    import os
    fd, fname = tempfile.mkstemp(suffix=".json")
    # mkstemp returns an open descriptor; close it immediately so it is
    # not leaked (we re-open the path by name below).
    os.close(fd)
    with open(fname, 'w') as b_f:
        b_f.writelines(booster_json)
    booster = xgb.Booster()
    booster.load_model(fname)
    shell_utils.remove_file(fname)
    return booster
class XGBClassifierWrapper(object):
    """ This class wraps xgboost to facilitate transparent
    use of a validation set to select the number of trees.

    It also optionally scales the input features. (In principle,
    it is not necessary to scale input features for trees. Still,
    in practice, it anecdotally helps, and the theory also suggests
    that it should not hurt.)

    **N.B.** Currently, this class is hard-coded to use (binary) AUC
    as the metric for selecting the best model on the validation set.

    Attributes
    ----------
    num_boost_round : int
        The number of boosting rounds

    scale_features : bool
        Whether to fit a StandardScaler on the training data
        and use it to transform the validation and test data.

    validation_period : int
        The number of training iterations (that is, the number of new
        trees) between checking the validation set.

    name : str
        A name for use in logging statements.

    booster_ : xgboost.Booster
        The trained model.

    best_booster_ : xgboost.Booster
        The best booster found according to performance on the
        validation set.

    scaler_ : sklearn.preprocessing.StandardScaler
        The scaler fit on the training data set.

    **kwargs : key=value pairs
        Additional keyword arguments are passed through to the
        xgboost.train constructor.
    """
    def __init__(
            self,
            num_boost_round:int=10,
            scale_features:bool=False,
            validation_period:int=1,
            # NOTE: 'Classifer' typo kept so existing log output is unchanged
            name:str="XGBClassiferWrapper",
            **kwargs):
        self._initialize()
        self.num_boost_round = num_boost_round
        self.scale_features = scale_features
        self.validation_period = validation_period
        self.name = name
        self.kwargs = kwargs

    def _initialize(self):
        """ Reset all hyperparameters and internal state to defaults """
        self.num_boost_round = None
        self.scale_features = None
        self.validation_period = None
        self.name = None
        self.kwargs = None
        self.xgb_hyperparams = dict()

    def log(self, msg, level=logging.INFO):
        """ Log `msg` at `level`, prefixed with this model's name """
        msg = "[{}]: {}".format(self.name, msg)
        logger.log(level, msg)

    def _validate(self, xgboost_callback_env):
        """ Score the current model on the validation set; keep a copy
        if it is the best (by AUC) seen so far """
        iteration = xgboost_callback_env.iteration
        booster = xgboost_callback_env.model

        y_score = booster.predict(self._dval)

        # TODO: allow other validation metrics
        validation_roc = sklearn.metrics.roc_auc_score(
            y_true=self._dval.get_label(),
            y_score=y_score
        )

        msg = "{}\tValidation AUC: {:.6f}".format(iteration, validation_roc)
        self.log(msg, logging.DEBUG)

        if validation_roc > self._best_validation_roc:
            self._best_validation_roc = validation_roc
            # copy: xgboost keeps mutating the same booster as it trains
            self.best_booster_ = booster.copy()
            msg = "*** New Best ***"
            self.log(msg, logging.DEBUG)

    def _callback(self, xgboost_callback_env):
        """ xgboost training callback: check the validation set every
        `validation_period` boosting iterations """
        iteration = xgboost_callback_env.iteration
        if iteration % self.validation_period == 0:
            self._validate(xgboost_callback_env)

    def fit(self,
            X_t:np.ndarray,
            y_t:np.ndarray,
            X_v:Optional[np.ndarray]=None,
            y_v:Optional[np.ndarray]=None):
        """ Fit a model

        Parameters
        ----------
        {X,y}_t : numpy.ndarray
            The training data. **N.B.**

        {X,y}_v : typing.Optional[numpy.ndarray]
            The validation data

        Returns
        -------
        self
        """
        if self.scale_features:
            msg = "scaling the training data"
            self.log(msg)
            self.scaler_ = sklearn.preprocessing.StandardScaler()
            X_t = self.scaler_.fit_transform(X_t)

            if X_v is not None:
                msg = "scaling the validation data"
                # fix: this message was composed but never logged
                self.log(msg)
                X_v = self.scaler_.transform(X_v)
        else:
            self.scaler_ = None

        self._dtrain = xgb.DMatrix(X_t, label=y_t)

        callbacks = None  # we will not use any callbacks by default

        if X_v is not None:
            self._dval = xgb.DMatrix(X_v, label=y_v)

            # we *will* use a callback if we want to use the
            # validation set
            callbacks = [self._callback]

        # we can set these either way. they will just not be used
        # if there is no validation set.
        self._best_validation_roc = -np.inf
        self.best_booster_ = None

        msg = "training the model"
        self.log(msg)

        # fix: removed a stray no-op `self.kwargs` expression statement here
        xgb_kwargs = toolz.dicttoolz.merge(self.kwargs, self.xgb_hyperparams)

        self.booster_ = xgb.train(
            xgb_kwargs,
            self._dtrain,
            self.num_boost_round,
            callbacks=callbacks
        )

        # if we did not use a validation set, then just use the
        # final learned model as the best model
        if self.best_booster_ is None:
            self.best_booster_ = self.booster_

        return self

    def predict_proba(self, X:np.ndarray) -> np.ndarray:
        """ Predict the likelihood of each class.

        This function will only work as expected if training
        used the `binary:logistic` loss.

        Parameters
        ----------
        X : numpy.ndarray
            The input data

        Returns
        -------
        y_proba_pred : numpy.ndarray
            The probabilistic predictions
        """
        validation_utils.check_is_fitted(self, 'best_booster_', self.name)

        if self.scaler_ is not None:
            msg = "transforming the input data"
            self.log(msg, logging.DEBUG)
            X = self.scaler_.transform(X)

        d_x = xgb.DMatrix(X)
        y_proba_pred = xgbooster_predict_proba(self.best_booster_, d_x)
        return y_proba_pred

    def get_params(self, deep=False):
        """ Get the hyperparameters and other meta data about this model """
        params = {
            'num_boost_round': self.num_boost_round,
            'scale_features': self.scale_features,
            'validation_period': self.validation_period,
            'name': self.name
        }
        params.update(self.kwargs)

        # we do not do anything with `deep`
        return params

    def set_params(self, **params):
        """ Set the hyperparameters of the model """
        # very similar to the sklearn implementation
        valid_params = self.get_params(deep=True)
        for k,v in params.items():
            if k not in valid_params:
                # NOTE(review): unknown keys are accepted silently; the
                # original ValueError is left commented out below
                pass
                #msg = "Invalid parameter: {}".format(k)
                #raise ValueError(msg)
            else:
                # this is a hyperparameter for xgb
                self.xgb_hyperparams[k] = v
            # then this is a valid hyperparameter
            setattr(self, k, v)
        return self

    def save_model(self, out):
        """ Save the scaler (if present) and best model to disk.

        This *does not* save the training or validation datasets.
        """
        out = pathlib.Path(out)
        out.mkdir(parents=True, exist_ok=True)

        scaler_out = out / "scaler.jpkl"
        joblib.dump(self.scaler_, str(scaler_out))

        booster_out = out / "booster.jpkl"
        joblib.dump(self.best_booster_, str(booster_out))

        params_out = out / "params.jpkl"
        joblib.dump(self.get_params(deep=True), str(params_out))

    def __getstate__(self):
        # the booster is serialized via its JSON form so the pickle does
        # not depend on xgboost's internal binary layout
        state = {
            'scaler': self.scaler_,
            'booster': xgbooster_to_json(self.best_booster_),
            'params': self.get_params(deep=True)
        }
        return state

    def __setstate__(self, state):
        self._initialize()
        self.scaler_ = state['scaler']
        self.best_booster_ = xgbooster_from_json(state['booster'])
        for k,v in state['params'].items():
            setattr(self, k, v)

        # further, set the appropriate kwargs
        self.kwargs = state['params'].copy()

        # remove the basic parameters
        self.kwargs.pop('num_boost_round')
        self.kwargs.pop('scale_features')
        self.kwargs.pop('validation_period')
        self.kwargs.pop('name')

    @classmethod
    def load_model(klass, f):
        """ Load the scaler, model, and hyperparameters from disk """
        in_f = pathlib.Path(f)

        params_in = in_f / "params.jpkl"
        params = joblib.load(str(params_in))
        model = klass(**params)

        scaler_in = in_f / "scaler.jpkl"
        scaler = joblib.load(str(scaler_in))
        model.scaler_ = scaler

        booster_in = in_f / "booster.jpkl"
        booster = joblib.load(str(booster_in))
        model.best_booster_ = booster

        return model
|
nilq/baby-python
|
python
|
#! /usr/bin/python3.5
# -*- coding: utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
def subdivied(Liste, Size):
    """Return every contiguous window of length `Size` from `Liste`.

    For a list [x_0, ..., x_{N-1}] this yields
    [x_i, x_{i+1}, ..., x_{i+Size-1}] for i = 0 .. N - Size.
    Windows that would run past the end of the list are not produced,
    so the result is empty when Size > len(Liste).
    """
    # slicing builds each window in one step instead of the original
    # element-by-element inner loop
    return [Liste[i:i + Size] for i in range(len(Liste) - Size + 1)]
def fitness(solution, K, dictionnaire):
    """Fitness of `solution` on an NK landscape.

    solution -- a list of {0,1} bits representing a candidate
    K -- the K of the NK problem
    dictionnaire -- lookup table mapping (K+1)-bit tuples to values
    """
    # slide a (K+1)-wide window over the solution and sum each
    # window's contribution from the lookup table
    return sum(dictionnaire[tuple(window)]
               for window in subdivied(solution, K + 1))
def Hamming(v1, v2):
    """Hamming distance between the bit vectors v1 and v2."""
    # count positions where the two vectors disagree; for 0/1 entries
    # |a - b| is exactly the disagreement indicator
    return sum(np.abs(v1[i] - v2[i]) for i in range(len(v1)))
def random_sequence(Size):
    """Generate a uniformly random bit vector of length `Size`."""
    # one fair coin flip per position, using Python's builtin PRNG
    return [0 if random.random() < 0.5 else 1 for _ in range(Size)]
def neighbor(solution):
    """All neighbors of `solution`: every bit vector at Hamming distance 1.

    The i-th neighbor is `solution` with bit i flipped.
    """
    # (bit + 1) % 2 flips a 0/1 bit; rebuild the vector around it
    return [solution[:i] + [(solution[i] + 1) % 2] + solution[i + 1:]
            for i in range(len(solution))]
def choose_neighbor(neighbors, dictionnaire, K, sol_fitness):
    """Pick the best neighbor if it strictly improves on `sol_fitness`.

    Returns [True, best_neighbor] when a strictly better neighbor
    exists, [False] otherwise (hill-climbing stop signal).
    """
    # fix: the original keyed a dict by fitness value, silently dropping
    # neighbors with equal fitness; score them all explicitly instead
    scored = [(fitness(ele, K, dictionnaire), ele) for ele in neighbors]
    best_fit = max(fit for fit, _ in scored)
    if best_fit > sol_fitness:
        # the original dict kept the *last* neighbor per fitness value,
        # so preserve that tie-breaking behavior
        best = [ele for fit, ele in scored if fit == best_fit][-1]
        return [True, best]
    return [False]
def Hill_Climbing(N, K, dictionnaire):
    """Deterministic hill climbing on an NK landscape.

    N -- solution length; K -- the K of the NK problem;
    dictionnaire -- the (K+1)-bit lookup table.
    Returns [solution, steps] once no neighbor improves the fitness;
    the final solution and its fitness are printed.
    """
    sol = random_sequence(N)
    steps = 0
    while True:
        # score the neighborhood and take the best strictly-improving move
        outcome = choose_neighbor(neighbor(sol), dictionnaire, K,
                                  fitness(sol, K, dictionnaire))
        if not outcome[0]:
            # local optimum reached: report and stop
            print(sol, ":", fitness(sol, K, dictionnaire))
            return [sol, steps]
        sol = outcome[1]
        steps += 1
def choose_neighbor_probabiliste(voisins, dictionnaire, K, fitness_sol, best_sol):
    """Probabilistic neighbor selection (see the TP statement).

    Returns [False, neighbor] when some neighbor strictly beats the
    best-known solution (a new absolute best), otherwise
    [True, neighbor] with a neighbor sampled proportionally to fitness.
    """
    fitness_best = fitness(best_sol, K, dictionnaire)
    element = [a for a in range(len(voisins))]
    # fitness of every neighbor, computed once
    fitness_res = [fitness(ele, K, dictionnaire) for ele in voisins]
    # absolute improvement over the best-known solution?
    if max(fitness_res) > fitness_best:
        return [False, voisins[fitness_res.index(max(fitness_res))]]
    # fix: the original recomputed sum(fitness_res) inside the lambda for
    # every element (O(n^2)); hoist it out of the loop
    total = sum(fitness_res)
    probas = [f / total for f in fitness_res]
    return [True, voisins[np.random.choice(element, p=probas)]]
def Hill_Climbing_probabiliste(N, K, dictionnaire, Steps):
    """Probabilistic hill climbing, limited to `Steps` iterations.

    Same parameters as Hill_Climbing plus the step budget. Keeps track
    of the best solution seen; prints and returns it at the end.
    """
    # start from a random solution, which is also the initial best
    sol = random_sequence(N)
    best = list(sol)
    while Steps > 0:
        voisins = neighbor(sol)
        Liste_tmp = choose_neighbor_probabiliste(
            voisins, dictionnaire, K, fitness(sol, K, dictionnaire), best)
        if Liste_tmp[0]:
            # flag True: an ordinary probabilistic move, not a new best
            sol = list(Liste_tmp[1])
        else:
            # flag False: a neighbor beat the best-known solution
            best = list(Liste_tmp[1])
        Steps -= 1
    print(best, ":", fitness(best, K, dictionnaire))
    return best
def Hill_50_times_proba(N, K, dictionnaire, Steps):
    """Run the probabilistic hill climber 50 times (budget 10*Steps each)
    and collect the best solutions in a list."""
    return [Hill_Climbing_probabiliste(N, K, dictionnaire, 10 * Steps)
            for _ in range(50)]
def Hill_50_times(N, K, dictionnaire):
    """Run the deterministic hill climber 50 times and collect the
    [solution, steps] results in a list."""
    return [Hill_Climbing(N, K, dictionnaire) for _ in range(50)]
def test_tp1():
    """TP1 experiment: 50 deterministic hill-climbing runs for K in
    {0, 1, 2}, then 50 probabilistic runs budgeted from the average
    deterministic step count, and finally histograms of pairwise
    Hamming distances between the solutions found.
    """
    # lookup tables for K = 0, 1, 2
    d_k0 = {(0,): 2, (1,): 1}
    d_k1 = {(0, 0): 2, (0, 1): 3, (1, 0): 2, (1, 1): 0}
    d_k2 = {(0, 0, 0): 0, (0, 0, 1): 1, (0, 1, 0): 1, (0, 1, 1): 0, (1, 0, 0): 2, (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): 0}
    final_0 = Hill_50_times(21, 0, d_k0)
    # column 1 of each [solution, steps] row is the step count;
    # average it over the 50 runs
    tmp = np.matrix(final_0)
    tmp = sum(tmp[:, 1])
    moyenne_steps_0 = tmp[0, 0] / 50
    print("Moyenne pas K=0 : ", moyenne_steps_0)
    final_1 = Hill_50_times(21, 1, d_k1)
    tmp = np.matrix(final_1)
    tmp = sum(tmp[:, 1])
    moyenne_steps_1 = tmp[0, 0] / 50
    print("Moyenne pas K=1 : ", moyenne_steps_1)
    final_2 = Hill_50_times(21, 2, d_k2)
    tmp = np.matrix(final_2)
    tmp = sum(tmp[:, 1])
    moyenne_steps_2 = tmp[0, 0] / 50
    print("Moyenne pas K=2 : ", moyenne_steps_2)
    # probabilistic runs, budgeted with the deterministic averages
    final_prob_0 = Hill_50_times_proba(21, 0, d_k0, moyenne_steps_0)
    print("Fin proba K=0")
    final_prob_1 = Hill_50_times_proba(21, 1, d_k1, moyenne_steps_1)
    print("Fin proba K=1")
    final_prob_2 = Hill_50_times_proba(21, 2, d_k2, moyenne_steps_2)
    print("Fin proba K=2")
    # the maximum distance is N (21); collect pairwise Hamming distances
    # NOTE(review): `for j in range(50-i)` compares (i, j) with j < 50-i,
    # which includes j == i and misses some pairs — presumably meant to be
    # all unordered pairs; confirm before relying on the histograms.
    d_d_0 = []
    d_d_1 = []
    d_d_2 = []
    d_p_0 = []
    d_p_1 = []
    d_p_2 = []
    for i in range(50):
        for j in range(50-i):
            d_d_0.append(Hamming(final_0[i][0], final_0[j][0]))
            d_d_1.append(Hamming(final_1[i][0], final_1[j][0]))
            d_d_2.append(Hamming(final_2[i][0], final_2[j][0]))
            d_p_0.append(Hamming(final_prob_0[i], final_prob_0[j]))
            d_p_1.append(Hamming(final_prob_1[i], final_prob_1[j]))
            d_p_2.append(Hamming(final_prob_2[i], final_prob_2[j]))
    # one histogram per configuration, shown one at a time
    plt.hist(d_d_0)
    print("d_d_0")
    plt.show()
    plt.hist(d_d_1)
    print("d_d_1")
    plt.show()
    plt.hist(d_d_2)
    print("d_d_2")
    plt.show()
    plt.hist(d_p_0)
    print("d_p_0")
    plt.show()
    plt.hist(d_p_1)
    print("d_p_1")
    plt.show()
    plt.hist(d_p_2)
    print("d_p_2")
    plt.show()
def interface():
    """Console menu for the TP.

    Asks for the method (1 = deterministic, anything else but '2' also
    defaults to deterministic; 2 = probabilistic), then for K, and runs
    the corresponding hill climbing on the fixed lookup tables.
    """
    # lookup tables for K = 0, 1, 2
    d_k0 = {(0,): 2, (1,): 1}
    d_k1 = {(0, 0): 2, (0, 1): 3, (1, 0): 2, (1, 1): 0}
    d_k2 = {(0, 0, 0): 0, (0, 0, 1): 1, (0, 1, 0): 1, (0, 1, 1): 0, (1, 0, 0): 2, (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): 0}
    print("[1] : Hill-Climbing déterministe (default)\n[2] : Hill-Climbing probabiliste\n")
    i = input()
    # fix: input() returns a string, so the original `i != 2` was always
    # true and the probabilistic branch was unreachable
    if i != '2':
        print("Hill-Climbing déterministe")
        k = input("K=")
        if str(k) == '0':
            Hill_Climbing(21, 0, d_k0)
        elif str(k) == '1':
            Hill_Climbing(21, 1, d_k1)
        elif str(k) == '2':
            Hill_Climbing(21, 2, d_k2)
        else:
            print("Erreur\n")
    else:
        print("Hill-Climbing probabiliste")
        k = input("K=")
        if str(k) == '0':
            Hill_Climbing_probabiliste(21, 0, d_k0, 10 * 10)
        elif str(k) == '1':
            Hill_Climbing_probabiliste(21, 1, d_k1, 10 * 6)
        elif str(k) == '2':
            Hill_Climbing_probabiliste(21, 2, d_k2, 10 * 6)
        else:
            print("Erreur\n")
if __name__ == '__main__':
    # keep presenting the menu until the process is killed
    while True:
        interface()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 16:22:45 2019
@author: Soumitra
"""
import math
import numpy as np
import numpy.fft as f
import matplotlib.pyplot as plt
# discrete-time signal x[n] = (-1)^n (n+1) for n = 0..11
n = np.arange(12)
x = ((-1) ** n) * (n + 1)
plt.xlabel('n')
plt.ylabel('x[n]')
plt.title(r'Plot of DT signal x[n]')
plt.stem(n, x)

# 12-point DFT of x[n]
# fix: the original recomputed n and x here with identical statements
y = f.fft(x)
print(y)

# magnitude and phase of each DFT bin
import cmath as cm
p = [cm.phase(y[i]) for i in range(12)]  # phases: computed but not plotted
m = [abs(y[i]) for i in range(12)]
# frequency axis: k-th bin placed at k*pi/12
# NOTE(review): DFT bin spacing is conventionally 2*pi*k/N; confirm the
# intended scaling before interpreting the axis.
k = [(i * math.pi) / 12 for i in range(12)]
plt.xlabel('k')
plt.ylabel('magnitude')
plt.title(r'Plot of mag vs frequency')
plt.stem(k, m)
|
nilq/baby-python
|
python
|
"""Iterative Compression Module."""
from experiments import RESULTS_DIR, TABLES_DIR
from pathlib import Path
# Paths
# directory containing this module
IC_DIR = Path(__file__).parent
# CSV produced by the preprocessing-level self-comparison experiment
SELF_COMPARISON_DATA_PATH = RESULTS_DIR / 'ic_preprocessing_level.csv'
# file-name template for per-timeout LaTeX tables
IC_TABLE_FORMAT_STR = 'timeout_{}.tex'
IC_TABLES_DIR = TABLES_DIR / 'ic'
# created eagerly at import time
# NOTE(review): no parents=True — assumes TABLES_DIR already exists; confirm
IC_TABLES_DIR.mkdir(exist_ok=True)
BASELINE_FILE = str(RESULTS_DIR / 'ic_baseline_experiment_results.csv')
# Constants
# preprocessing levels exercised by the experiments
PREPROCESSING_LEVELS = [0, 1, 2]
|
nilq/baby-python
|
python
|
"""Examples showing how one might use the Result portion of this library."""
import typing as t
import requests
from safetywrap import Result, Ok, Err
# ######################################################################
# One: Validation Pipeline
# ######################################################################
# Sometimes you've got a bunch of validation functions that you would
# like to run on some data, and you want to bail early if any of them
# fails. Particularly when you want to send back some information about
# what failed to validate, you're forced to e.g. return a 2-tuple of
# validation status and a string with info, or to raise a custom
# exception with that data ensconced inside. In either case, you wind
# up having to do a lot of if/else or try/except logic in the calling
# context. The Result type allows you to get rid of all that extra
# boilerplate and get down to what matters: defining a pipeline of
# validation errors with early exiting.
# ######################################################################
class Validator:
    """A validator for validating hopefully valid things.

    We have a string to validate: it must be at least MIN_LEN
    characters long, contain no disallowed characters, start with a
    capital letter, end with a period, and contain the substring
    "shabazz".
    """

    MIN_LEN = 10
    DISALLOWED_CHARS = ("^", "_", "O")
    MUST_CONTAIN = "shabazz"

    def validated(self, string: str) -> Result[str, str]:
        """Return the validated string or any validation error.

        The Ok value is the validated string; the Err value is a
        description of the first failed check. Because every check
        returns a Result, the first Err short-circuits the rest of
        the pipeline and is what this method returns — no if/else or
        try/except needed in the caller.
        """
        checks = (
            self._validate_chars,       # and_then == flatmap
            self._validate_capitalized,
            self._validate_end_char,
            self._validate_substring,
        )
        result = self._validate_length(string)
        for check in checks:
            result = result.and_then(check)
        # Returning a Result *forces* the caller to deal with possible
        # failure: unwrap(), check is_ok(), or keep pipelining.
        return result

    def _validate_length(self, string: str) -> Result[str, str]:
        """Check that the string is of the proper length."""
        if len(string) >= self.MIN_LEN:
            return Ok(string)
        return Err("String is too short")

    def _validate_chars(self, string: str) -> Result[str, str]:
        """Check that the string has no disallowed chars."""
        if any(ch in string for ch in self.DISALLOWED_CHARS):
            return Err("String has disallowed chars")
        return Ok(string)

    def _validate_capitalized(self, string: str) -> Result[str, str]:
        """Check that the starting character is a capital."""
        if string and not string[0].isupper():
            return Err("Starting character is not uppercase.")
        return Ok(string)

    def _validate_end_char(self, string: str) -> Result[str, str]:
        """Check the string ends with a period."""
        if string and not string.endswith("."):
            return Err("String does not end with a period")
        return Ok(string)

    def _validate_substring(self, string: str) -> Result[str, str]:
        """Check the string has the required substring."""
        if self.MUST_CONTAIN in string:
            return Ok(string)
        return Err(f"String did not contain '{self.MUST_CONTAIN}'")

    def test_self(self) -> None:
        """Quick test to make sure we're not crazy."""
        goods = ("AshabazzB.", "Abshabazz.")
        bads = ("shabazz", "Ab.", "Ashabazz^B.")
        assert all(self.validated(g).is_ok() for g in goods)
        assert all(self.validated(b).is_err() for b in bads)
        print("Validator.test_self: everything as expected!")
# ######################################################################
# Two: Wrangling Exceptions
# ######################################################################
# It's common in FP-related tutorials to hear exceptions described as
# children throwing tantrums, but it's really worse than that. Calling
# a method that might throw involves either figuring out in detail any
# exception that might be thrown or catching every exception all
# william-nilliam and then dealing with them generically. Doing either
# of the two means that you've got to litter your code with try/except
# blocks, forcing you to consider what the _implementation_ of the thing
# you're using is than what _interface_ you're trying to create.
# Using Result.of can make life easier.
# ######################################################################
class CatFactGetter:
    """Do something fraught with error.

    Forget enumerating all the possible exceptions; just focus on what
    we are trying to do, which is to get a cat fact.

    NOTE: this requires the `requests` library to be installed
    """

    def get_fact(self) -> str:
        """Get a cat fact!"""
        # Wrap the GET request in a Result: any raised exception becomes
        # the Err value instead of propagating.
        response = Result.of(
            requests.get, "https://cat-fact.herokuapp.com/facts/random"
        )
        # Success path: parse the body as JSON ...
        parsed = response.and_then(lambda resp: Result.of(resp.json))
        # ... and pull the fact text out of the dict (or a fallback note).
        fact = parsed.map(
            lambda body: t.cast(
                str, body.get("text", "Unexpected cat fact format!")
            )
        )
        # Error path: any captured exception is rendered via str(), so the
        # return type is a plain string either way.
        return fact.unwrap_or_else(str)

    def get_fact_result(self) -> Result[str, Exception]:
        """Return a Result for a cat fact.

        Same pipeline as `get_fact`, but the Result itself is returned,
        so the caller decides what to do with any error.
        """
        response = Result.of(
            requests.get,
            "https://cat-fact.herokuapp.com/facts/random",
            # this is the default, but sometimes the type checker wants us
            # to make it explicit. See python/mypy#3737 for deets.
            catch=Exception,
        )
        return response.and_then(lambda resp: Result.of(resp.json)).map(
            lambda body: t.cast(
                str, body.get("text", "Unexpected cat fact format!")
            )
        )

    def test_get_fact(self) -> None:
        """Test getting a cat fact."""
        fact = self.get_fact()
        assert isinstance(fact, str)
        print(fact)

    def test_get_fact_result(self) -> None:
        """Test getting a cat fact as a result!

        Note that here, the caller has to decide what to do with any
        potential error in order to get to the cat fact.
        """
        fact_res = self.get_fact_result()
        fact_str = fact_res.unwrap_or_else(lambda exc: f"ERROR: {str(exc)}")
        assert isinstance(fact_str, str)
        if fact_res.is_err():
            assert "ERROR" in fact_str
        print(fact_str)
if __name__ == "__main__":
    # exercise both examples when run as a script
    Validator().test_self()
    getter = CatFactGetter()
    getter.test_get_fact()
    getter.test_get_fact_result()
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect, render_to_response
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.forms.util import ErrorList
from django.contrib import auth, messages
from django.conf import settings
from django.http import HttpResponseRedirect
from django.template import RequestContext
from datetime import datetime, timedelta
from djkatta.accounts.models import pass_reset_validb
from djkatta.accounts.forms import (
RegistrationForm, LoginForm, PasswordResetRequestForm,
PasswordChangeForm, PasswordResetChangeForm
)
from djkatta.accounts.utils import (
generate_random_string, get_username_from_email, get_email_from_username,
send_pass_reset_mail, reCaptcha,
)
# import logging
# template (DRY) for message box rendering
def message_box(request=None, message="Something went wrong.", redir=settings.LOGIN_URL):
    """Flash `message` on the request, then redirect to `redir`.

    Shared helper (DRY) for rendering a one-off message box.
    """
    messages.success(request, message)
    return redirect(redir)
@csrf_protect
def login(request, *args, **kwargs):
    """Login view for User accounts.

    Accepts a bare username or a full email address (everything from
    '@' on is discarded). On success redirects to LOGIN_REDIRECT_URL;
    "remember me" extends the session to 7 days.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    # Redirects user if already logged in
    if request.user.is_authenticated():
        redir = request.GET.get('next', None)
        if not redir:
            redir = settings.LOGIN_REDIRECT_URL
        return redirect(redir)
    else:
        form = LoginForm()
        if request.POST:
            form = LoginForm(request.POST)
            if form.is_valid():
                usernm = form.cleaned_data['username'].strip()
                # strip the domain part if an email address was entered
                if '@' in usernm:
                    usernm = usernm[:usernm.find('@')]
                passwd = form.cleaned_data['password']
                user = auth.authenticate(username=usernm, password=passwd)
                if user and user.is_active:
                    # Correct password, and the user is marked "active"
                    auth.login(request, user)
                    if form.cleaned_data['login_rem']:
                        # "remember me": keep the session alive for 7 days
                        request.session.set_expiry(7*60*60*24)
                    return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
                else:
                    errors = form._errors.setdefault("username", ErrorList())
                    errors.append("Invalid username or password")
    return render_to_response('accounts/login.html',locals(),RequestContext(request))
@csrf_protect
def register(request):
    """Registration view.

    Creates the account with a random password and emails a reset link
    through which the user sets a real one. Registration is gated by a
    reCAPTCHA check.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    form = RegistrationForm()
    if request.POST:
        form = RegistrationForm(request.POST)
        if form.is_valid():
            usernm = form.cleaned_data['username'].strip()
            # strip the domain part if an email address was entered
            if '@' in usernm:
                usernm = usernm[:usernm.find('@')]
            try:
                user = auth.models.User.objects.get(username__iexact=usernm)
            except auth.models.User.DoesNotExist:
                # fix: was a bare `except:`; only "no such user" means
                # the name is free to register
                user = False
            if user and user.is_active:
                errors = form._errors.setdefault("username", ErrorList())
                errors.append("That username is already registered! "
                              "If you have recently registered, you need to reset your password.")
            elif not usernm:
                errors = form._errors.setdefault("username", ErrorList())
                errors.append("Please enter a valid username.")
            else:
                # check for captcha response
                remote_ip = request.META.get('REMOTE_ADDR', '')
                captcha_response = request.POST.get('g-recaptcha-response','')
                captcha_ok, captcha_msg = reCaptcha(remote_ip,captcha_response)
                if captcha_ok:
                    # random password; user sets a real one via the
                    # emailed reset link
                    passwd = generate_random_string()
                    email = get_email_from_username(usernm)
                    user = auth.models.User.objects.create_user(
                        username=usernm,
                        password=passwd,
                        email=email,
                        first_name=form.cleaned_data['first_name'].strip().title(),
                        last_name=form.cleaned_data['last_name'].strip().title(),
                    )
                    validb = pass_reset_validb.objects.create(username=usernm)
                    send_pass_reset_mail(validb.username, validb.valid_hash, reg=True)
                    message = "Check your Mu Sigma email for further instructions."
                    return message_box(request, message)
                else:
                    errors = form._errors.setdefault("username", ErrorList())
                    errors.append("Invalid captcha request.")
                    errors.append(captcha_msg)
    return render_to_response('accounts/register.html', locals(),
                              RequestContext(request))
def check_mail(request):
    """Post-registration landing page: tell the user to check their inbox."""
    return message_box(
        request,
        "Registration successful! Check your email for further instructions."
    )
@login_required
@csrf_protect
def password_change_form(request):
    """Let a logged-in user change their password.

    GET renders the empty form; POST validates it, checks the old
    password, saves the new one, logs the user out and redirects to
    the success page.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    if not request.POST:
        form = PasswordChangeForm()
        # logging.error('pass change')
        return render_to_response('accounts/password_change_form.html',
                                  locals(), RequestContext(request))
    else:
        form = PasswordChangeForm(request.POST)
        if form.is_valid():
            if request.user.check_password(form.cleaned_data['password_old']):
                request.user.set_password(form.cleaned_data['password'])
                request.user.save()
                # changing the password invalidates the session: log out
                auth.logout(request)
                return redirect(reverse('user:password_change_success'))
            else:
                # form = PasswordChangeForm()
                form.add_error("password_old", "Original password is incorrect")
        return render_to_response('accounts/password_change_form.html',
                                  locals(), RequestContext(request))
def password_change_success(request):
    """Confirmation page shown after a successful password change."""
    return message_box(
        request,
        "Your password was successfully changed. You have been logged out."
    )
# reset request validation function
def validate_pass_reset_req(username="", given_hash="", delete=False):
    """Validate (or, with delete=True, discard) a pending reset request.

    Returns True when a matching, unexpired request exists for
    `username` with hash `given_hash`; returns None in every other
    case (no request, bad hash, expired, or delete mode).
    """
    if username and given_hash:
        try:
            # fix: `filter(...)[0]` raises IndexError, not DoesNotExist,
            # when no row matches — catch both so a missing request is
            # reported as invalid instead of crashing the view
            reset_req = pass_reset_validb.objects.filter(username=username)[0]
            if delete:
                reset_req.delete()
            else:
                if all((reset_req.valid_hash == given_hash,
                        reset_req.valid_upto >= datetime.today())):
                    return True
        except (pass_reset_validb.DoesNotExist, IndexError):
            return None
@csrf_protect
def password_reset_req(request):
    """Landing page for password-reset requests.

    POST creates (or refreshes the validity of) a reset-request row for
    the given username and emails a reset link; GET renders the form.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    if request.POST:
        form = PasswordResetRequestForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username'].strip()
            # strip the domain part if an email address was entered
            if '@' in username:
                username = username[:username.find('@')]
            try:
                reset_req = pass_reset_validb.objects.get(username=username)
                if reset_req.valid_upto:
                    # existing request: extend its validity by one day
                    reset_req.valid_upto = datetime.today() + timedelta(days=1)
                    reset_req.save()
            except pass_reset_validb.DoesNotExist:
                reset_req = pass_reset_validb.objects.create(username=username)
            send_pass_reset_mail(reset_req.username, reset_req.valid_hash)
            message = "Check your Mu Sigma email for further instructions."
            return message_box(request, message)
    else:
        form = PasswordResetRequestForm()
    return render_to_response('accounts/password_reset_req.html', locals(),
                              RequestContext(request))
@csrf_protect
def password_reset_change(request, username="", hash=""):
    """Set a new password from an emailed reset link.

    `username` and `hash` come from the URL. NOTE: `hash` shadows the
    builtin, but renaming it could break URLconf keyword arguments.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    if not request.POST:
        form = PasswordResetChangeForm()
        return render_to_response('accounts/password_reset_change.html',
                                  locals(), RequestContext(request))
    else:
        form = PasswordResetChangeForm(request.POST)
        if form.is_valid():
            try:
                if validate_pass_reset_req(username, hash):
                    user = auth.models.User.objects.get(username=username)
                    user.set_password(form.cleaned_data['password'])
                    user.save()
                    # delete the reset request entry
                    validate_pass_reset_req(username, hash, delete=True)
                    return message_box(
                        request,
                        "Your password was successfully reset!"
                    )
                # invalid request, raise error & trigger exception
                raise pass_reset_validb.DoesNotExist
            except pass_reset_validb.DoesNotExist:
                form.add_error("password", "Invalid reset request hash")
                form.add_error("password_re", "Invalid reset request hash")
        return render_to_response('accounts/password_reset_change.html',
                                  locals(), RequestContext(request))
def password_reset_success(request):
    """Confirmation page shown after a successful password reset."""
    return message_box(request, "Your password was successfully reset!")
@login_required
def indi(request, username=""):
    """Show the profile page for `username`.

    `user` is None when the account does not exist; when no username is
    given, `user` is simply absent from the context.

    NOTE: the template context is built from ``locals()``, so the local
    variable names in this function are part of the template contract.
    """
    if username:
        try:
            user = auth.models.User.objects.get(username=username)
        except auth.models.User.DoesNotExist:
            user = None
    return render_to_response('accounts/indi.html',
                              locals(), RequestContext(request))
@login_required
def index(request):
    """Send the logged-in user to their own profile page."""
    url = reverse('user:indi', kwargs={'username': request.user.username})
    return redirect(url)
|
nilq/baby-python
|
python
|
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
def to_u_s_miles_per_gallon(value):
    """Convert a fuel-economy value using the generated 2.35215 factor.

    NOTE(review): factor comes from the code generator — presumably
    litres/100 km to US MPG; confirm against the generator's unit table.
    """
    return 2.35215 * value
def to_miles_per_gallon(value):
    """Convert a fuel-economy value using the generated 2.82481 factor.

    NOTE(review): factor comes from the code generator — presumably
    litres/100 km to imperial MPG; confirm against the generator's table.
    """
    return 2.82481 * value
def to_litres_per100_kilometres(value):
    """Convert via the reciprocal relation 100 / value.

    Raises ZeroDivisionError for value == 0 (as in the generated code).
    """
    ratio = 100.0 / value
    return ratio
|
nilq/baby-python
|
python
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
"""
from pyramid.compat import itervalues_
from everest.entities.utils import get_root_aggregate
from everest.querying.specifications import eq
from everest.repositories.rdb.session import ScopedSessionMaker
from thelma.interfaces import IRack
from thelma.tools.base import BaseTool
from thelma.tools.semiconstants import get_item_status_future
__docformat__ = 'reStructuredText en'
__all__ = ['PlateEraser'
]
class PlateEraser(BaseTool):
    """Tool that deletes every sample on the given racks and resets
    each rack's status to FUTURE.

    :param barcodes: comma-separated string of rack barcodes.
    """
    NAME = 'Plate Eraser'

    def __init__(self, barcodes, parent=None):
        BaseTool.__init__(self, parent=parent)
        self.__barcodes = barcodes.split(',')

    def run(self):
        """Delete all samples from each rack and mark it FUTURE."""
        sess = ScopedSessionMaker()
        for bc in self.__barcodes:
            rack = self.__get_rack(bc)
            for src_cnt in itervalues_(rack.container_positions):
                # fix: idiomatic `is not None` instead of `not ... is None`;
                # the two nested ifs are collapsed into one condition
                if src_cnt is not None and src_cnt.sample is not None:
                    sess.delete(src_cnt.sample)
            rack.status = get_item_status_future()

    def __get_rack(self, barcode):
        # look up the rack aggregate by barcode; assumes exactly one match
        rack_agg = get_root_aggregate(IRack)
        rack_agg.filter = eq(barcode=barcode)
        return next(rack_agg.iterator())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
import logging.config
import os
import sys
import yaml
from datetime import datetime
from importlib import import_module
from pkgutil import iter_modules
from plastron import commands, version
from plastron.exceptions import FailureException
from plastron.logging import DEFAULT_LOGGING_OPTIONS
from plastron.http import Repository
logger = logging.getLogger(__name__)
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
def main():
    """Parse args and handle options.

    Entry point for the ``plastron`` CLI: discovers subcommands, loads the
    repository and logging configuration, then dispatches to the selected
    subcommand.  Exits 0 on no-subcommand/help, 1 on FailureException,
    2 on Ctrl+C.
    """
    parser = argparse.ArgumentParser(
        prog='plastron',
        description='Batch operation tool for Fedora 4.'
    )
    # cmd_name is filled in by each subcommand's configure_cli; None means
    # the user selected no subcommand.
    parser.set_defaults(cmd_name=None)
    common_required = parser.add_mutually_exclusive_group(required=True)
    common_required.add_argument(
        '-r', '--repo',
        help='Path to repository configuration file.',
        action='store'
    )
    common_required.add_argument(
        '-V', '--version',
        help='Print version and exit.',
        action='version',
        version=version
    )
    parser.add_argument(
        '-v', '--verbose',
        help='increase the verbosity of the status output',
        action='store_true'
    )
    parser.add_argument(
        '-q', '--quiet',
        help='decrease the verbosity of the status output',
        action='store_true'
    )
    subparsers = parser.add_subparsers(title='commands')
    # load all defined subcommands from the plastron.commands package;
    # each module that exposes configure_cli registers its own subparser.
    command_modules = {}
    for finder, name, ispkg in iter_modules(commands.__path__):
        module = import_module(commands.__name__ + '.' + name)
        if hasattr(module, 'configure_cli'):
            module.configure_cli(subparsers)
        command_modules[name] = module
    # parse command line args
    args = parser.parse_args()
    # if no subcommand was selected, display the help
    if args.cmd_name is None:
        parser.print_help()
        sys.exit(0)
    # load required repository config file and create repository object
    with open(args.repo, 'r') as repo_config_file:
        repo_config = yaml.safe_load(repo_config_file)
        fcrepo = Repository(
            repo_config, ua_string='plastron/{0}'.format(version)
        )
    # get basic logging options
    if 'LOGGING_CONFIG' in repo_config:
        with open(repo_config.get('LOGGING_CONFIG'), 'r') as logging_config_file:
            logging_options = yaml.safe_load(logging_config_file)
    else:
        logging_options = DEFAULT_LOGGING_OPTIONS
    # log file configuration
    # NOTE(review): repo_config.get('LOG_DIR') returns None when the key is
    # missing, which would make isdir/makedirs raise TypeError -- presumably
    # LOG_DIR is a required key in the repo config; confirm.
    log_dirname = repo_config.get('LOG_DIR')
    if not os.path.isdir(log_dirname):
        os.makedirs(log_dirname)
    # One log file per invocation, stamped with the subcommand and start time.
    log_filename = 'plastron.{0}.{1}.log'.format(args.cmd_name, now)
    logfile = os.path.join(log_dirname, log_filename)
    logging_options['handlers']['file']['filename'] = logfile
    # manipulate console verbosity (file handler level is unchanged)
    if args.verbose:
        logging_options['handlers']['console']['level'] = 'DEBUG'
    elif args.quiet:
        logging_options['handlers']['console']['level'] = 'WARNING'
    # configure logging
    logging.config.dictConfig(logging_options)
    logger.info('Loaded repo configuration from {0}'.format(args.repo))
    # get the selected subcommand
    command = command_modules[args.cmd_name].Command()
    try:
        # dispatch to the selected subcommand
        print_header(args)
        command(fcrepo, args)
        print_footer(args)
    except FailureException:
        # something failed, exit with non-zero status
        sys.exit(1)
    except KeyboardInterrupt:
        # aborted due to Ctrl+C
        sys.exit(2)
def print_header(args):
    """Common header formatting.

    Prints a banner box around the program name to stderr; suppressed
    entirely when ``--quiet`` was given.
    """
    if args.quiet:
        return
    title = '| PLASTRON |'
    inner_width = len(title) - 2
    bar = '+' + '=' * inner_width + '+'
    spacer = '|' + ' ' * inner_width + '|'
    banner_lines = ['', bar, spacer, title, spacer, bar, '']
    print('\n'.join(banner_lines), file=sys.stderr)
def print_footer(args):
    """Report success or failure and resources created.

    Writes a closing message to stderr unless ``--quiet`` was given.
    """
    if args.quiet:
        return
    print('\nScript complete. Goodbye!\n', file=sys.stderr)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# Scrambled 26-symbol alphabet (letters plus a few digits) from which the
# 5-character candidate passwords are spelled.
alphabet = 'qw2rty534plkjhgfds1zxcvbnm'
# Maps every alphabet character to its index (0-25) within `alphabet`.
alpha_dict = {'q':0,'w':1,'2':2,'r':3,'t':4,'y':5,'5':6,'3':7,'4':8,'p':9,'l':10,'k':11,'j':12,'h':13,'g':14,'f':15,'d':16,'s':17,'1':18,'z':19,'x':20,'c':21,'v':22,'b':23,'n':24,'m':25}
# Output file receiving every candidate that passes the checksum test below.
list_out = open("full_pass_list","w")
def reduction(my_letter):
    """Reduce *my_letter* into the range 0-25 (i.e. ``my_letter % 26``).

    The original implementation looped, subtracting 26 until the value went
    negative, then added 26 back -- an O(n) walk that is exactly modulo 26
    for inputs greater than -26 but silently returned out-of-range negatives
    for anything <= -27.  Python's ``%`` gives the same result on the script's
    actual inputs (sums of 0-25 values, always non-negative) in O(1) and also
    normalizes arbitrary negatives correctly.
    """
    return my_letter % 26
# Enumerate every 5-character string over the scrambled alphabet and keep the
# ones whose final character equals a running mod-26 checksum of the first
# four characters.  NOTE: 26**5 (~11.8M) iterations -- slow by design.
for letter1 in alphabet:
    for letter2 in alphabet:
        for letter3 in alphabet:
            for letter4 in alphabet:
                for letter5 in alphabet:
                    # Look up each character's 0-25 value.
                    a = alpha_dict[letter1]
                    b = alpha_dict[letter2]
                    c = alpha_dict[letter3]
                    d = alpha_dict[letter4]
                    e = alpha_dict[letter5]
                    # Chain the values: each step folds the previous running
                    # total in and reduces it back into the 0-25 range.
                    a = reduction(a)
                    b = reduction(b+a)
                    c = reduction(b+c)
                    d = reduction(d+c)
                    # Keep only candidates whose fifth character matches the
                    # checksum accumulated over the first four.
                    if d==e:
                        list_out.write(letter1+letter2+letter3+letter4+letter5+'\n')
list_out.close()
|
nilq/baby-python
|
python
|
class Node(object):
    """
    Represents a node in the query plan structure. Provides a `parse` function to
    parse JSON into a hierarchy of nodes. Executors take a plan consisting of
    nodes and use it to apply the Transformations to the source.
    """
    @classmethod
    def parse(cls, _dict):
        """Build a node from a parsed-JSON dict; subclasses must override.

        BUGFIX: this previously did ``raise NotImplemented`` -- NotImplemented
        is a sentinel value, not an exception, so raising it produced a
        confusing TypeError instead of the intended abstract-method error.
        """
        raise NotImplementedError
    def to_dict(self):
        """Serialize the node's public, truthy attributes to a plain dict,
        recursing into child Nodes (including Nodes inside list attributes)."""
        _dict = {}
        for key in dir(self):
            # Skip private/dunder names and anything not fully lowercase
            # (class-level constants, CamelCase helpers).
            if (key.startswith('_') or key.lower() != key):
                continue
            value = getattr(self, key)
            # Omit methods and empty/absent values from the serialized form.
            if (callable(value) or value is None or value is False or value == []):
                continue
            if isinstance(value, Node):
                value = value.to_dict()
            if isinstance(value, list):
                # NOTE: converts child Nodes in place within the list.
                for i, v in enumerate(value):
                    if isinstance(v, Node):
                        value[i] = v.to_dict()
            _dict[key] = value
        return _dict
    def __eq__(self, other):
        # Nodes compare by serialized content, not identity.
        if not isinstance(other, Node):
            return False
        return self.to_dict() == other.to_dict()
class ExecutableNode(Node):
    # Marker subclass for plan nodes that an executor can run directly;
    # adds no behavior of its own.
    pass
|
nilq/baby-python
|
python
|
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field, asdict
from datetime import datetime
from typing import List
@dataclass(frozen=True)
class SalaryPayment:
    """Gross payment components of a single salary slip."""
    basic_payment: int = field(default_factory=int, metadata={"jp": "基本給"})
    overtime_fee: int = field(default_factory=int, metadata={"jp": "残業代"})
    static_overtime_fee: int = field(default_factory=int, metadata={"jp": "固定残業代"})
    commuting_fee: int = field(default_factory=int, metadata={"jp": "通勤(非課税)"})
    additional_allowance: int = field(default_factory=int, metadata={"jp": "その他手当"})

    def total(self):
        """Sum of every payment component (gross pay)."""
        return sum(self.__dict__.values())

    def taxable(self):
        """Sum of the taxable components only (basic, overtime and fixed
        overtime -- the commuting fee is marked non-taxable 非課税)."""
        taxable_parts = (self.basic_payment, self.overtime_fee, self.static_overtime_fee)
        return sum(taxable_parts)

    @staticmethod
    def loads(data: dict):
        """Build an instance from *data*, silently ignoring unknown keys."""
        known = SalaryPayment.__dataclass_fields__.keys()
        kwargs = {key: value for key, value in data.items() if key in known}
        return SalaryPayment(**kwargs)

    def dumps(self):
        """Serialize to a plain dict."""
        return asdict(self)
@dataclass(frozen=True)
class SalaryDeduction:
    """Social-insurance amounts withheld from a salary payment."""
    health_insurance: int = field(default_factory=int, metadata={"jp": "健康保険"})
    nursing_insurance: int = field(default_factory=int, metadata={"jp": "介護保険"})
    welfare_pension: int = field(default_factory=int, metadata={"jp": "厚生年金"})
    pension_fund: int = field(default_factory=int, metadata={"jp": "年金基金"})
    employment_insurance: int = field(default_factory=int, metadata={"jp": "雇用保険"})

    def total(self):
        """Sum of every deduction component."""
        return sum(self.__dict__.values())

    @staticmethod
    def loads(data: dict):
        """Build an instance from *data*, silently ignoring unknown keys."""
        known = SalaryDeduction.__dataclass_fields__.keys()
        kwargs = {key: value for key, value in data.items() if key in known}
        return SalaryDeduction(**kwargs)

    def dumps(self):
        """Serialize to a plain dict."""
        return asdict(self)
@dataclass(frozen=True)
class SalaryTax:
    """Tax amounts withheld from (or refunded to) a salary payment."""
    income_tax: int = field(default_factory=int, metadata={"jp": "源泉所得税"})
    inhabitant_tax: int = field(default_factory=int, metadata={"jp": "住民税"})
    year_end_tax_adjustment: int = field(default_factory=int, metadata={"jp": "年末調整"})

    def total(self):
        """Sum of every tax component."""
        return sum(self.__dict__.values())

    @staticmethod
    def loads(data: dict):
        """Build an instance from *data*, silently ignoring unknown keys."""
        known = SalaryTax.__dataclass_fields__.keys()
        kwargs = {key: value for key, value in data.items() if key in known}
        return SalaryTax(**kwargs)

    def dumps(self):
        """Serialize to a plain dict."""
        return asdict(self)
@dataclass(frozen=True)
class Salary:
    """One monthly salary slip: gross payments, deductions and taxes.

    BUGFIX: the string fields below previously used ``field(default=str)``,
    which makes the default value the *type* ``str`` itself rather than a
    string; an unspecified field now defaults to the empty string.
    """
    payment_date: str = field(default="", metadata={"jp": "支給日"})
    calc_start_date: str = field(default="", metadata={"jp": "計算開始日"})
    calc_end_date: str = field(default="", metadata={"jp": "計算締め日"})
    salary_payment: SalaryPayment = field(default_factory=SalaryPayment, metadata={"jp": "給与"})
    salary_deduction: SalaryDeduction = field(default_factory=SalaryDeduction, metadata={"jp": "保険"})
    salary_tax: SalaryTax = field(default_factory=SalaryTax, metadata={"jp": "所得税など"})
    # NOTE(review): this metadata label looks copy-pasted from salary_tax;
    # a company field presumably wants a different Japanese label -- confirm
    # before changing, since metadata may be read elsewhere.
    company: str = field(default="", metadata={"jp": "所得税など"})
    version: str = field(default="1", metadata={"jp": "版"})
    @staticmethod
    def loads(data: dict):
        """Build a Salary from a plain nested dict (as produced by ``dumps``).

        Only the three date fields and the nested payment/deduction/tax
        sections are read; ``company`` and ``version`` keys in *data* are
        ignored and keep their defaults.
        """
        _data = {k: v for k, v in data.items() if k in ["payment_date", "calc_start_date", "calc_end_date"]}
        _data.update(
            {
                "salary_payment": SalaryPayment.loads(data.get("salary_payment", {})),
                "salary_deduction": SalaryDeduction.loads(data.get("salary_deduction", {})),
                "salary_tax": SalaryTax.loads(data.get("salary_tax", {})),
            }
        )
        return Salary(**_data)
    def dumps(self):
        """Serialize to a plain nested dict."""
        return asdict(self)
    def total_payments(self) -> int:
        """総支給額 -- gross pay (sum of all payment components)."""
        return self.salary_payment.total()
    def total_deductions(self) -> int:
        """控除額合計 -- insurance deductions plus all taxes."""
        return self.salary_deduction.total() + self.salary_tax.total()
    def net_payment(self) -> int:
        """差引支給額 -- net pay: gross minus total deductions."""
        return self.total_payments() - self.total_deductions()
    def dt(self) -> str:
        """Return the payment month key 'YYYY_MM'; ``payment_date`` must be
        an ISO 'YYYY-MM-DD' string."""
        return datetime.strptime(self.payment_date, "%Y-%m-%d").strftime("%Y_%m")
    @staticmethod
    def of(
        company: str,
        payment_date: str,
        calc_start_date: str,
        calc_end_date: str,
        basic_payment: int,
        overtime_fee: int,
        static_overtime_fee: int,
        commuting_fee: int,
        additional_allowance: int,
        health_insurance: int,
        nursing_insurance: int,
        welfare_pension: int,
        pension_fund: int,
        employment_insurance: int,
        income_tax: int,
        inhabitant_tax: int,
        year_end_tax_adjustment: int,
    ) -> "Salary":
        """Convenience constructor: build the three nested sections from
        flat scalar values."""
        salary_payment = SalaryPayment(
            basic_payment=basic_payment,
            overtime_fee=overtime_fee,
            static_overtime_fee=static_overtime_fee,
            commuting_fee=commuting_fee,
            additional_allowance=additional_allowance
        )
        salary_deduction = SalaryDeduction(
            health_insurance=health_insurance,
            nursing_insurance=nursing_insurance,
            welfare_pension=welfare_pension,
            pension_fund=pension_fund,
            employment_insurance=employment_insurance,
        )
        salary_tax = SalaryTax(
            income_tax=income_tax, inhabitant_tax=inhabitant_tax, year_end_tax_adjustment=year_end_tax_adjustment
        )
        return Salary(
            company=company,
            payment_date=payment_date,
            calc_start_date=calc_start_date,
            calc_end_date=calc_end_date,
            salary_payment=salary_payment,
            salary_deduction=salary_deduction,
            salary_tax=salary_tax,
        )
@dataclass(frozen=True)
class SalaryRepository(metaclass=ABCMeta):
    # Abstract persistence interface for Salary objects.
    @staticmethod
    def file_name(salary: Salary) -> str:
        """Canonical storage file name: '<YYYY_MM>_<company>.json'."""
        return f"{salary.dt()}_{salary.company}.json"
    @abstractmethod
    def path(self) -> str:
        """Return the base location where salary files are stored."""
        raise NotImplementedError
    @abstractmethod
    def save(self, salary: Salary):
        """Persist *salary*."""
        raise NotImplementedError
    @abstractmethod
    def load(self, dt: str) -> List[Salary]:
        """Load all salaries for month *dt* -- presumably the same 'YYYY_MM'
        key produced by Salary.dt()/file_name; confirm in implementations."""
        raise NotImplementedError
|
nilq/baby-python
|
python
|
import os
def get_ip_name():
    """Return the canonical name of this IP block module."""
    return "base_ip"
class base_ip:
    """Base class for IP-block code generators.

    Subclasses override the ``get_*`` hooks to emit their own text
    snippets; the base class returns empty strings for all of them.
    """
    # Identifier used to match configuration keys against this IP block.
    ID = "base"

    def __init__(self, io_hash):
        # io_hash: per-instance I/O configuration; unused by the base class.
        return

    def matched_id(self, in_key):
        """Return True when *in_key* names this IP block.

        BUGFIX: the class previously defined ``matched_id`` twice -- first a
        broken version missing ``self`` that compared with ``is`` (an
        identity test, unreliable for strings), then a stub returning ""
        that shadowed it.  Consolidated into a single equality test.
        """
        return in_key == self.ID

    def get_rst_case_text(self):
        return ''

    def get_dft_case_text(self):
        return ''

    def get_pinmux_setting(self):
        return ""

    def get_v_file_list(self):
        return ""

    def get_module_caller(self):
        return ""

    def get_wire_defines(self):
        return ""

    def get_assigement(self):
        return ""
|
nilq/baby-python
|
python
|
import requests
import conf
import urllib2
import xml.etree.ElementTree as ET
import io
def get_min_temp_phrase_from_values(min_observed, min_forecast):
    """Build a spoken-style phrase comparing tonight's forecast minimum
    temperature against last night's observed minimum.

    :param min_observed: last night's observed minimum (integer degrees)
    :param min_forecast: tonight's forecast minimum (integer degrees)
    :return: a full English sentence such as
        "The temperature tonight will be 8 degrees, which is 3 degrees
        cooler than last night"
    """
    forecast_unit = "degree" if abs(min_forecast) == 1 else "degrees"
    phrase = "The temperature tonight will be %s %s, " % (min_forecast, forecast_unit)
    delta = min_forecast - min_observed
    # NOTE: faithful to the original -- uses > 1 here (not != 1), so a zero
    # delta would pick "degree"; that branch is unreachable since delta == 0
    # takes the "same as last night" wording.
    delta_unit = "degrees" if abs(delta) > 1 else "degree"
    if delta == 0:
        phrase += "which is the same as last night"
    elif delta > 0:
        phrase += "which is %s %s warmer than last night" % (abs(delta), delta_unit)
    else:
        phrase += "which is %s %s cooler than last night" % (abs(delta), delta_unit)
    return phrase
def get_min_observed_and_forecasted(bom_obs_url, bom_forecast_url, bom_forecast_area):
    """Return (observed minimum, forecast minimum) as rounded ints.

    NOTE(review): Python 2 only -- relies on ``urllib2`` and ``unicode``.
    """
    # BOM observation data is available for several weather stations, and
    # in several formats (including the JSON that we use here).
    # e.g. http://www.bom.gov.au/products/IDN60901/IDN60901.94768.shtml
    r = requests.get(bom_obs_url)
    # this will only be used in the late afternoon and
    # min reading is usually about 5am on the same day.
    # Comes as a float, so let's round and cast
    min_obs = int(round(min([reading["air_temp"] for reading
                             in r.json()["observations"]["data"]])))
    # State forecast URLs are in XML format and are accessible from
    # http://www.bom.gov.au/info/precis_forecasts.shtml
    f = urllib2.urlopen(bom_forecast_url)
    # Decode the raw response into an in-memory text stream for ElementTree.
    forecast_report = io.StringIO(unicode(f.read()))
    tree = ET.parse(forecast_report)
    # Get the first (zeroth) minimum air temperature reading.
    # The current day will not have a minimum reading so this corresponds
    # to tomorrow's minimum forecast temperature
    min_forecast = int(
        tree.findall("./forecast"
                     "/area[@aac='%s']"
                     "/forecast-period"
                     "/element[@type='air_temperature_minimum']" %
                     (bom_forecast_area,))[0].text)
    return min_obs, min_forecast
if __name__ == "__main__":
print get_min_temp_phrase_from_values(*get_min_observed_and_forecasted(
conf.LOCAL_BOM_OBSERVATIONS_URL,
conf.STATE_BOM_FORECAST_URL,
conf.LOCAL_BOM_FORECAST_AREA))
# print get_min_temp_phrase_from_values(12, 0)
|
nilq/baby-python
|
python
|
from flask import Flask
from flask import render_template,redirect,request
import pandas as pd
import sys
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
#from sklearn.metrics import mean_squared_log_error
from sklearn.linear_model import LogisticRegression
# Load the heart-disease dataset and normalize the label column:
# rename 'class' -> 'target' and encode absent/present as 0/1.
df=pd.read_csv('heart-data.csv')
df.rename(columns={"class":"target"},inplace=True)
df['target'].replace(['absent','present'],[0,1],inplace=True)
# One-hot encode any remaining categorical columns.
df=pd.get_dummies(df)
x=df.drop('target', axis=1)
y=df['target']
# 70/30 train/validation split with a fixed seed for reproducibility.
train_x,valid_x,train_y,valid_y=train_test_split(x,y,test_size=0.3,random_state=35)
logr=LogisticRegression()
logr.fit(train_x,train_y)
#new_data=np.array(new_data,dtype='int64')
#new_data=new_data.reshape(1,13)
#xnew_data=pd.DataFrame(new_data)
# Persist the fitted model, then reload it to sanity-check serialization
# before predicting on the validation set and a single sample row.
pickle.dump(logr,open('model.pkl','wb'))
model=pickle.load(open('model.pkl','rb'))
result=model.predict(valid_x)
newdata=valid_x.head(1)
print(newdata)
result2=model.predict(newdata)
print(result)
print(result2)
|
nilq/baby-python
|
python
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from manila import context
from manila import db
from manila import exception
from manila.scheduler import driver
from manila.scheduler import manager
from manila.scheduler import simple
from manila.share import rpcapi as share_rpcapi
from manila import test
from manila.tests import db_utils
from manila import utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""
    # Hooks that subclasses can override to re-run this suite against a
    # different manager/driver implementation.
    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'manila.scheduler.driver.Scheduler'
    def setUp(self):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertTrue(isinstance(manager.driver, self.driver_cls))
    def test_update_service_capabilities(self):
        service_name = 'fake_service'
        host = 'fake_host'
        # With no capabilities argument, the driver must receive an empty dict.
        with mock.patch.object(self.manager.driver,
                               'update_service_capabilities', mock.Mock()):
            self.manager.update_service_capabilities(
                self.context, service_name=service_name, host=host)
            self.manager.driver.update_service_capabilities.\
                assert_called_once_with(service_name, host, {})
        # A provided capabilities dict must be passed through unchanged.
        with mock.patch.object(self.manager.driver,
                               'update_service_capabilities', mock.Mock()):
            capabilities = {'fake_capability': 'fake_value'}
            self.manager.update_service_capabilities(
                self.context, service_name=service_name, host=host,
                capabilities=capabilities)
            self.manager.driver.update_service_capabilities.\
                assert_called_once_with(service_name, host, capabilities)
    @mock.patch.object(db, 'share_update', mock.Mock())
    def test_create_share_exception_puts_share_in_error_state(self):
        """Test that a NoValidHost exception for create_share.
        Puts the share in 'error' state and eats the exception.
        """
        def raise_no_valid_host(*args, **kwargs):
            raise exception.NoValidHost(reason="")
        fake_share_id = 1
        request_spec = {'share_id': fake_share_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_share',
                               mock.Mock(side_effect=raise_no_valid_host)):
            self.mock_object(manager.LOG, 'error')
            # Must not raise: the manager swallows NoValidHost after
            # flipping the share status to 'error' and logging.
            self.manager.create_share_instance(
                self.context, request_spec=request_spec, filter_properties={})
            db.share_update.assert_called_once_with(
                self.context, fake_share_id, {'status': 'error'})
            self.manager.driver.schedule_create_share.assert_called_once_with(
                self.context, request_spec, {})
            manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY)
    def test_get_pools(self):
        """Ensure get_pools exists and calls driver.get_pools."""
        mock_get_pools = self.mock_object(self.manager.driver, 'get_pools',
                                          mock.Mock(return_value='fake_pools'))
        result = self.manager.get_pools(self.context, filters='fake_filters')
        mock_get_pools.assert_called_once_with(self.context, 'fake_filters')
        self.assertEqual('fake_pools', result)
    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_no_valid_host_puts_cg_in_error_state(self):
        """Test that NoValidHost is raised for create_consistency_group.
        Puts the share in 'error' state and eats the exception.
        """
        def raise_no_valid_host(*args, **kwargs):
            raise exception.NoValidHost(reason="")
        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=raise_no_valid_host)):
            # Like shares, a CG with no valid host goes to 'error' quietly.
            self.manager.create_consistency_group(self.context,
                                                  fake_cg_id,
                                                  request_spec=request_spec,
                                                  filter_properties={})
            db.consistency_group_update.assert_called_once_with(
                self.context, fake_cg_id, {'status': 'error'})
            self.manager.driver.schedule_create_consistency_group\
                .assert_called_once_with(self.context, cg_id,
                                         request_spec, {})
    @mock.patch.object(db, 'consistency_group_update', mock.Mock())
    def test_create_cg_exception_puts_cg_in_error_state(self):
        """Test that exceptions for create_consistency_group.
        Puts the share in 'error' state and raises the exception.
        """
        fake_cg_id = 1
        cg_id = fake_cg_id
        request_spec = {"consistency_group_id": cg_id}
        with mock.patch.object(self.manager.driver,
                               'schedule_create_consistency_group',
                               mock.Mock(side_effect=exception.NotFound)):
            # Unlike NoValidHost, other exceptions must propagate.
            self.assertRaises(exception.NotFound,
                              self.manager.create_consistency_group,
                              self.context, fake_cg_id,
                              request_spec=request_spec,
                              filter_properties={})
    def test_migrate_share_to_host(self):
        # Happy path: filter check returns a host and migration proceeds.
        share = db_utils.create_share()
        host = 'fake@backend#pool'
        self.mock_object(db, 'share_get', mock.Mock(return_value=share))
        self.mock_object(share_rpcapi.ShareAPI, 'migrate_share')
        self.mock_object(driver.Scheduler, 'host_passes_filters',
                         mock.Mock(return_value=host))
        self.manager.migrate_share_to_host(self.context, share['id'], host,
                                           False, {}, None)
    def test_migrate_share_to_host_no_valid_host(self):
        # NoValidHost from the filter check must be swallowed by the manager.
        share = db_utils.create_share()
        host = 'fake@backend#pool'
        self.mock_object(
            driver.Scheduler, 'host_passes_filters',
            mock.Mock(side_effect=[exception.NoValidHost('fake')]))
        self.manager.migrate_share_to_host(self.context, share['id'], host,
                                           False, {}, None)
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class."""
    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler
    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
    def test_update_service_capabilities(self):
        service_name = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        # The driver must forward capability updates verbatim to its
        # host_manager.
        with mock.patch.object(self.driver.host_manager,
                               'update_service_capabilities', mock.Mock()):
            self.driver.update_service_capabilities(
                service_name, host, capabilities)
            self.driver.host_manager.update_service_capabilities.\
                assert_called_once_with(service_name, host, capabilities)
    def test_hosts_up(self):
        service1 = {'host': 'host1'}
        service2 = {'host': 'host2'}
        services = [service1, service2]
        # host1 is reported down, so only host2 should be returned.
        def fake_service_is_up(*args, **kwargs):
            if args[0]['host'] == 'host1':
                return False
            return True
        with mock.patch.object(db, 'service_get_all_by_topic',
                               mock.Mock(return_value=services)):
            with mock.patch.object(utils, 'service_is_up',
                                   mock.Mock(side_effect=fake_service_is_up)):
                result = self.driver.hosts_up(self.context, self.topic)
                self.assertEqual(result, ['host2'])
                db.service_get_all_by_topic.assert_called_once_with(
                    self.context, self.topic)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Checks on the abstract base scheduler driver itself.

    These must keep passing regardless of which concrete driver the
    parent suite is parameterized with.
    """

    def test_unimplemented_schedule(self):
        # The abstract schedule() entry point must refuse to run.
        positional = (1, 2, 3)
        keyword = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                          self.context, self.topic, 'schedule_something',
                          *positional, **keyword)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods."""
    def setUp(self):
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')
    @mock.patch.object(db, 'share_update', mock.Mock())
    def test_share_host_update_db(self):
        # Freeze utcnow so the scheduled_at value written to the DB is
        # deterministic and can be asserted exactly.
        with mock.patch.object(timeutils, 'utcnow',
                               mock.Mock(return_value='fake-now')):
            driver.share_update_db(self.context, 31337, 'fake_host')
            db.share_update.assert_called_once_with(
                self.context, 31337,
                {'host': 'fake_host', 'scheduled_at': 'fake-now'})
class SimpleSchedulerSharesTestCase(test.TestCase):
    """Test case for simple scheduler create share method."""
    def setUp(self):
        super(SimpleSchedulerSharesTestCase, self).setUp()
        self.mock_object(share_rpcapi, 'ShareAPI')
        self.driver = simple.SimpleScheduler()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.admin_context = context.RequestContext('fake_admin_user',
                                                    'fake_project')
        self.admin_context.is_admin = True
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_if_two_services_up(self):
        # Two live hosts: the scheduler should pick the first entry of the
        # sorted service list (fake_host1).
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 1}
        fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
        fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
        fake_result = [(fake_service_1, 2), (fake_service_2, 1)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=fake_result))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.context,
                                          fake_request_spec, {})
        utils.service_is_up.assert_called_once_with(utils.IsAMatcher(dict))
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id, 'fake_host1')
    def test_create_share_if_services_not_available(self):
        # No services at all: scheduling must fail with NoValidHost.
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 1}
        fake_result = []
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        with mock.patch.object(db, 'service_get_all_share_sorted',
                               mock.Mock(return_value=fake_result)):
            self.assertRaises(exception.NoValidHost,
                              self.driver.schedule_create_share,
                              self.context, fake_request_spec, {})
            db.service_get_all_share_sorted.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext))
    def test_create_share_if_max_gigabytes_exceeded(self):
        # Requested size (10001) exceeds capacity: NoValidHost expected.
        share_id = 'fake'
        fake_share = {'id': share_id, 'size': 10001}
        fake_service_1 = {'disabled': False, 'host': 'fake_host1'}
        fake_service_2 = {'disabled': False, 'host': 'fake_host2'}
        fake_result = [(fake_service_1, 5), (fake_service_2, 7)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        with mock.patch.object(db, 'service_get_all_share_sorted',
                               mock.Mock(return_value=fake_result)):
            self.assertRaises(exception.NoValidHost,
                              self.driver.schedule_create_share,
                              self.context, fake_request_spec, {})
            db.service_get_all_share_sorted.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext))
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_availability_zone(self):
        # Only the service in the instance's availability zone may be chosen.
        share_id = 'fake'
        fake_share = {
            'id': share_id,
            'size': 1,
        }
        fake_instance = {
            'availability_zone_id': 'fake',
        }
        fake_service_1 = {
            'disabled': False, 'host': 'fake_host1',
            'availability_zone_id': 'fake',
        }
        fake_service_2 = {
            'disabled': False, 'host': 'fake_host2',
            'availability_zone_id': 'super_fake',
        }
        fake_result = [(fake_service_1, 0), (fake_service_2, 1)]
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
            'share_instance_properties': fake_instance,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=fake_result))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.context,
                                          fake_request_spec, {})
        utils.service_is_up.assert_called_once_with(fake_service_1)
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id,
            fake_service_1['host'])
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_create_share_availability_zone_on_host(self):
        # Admin may pin a host via the 'az:host' availability_zone syntax.
        share_id = 'fake'
        fake_share = {
            'id': share_id,
            'availability_zone': 'fake:fake',
            'size': 1,
        }
        fake_service = {'disabled': False, 'host': 'fake'}
        fake_request_spec = {
            'share_id': share_id,
            'share_properties': fake_share,
        }
        self.mock_object(db, 'service_get_all_share_sorted',
                         mock.Mock(return_value=[(fake_service, 1)]))
        self.mock_object(driver, 'share_update_db',
                         mock.Mock(return_value=db_utils.create_share()))
        self.driver.schedule_create_share(self.admin_context,
                                          fake_request_spec, {})
        utils.service_is_up.assert_called_once_with(fake_service)
        db.service_get_all_share_sorted.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext))
        driver.share_update_db.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share_id, 'fake')
|
nilq/baby-python
|
python
|
import os
import platform
import textwrap
from collections import OrderedDict
from jinja2 import Template
from conans.errors import ConanException
from conans.util.files import normalize
sh_activate = textwrap.dedent("""\
#!/usr/bin/env sh
{%- for it in modified_vars %}
export CONAN_OLD_{{it}}="${{it}}"
{%- endfor %}
while read -r line; do
LINE="$(eval echo $line)";
export "$LINE";
done < "{{ environment_file }}"
export CONAN_OLD_PS1="$PS1"
export PS1="({{venv_name}}) $PS1"
""")
sh_deactivate = textwrap.dedent("""\
#!/usr/bin/env sh
export PS1="$CONAN_OLD_PS1"
unset CONAN_OLD_PS1
{% for it in modified_vars %}
export {{it}}="$CONAN_OLD_{{it}}"
unset CONAN_OLD_{{it}}
{%- endfor %}
{%- for it in new_vars %}
unset {{it}}
{%- endfor %}
""")
bat_activate = textwrap.dedent("""\
@echo off
{%- for it in modified_vars %}
SET "CONAN_OLD_{{it}}=%{{it}}%"
{%- endfor %}
FOR /F "usebackq tokens=1,* delims==" %%i IN ("{{ environment_file }}") DO (
CALL SET "%%i=%%j"
)
SET "CONAN_OLD_PROMPT=%PROMPT%"
SET "PROMPT=({{venv_name}}) %PROMPT%"
""")
bat_deactivate = textwrap.dedent("""\
@echo off
SET "PROMPT=%CONAN_OLD_PROMPT%"
SET "CONAN_OLD_PROMPT="
{% for it in modified_vars %}
SET "{{it}}=%CONAN_OLD_{{it}}%"
SET "CONAN_OLD_{{it}}="
{%- endfor %}
{%- for it in new_vars %}
SET "{{it}}="
{%- endfor %}
""")
ps1_activate = textwrap.dedent("""\
{%- for it in modified_vars %}
$env:CONAN_OLD_{{venv_name}}_{{it}}=$env:{{it}}
{%- endfor %}
foreach ($line in Get-Content "{{ environment_file }}") {
$var,$value = $line -split '=',2
$value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)
Set-Item env:\\$var -Value "$value_expanded"
}
function global:_old_conan_{{venv_name}}_prompt {""}
$function:_old_conan_{{venv_name}}_prompt = $function:prompt
function global:prompt {
write-host "({{venv_name}}) " -nonewline; & $function:_old_conan_{{venv_name}}_prompt
}
""")
ps1_deactivate = textwrap.dedent("""\
$function:prompt = $function:_old_conan_{{venv_name}}_prompt
remove-item function:_old_conan_{{venv_name}}_prompt
{% for it in modified_vars %}
$env:{{it}}=$env:CONAN_OLD_{{venv_name}}_{{it}}
Remove-Item env:CONAN_OLD_{{venv_name}}_{{it}}
{%- endfor %}
{%- for it in new_vars %}
Remove-Item env:{{it}}
{%- endfor %}
""")
BAT_FLAVOR = "bat"
PS1_FLAVOR = "ps1"
SH_FLAVOR = "sh"
def _variable_placeholder(flavor, name, append_with_spaces):
"""
:param flavor: flavor of the execution environment
:param name: variable name
:return: placeholder for the variable name formatted for a certain execution environment.
(e.g., cmd, ps1, sh).
"""
if flavor == BAT_FLAVOR:
return "%{}%".format(name)
if flavor == PS1_FLAVOR:
return "$env:%s" % name
# flavor == sh
return "${%s:+ $%s}" % (name, name) if append_with_spaces else "${%s:+:$%s}" % (name, name)
def _format_values(flavor, variables, append_with_spaces):
    """
    Formats the values for the different supported script language flavors.

    :param flavor: flavor of the execution environment (bat, ps1 or sh)
    :param variables: iterable of (name, value) pairs; a list value is a
        path-like or space-joined variable, anything else a single scalar
    :param append_with_spaces: collection of variable names whose list
        values are joined with spaces instead of the path separator
    :return: yields (name, formatted_value, already_existed) tuples; the
        last element tells the caller whether deactivation should restore
        the variable (True) or unset it (False)
    """
    # bat/ps1 on an actual Windows host use ';' and no quoting; ps1
    # elsewhere uses ':' unquoted; plain sh quotes each element.
    if flavor in [BAT_FLAVOR, PS1_FLAVOR] and platform.system() == "Windows":
        path_sep, quote_elements = ";", False
    elif flavor == PS1_FLAVOR:
        path_sep, quote_elements = ":", False
    else:
        path_sep, quote_elements = ":", True
    for name, value in variables:
        # activate values
        if isinstance(value, list):
            value = list(OrderedDict.fromkeys(value))  # Avoid repeated entries, while keeping order
            append_space = name in append_with_spaces
            # Placeholder re-expands any pre-existing value of the variable.
            placeholder = _variable_placeholder(flavor, name, append_space)
            if append_space:
                # Variables joined with spaces look like: CPPFLAGS="one two three"
                if flavor == SH_FLAVOR:
                    value = " ".join(value) + placeholder
                else:
                    value = " ".join(value + [placeholder])
                value = "\"%s\"" % value if quote_elements else value
            else:
                # Quoted variables joined with pathset may look like:
                # PATH="one path":"two paths"
                # Unquoted variables joined with pathset may look like: PATH=one path;two paths
                value = ["\"%s\"" % v for v in value] if quote_elements else value
                if flavor == SH_FLAVOR:
                    value = path_sep.join(value) + placeholder
                else:
                    value = path_sep.join(value + [placeholder])
        else:
            # single value
            value = "\"%s\"" % value if quote_elements else value
        # Escape backslashes on non-Windows hosts so they survive the
        # shell's own expansion of the .env file.
        if platform.system() != "Windows":
            value = value.replace("\\", "\\\\")
        # deactivate values
        existing = name in os.environ
        yield name, value, existing
def _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,
           env_filepath):
    """Render the activate/deactivate scripts and the environment file body.

    :param env_vars: mapping of variable name -> value (scalar or list)
    :param vars_with_spaces: names joined with spaces rather than path sep
    :param flavor: bat, ps1 or sh
    :param activate_tpl: jinja2 Template for the activation script
    :param deactivate_tpl: jinja2 Template for the deactivation script
    :param venv_name: name shown in the shell prompt
    :param env_filepath: absolute path the activate script will read
    :return: (activate_content, deactivate_content, environment_content)
    """
    ret = list(_format_values(flavor, env_vars.items(), vars_with_spaces))
    # Pre-existing variables get backed up and restored; brand-new ones
    # are simply unset on deactivation.
    modified_vars = [name for name, _, existing in ret if existing]
    new_vars = [name for name, _, existing in ret if not existing]
    activate_content = activate_tpl.render(environment_file=env_filepath,
                                           modified_vars=modified_vars, new_vars=new_vars,
                                           venv_name=venv_name)
    deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars,
                                               venv_name=venv_name)
    environment_lines = ["{}={}".format(name, value) for name, value, _ in ret]
    # This blank line is important, otherwise the script doesn't process last line
    environment_lines.append('')
    if flavor == SH_FLAVOR:
        # replace CRLF->LF guarantee it is always LF, irrespective of current .py file
        activate_content = activate_content.replace("\r\n", "\n")
        deactivate_content = deactivate_content.replace("\r\n", "\n")
        environment = "\n".join(environment_lines)
    else:
        # Windows-oriented flavors use the platform's native line endings.
        activate_content = normalize(activate_content)
        deactivate_content = normalize(deactivate_content)
        environment = os.linesep.join(environment_lines)
    return activate_content, deactivate_content, environment
def env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):
    """Build the ``{filename: content}`` mapping for one shell flavor.

    Produces the activate/deactivate scripts plus the environment file for
    *flavor*, all suffixed with *name*, to be written under *folder*.
    Raises ConanException for an unknown flavor.
    """
    env_filename = "environment{}.{}.env".format(name, flavor)
    activate_filename = "activate{}.{}".format(name, flavor)
    deactivate_filename = "deactivate{}.{}".format(name, flavor)
    templates = {SH_FLAVOR: (sh_activate, sh_deactivate),
                 BAT_FLAVOR: (bat_activate, bat_deactivate),
                 PS1_FLAVOR: (ps1_activate, ps1_deactivate)}
    try:
        activate_raw, deactivate_raw = templates[flavor]
    except KeyError:
        raise ConanException("Unrecognized flavor: %s" % flavor)
    env_filepath = os.path.abspath(os.path.join(folder, env_filename))
    activate_body, deactivate_body, env_body = _files(
        env_vars, vars_with_spaces, flavor, Template(activate_raw),
        Template(deactivate_raw), venv_name, env_filepath)
    return {activate_filename: activate_body,
            deactivate_filename: deactivate_body,
            env_filename: env_body}
|
nilq/baby-python
|
python
|
import logging
from copy import copy
from inspect import isfunction
import ibutsu_server.tasks
from flask_testing import TestCase
from ibutsu_server import get_app
from ibutsu_server.tasks import create_celery_app
from ibutsu_server.util import merge_dicts
def mock_task(*args, **kwargs):
    """Stand-in for the Celery ``task`` decorator, used in tests.

    Supports both usages: as a bare decorator (``@task``, where the function
    itself arrives as ``args[0]``) and as a decorator factory (``@task(...)``).
    The wrapped function runs synchronously and keeps a reference to the
    original callable in ``_orig_func`` so tests can reach it.
    """
    from functools import wraps  # local import: keeps the module import block untouched

    if args and isfunction(args[0]):
        # Bare-decorator form: @task
        func = args[0]

        @wraps(func)  # preserve __name__/__doc__ of the task function
        def wrap(*args, **kwargs):
            return func(*args, **kwargs)

        wrap._orig_func = func
        return wrap
    else:
        # Factory form: @task(...) -- return the actual decorator
        def decorate(func):
            @wraps(func)
            def _wrapped(*args, **kwargs):
                return func(*args, **kwargs)

            _wrapped._orig_func = func
            return _wrapped

        return decorate
class BaseTestCase(TestCase):
    """Common base class for API tests, backed by an in-memory SQLite DB."""
    def create_app(self):
        """Build the Flask app under test (flask_testing hook)."""
        # Quiet the connexion operation resolver during tests
        logging.getLogger("connexion.operation").setLevel("ERROR")
        extra_config = {
            "TESTING": True,
            "LIVESERVER_PORT": 0,
            "SQLALCHEMY_DATABASE_URI": "sqlite:///:memory:",
        }
        app = get_app(**extra_config)
        create_celery_app(app.app)
        # Patch the real Celery task decorator with the synchronous mock,
        # but only if nothing has set it yet (subsequent apps reuse it)
        if ibutsu_server.tasks.task is None:
            ibutsu_server.tasks.task = mock_task
        return app.app
    def assert_201(self, response, message=None):
        """
        Checks if response status code is 201
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assert_status(response, 201, message)
    def assert_equal(self, first, second, msg=None):
        """Alias for unittest's camelCase assertEqual."""
        return self.assertEqual(first, second, msg)
    def assert_not_equal(self, first, second, msg=None):
        """Alias for unittest's camelCase assertNotEqual."""
        return self.assertNotEqual(first, second, msg)
class MockModel(object):
    """Mock model object mimicking the interface of the DB models.

    Instance attributes are restricted to ``COLUMNS``; the ``data`` column is
    exposed externally as ``metadata`` (a reserved attribute name on the real
    models), so ``to_dict``/``from_dict``/``update`` translate between the
    two names.
    """

    COLUMNS = ["id"]

    def __init__(self, **fields):
        # Only known columns are picked up; unknown keyword args are ignored
        for column in self.COLUMNS:
            if column in fields.keys():
                setattr(self, column, fields[column])
            else:
                setattr(self, column, None)

    def to_dict(self):
        """Return the record as a plain dict, renaming data -> metadata."""
        record_dict = copy(self.__dict__)
        # when outputting info, translate data to metadata
        if record_dict.get("data"):
            record_dict["metadata"] = record_dict.pop("data")
        return record_dict

    @classmethod
    def from_dict(cls, **record_dict):
        """Build an instance from keyword args, renaming metadata -> data."""
        # because metadata is a reserved attr name, translate it to data
        if record_dict.get("metadata"):
            record_dict["data"] = record_dict.pop("metadata")
        return cls(**record_dict)

    def update(self, record_dict):
        """Merge ``record_dict`` into this record; ``id`` is never changed."""
        if "id" in record_dict:
            record_dict.pop("id")
        group_dict = self.to_dict()
        # NOTE(review): group_dict is discarded after this merge -- presumably
        # merge_dicts mutates record_dict in place; verify its contract.
        merge_dicts(group_dict, record_dict)
        if group_dict.get("metadata"):
            group_dict["data"] = group_dict.pop("metadata")
        if record_dict.get("metadata"):
            # pop (not get) so the stray "metadata" key is removed and never
            # set as an attribute in the loop below
            record_dict["data"] = record_dict.pop("metadata")
        for key, value in record_dict.items():
            setattr(self, key, value)
# Concrete mock models: each differs only in the set of columns it exposes.
class MockArtifact(MockModel):
    COLUMNS = ["id", "filename", "result_id", "data", "content"]
class MockGroup(MockModel):
    COLUMNS = ["id", "name", "data"]
class MockImport(MockModel):
    COLUMNS = ["id", "filename", "format", "data", "status"]
class MockProject(MockModel):
    COLUMNS = ["id", "name", "title", "owner_id", "group_id"]
class MockResult(MockModel):
    COLUMNS = [
        "id",
        "component",
        "data",
        "duration",
        "env",
        "params",
        "project_id",
        "result",
        "run_id",
        "source",
        "start_time",
        "test_id",
    ]
class MockReport(MockModel):
    COLUMNS = [
        "id",
        "created",
        "download_url",
        "filename",
        "mimetype",
        "name",
        "params",
        "project_id",
        "status",
        "url",
        "view_url",
    ]
class MockRun(MockModel):
    COLUMNS = [
        "id",
        "component",
        "created",
        "data",
        "duration",
        "env",
        "project_id",
        "source",
        "start_time",
        "summary",
    ]
# Mock out the task decorator
# (patched at import time so @task-decorated functions run synchronously)
ibutsu_server.tasks.task = mock_task
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import Window
from kivy.properties import (ListProperty,
NumericProperty,
ObjectProperty,
ReferenceListProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.vector import Vector
class SnakesGame(Widget):
    """The root `Widget` for displaying and running the game."""
    trails = ListProperty()
    snake1 = ObjectProperty()
    snake2 = ObjectProperty()
    status_bar = ObjectProperty()

    def __init__(self, **kwargs):
        super(SnakesGame, self).__init__(**kwargs)
        # Grab the keyboard and listen for key presses
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)

    def _keyboard_closed(self):
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Steer the snakes: WASD for snake1, arrow keys for snake2."""
        # key name -> (snake attribute, new direction)
        bindings = {
            's': ('snake1', (0, -1)),
            'w': ('snake1', (0, 1)),
            'a': ('snake1', (-1, 0)),
            'd': ('snake1', (1, 0)),
            'down': ('snake2', (0, -1)),
            'up': ('snake2', (0, 1)),
            'left': ('snake2', (-1, 0)),
            'right': ('snake2', (1, 0)),
        }
        binding = bindings.get(keycode[1])
        if binding is None:
            return
        snake = getattr(self, binding[0])
        dx, dy = binding[1]
        # Ignore the key if it would reverse the snake onto itself
        if snake.direction != [-dx, -dy]:
            snake.direction = (dx, dy)

    def run(self):
        """Start the 60 FPS game loop."""
        Clock.schedule_interval(self.update, 1/60.)

    def update(self, dt):
        """Moves snakes and gives points if collision occured."""
        # Move snake1 first; snake2 only moves if snake1 did not crash
        for mover, opponent in ((self.snake1, self.snake2),
                                (self.snake2, self.snake1)):
            if mover.move(opponent):
                opponent.score += 1
                self.reset()
                break

    def reset(self):
        """Resets the positions/directions and removes trails."""
        self.snake1.center = (self.width/3., self.height/2.)
        self.snake2.center = (self.width*2/3., self.height/2.)
        self.snake1.direction = (0, 0)
        self.snake2.direction = (0, 0)
        for trail in self.trails:
            self.remove_widget(trail)
        del self.trails[:]
class Snake(Widget):
    """Represents the head of a snake, which can be moved around."""
    # Kivy properties; color/size/pos are bound from the .kv layout
    color = ListProperty()
    direction_x = NumericProperty()
    direction_y = NumericProperty()
    direction = ReferenceListProperty(direction_x, direction_y)
    trail = ObjectProperty()
    score = NumericProperty()
    def collide_widget(self, wid):
        """Collision detection that works with negative sizes.
        Args:
            wid: The `Widget` to check for collision against.
        Returns:
            True if a collision occured, otherwise False.
        """
        # Trails grow by adding to width/height, which can become negative
        # when moving left/down, so x/right (and y/top) may be swapped.
        # The +/-1 margins shrink the hitbox slightly -- presumably to avoid
        # instant collision with a freshly spawned trail; TODO confirm.
        if wid.width < 0:
            if self.right < wid.right + 1:
                return False
            if self.x > wid.x - 1:
                return False
        else:
            if self.right < wid.x + 1:
                return False
            if self.x > wid.right - 1:
                return False
        if wid.height < 0:
            if self.top < wid.top + 1:
                return False
            if self.y > wid.y - 1:
                return False
        else:
            if self.top < wid.y + 1:
                return False
            if self.y > wid.top - 1:
                return False
        return True
    def move(self, other):
        """Moves the `Snake` and returns whether a collision occured."""
        # Scale speed in relation to game widget size
        if self.parent.width < self.parent.height:
            speed_scale = self.parent.width / 250.
        else:
            speed_scale = self.parent.height / 250.
        self.pos = Vector(self.direction) * speed_scale + self.pos
        # Stretch the current trail so it keeps touching the head
        if self.trail:
            self.trail.width += self.direction_x * speed_scale
            self.trail.height += self.direction_y * speed_scale
        # Check for collision with edges of arena and other snake
        if self.right >= self.parent.width or self.x <= 0:
            return True
        if self.top >= self.parent.status_bar.y or self.y <= 0:
            return True
        if self.collide_widget(other):
            self.score += 1 # Gives point to self as well
            return True
        # Check for collision with all trails
        for trail in self.parent.trails:
            if self.collide_widget(trail):
                return True
        return False
    def on_direction(self, snake, direction):
        """Creates and positions a new trail.
        Kivy calls this automatically whenever `direction` changes.
        """
        self.trail = Trail(size=self.size, pos=self.pos, color=self.color)
        # Position trail for following directly behind snake head
        if self.direction_x == 1:
            self.trail.width = 0
        elif self.direction_x == -1:
            self.trail.width = 0
            self.trail.x = self.right
        elif self.direction_y == 1:
            self.trail.height = 0
        elif self.direction_y == -1:
            self.trail.height = 0
            self.trail.y = self.top
        self.parent.add_widget(self.trail)
        self.parent.trails.append(self.trail)
class Trail(Widget):
    """Represents a trail left behind as the `Snake` moves."""
    # Trail color; set by the Snake that spawns it
    color = ListProperty()
class StatusBar(BoxLayout):
    """A container for displaying scores."""
    pass
class SnakesApp(App):
    """Kivy application wrapper that builds and starts the game."""
    def build(self):
        """Creates and runs the game."""
        # Don't let the ESC key close the window
        Config.set('kivy', 'exit_on_escape', '0')
        game = SnakesGame()
        game.run()
        return game
def main():
    """Entry point: start the Snakes application."""
    SnakesApp().run()
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# -selenium webdriver-
from logging import fatal
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
# config file import
import config
import requests # for discord webhook
# makes the bot only use the specified cores. (logical processors)
#import psutil
#p = psutil.Process()
# p.cpu_affinity([2,3,5,6,7,9,10,11,12,13,14])
# get the webdriver you want to use.
# Launch Firefox via the bundled geckodriver and open the product page.
# NOTE(review): the *_by_xpath helpers below are the Selenium 3 API (removed
# in Selenium 4) -- confirm the pinned selenium version before upgrading.
browser = webdriver.Firefox(executable_path=r'.\webdrivers\geckodriver.exe')
browser.get(config.PageURL)
#wait = WebDriverWait(browser, 2)
print('waiting')
element = WebDriverWait(browser, 20).until(lambda x: x.find_element_by_xpath(config.Size_Xpath)) # waits for page to finish loading
print('finished waiting')
# timestamp of when the bot was started
print('Time started =', config.current_time)
print('--------------------------------------')
# The size element's 'data-qa' attribute encodes availability
SizeAvailable = browser.find_element_by_xpath(config.Size_Xpath).get_attribute('data-qa')
# Product name and thumbnail scraped from hard-coded page XPaths
# (these break whenever the store changes its page layout)
ShoeNamePrimary = browser.find_element_by_xpath('//*[@id="root"]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[2]/aside/div/div[1]/h1').get_attribute('innerHTML')
ShoeNameSecondary = browser.find_element_by_xpath('//*[@id="root"]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[2]/aside/div/div[1]/h5').get_attribute('innerHTML')
ShoeThumbnail = browser.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[1]/div[2]/div/section[1]/div[1]/div/div[6]/figure/img').get_attribute('src')
#all_children_by_xpath = browser.find_elements_by_xpath(f'{config.Size_Xpath}.//*')
#print(all_children_by_xpath)
parentElement = browser.find_element_by_xpath(config.Size_Xpath)
elementList = parentElement.find_elements_by_tag_name('button')
print('===============')
print(parentElement)
print(elementList)
print('===============')
print(ShoeNamePrimary)
print(ShoeNameSecondary)
print(SizeAvailable)
# ~~ debug ~~
if config.DebugMode == True:
    print('debug value is true.')
    print('~~~~~~~~~~~~~~~~~~~~~~~~')
    print(f'*Debug Info*\nAutoBuy = {config.AutoBuy}\nAutoCart = {config.AutoCart}\nWinToast = {config.WindowsToasts}\nTestMode = {config.TestMode}')
    print('--------------------------------------')
# check if {InStockColor} is the same as the {SizeBgColor}
if SizeAvailable == 'size-unavailable': # size is unavailable
    print('out of stock')
    # browser.refresh()
elif SizeAvailable != 'size-unavailable': # size is available
    print('in stock')
    # notification settings (you can change them in the config)
    if config.WindowsToasts == True:
        from logging import debug
        from win10toast import ToastNotifier
        toaster = ToastNotifier()
        # duration is effectively "until dismissed"; threaded so the toast
        # doesn't block the rest of the script
        toaster.show_toast(f'Sneaker Bot v{config.SneakerBotVersion}',
                           (f'{ShoeNamePrimary} are in stock'),
                           icon_path='assets\\sneakerbot-icon.ico',
                           duration=999999,
                           threaded=True)
    if config.DiscordWebhooks == True:
        url = config.DiscordWebhookURL
        # The embed body/color differ between debug and normal mode
        if config.DebugMode == True:
            embed = {
                'title': 'Sneakers in stock!',
                'color': 15052624, # 15052624 orange, 14708343 bright red, 12017246 darker red
                'thumbnail': {
                    'url': ShoeThumbnail
                },
                'fields': [
                    {
                        'name': (f'{ShoeNamePrimary} - {ShoeNameSecondary}'),
                        'value': (f'[store link]({config.PageURL})\n▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n```▬ Debug Info ▬\nAutoBuy = {config.AutoBuy}\nAutoCart = {config.AutoCart}\nDiscordHook = {config.DiscordWebhooks}\nWinToast = {config.WindowsToasts}\nTestMode = {config.TestMode}\n```')
                    }
                ],
                'footer': {
                    'text': (f'Made by MBlais.dev • {config.current_time} • SneakerBot v{config.SneakerBotVersion}'),
                    'icon_url': 'https://i.imgur.com/MbrG9HM.png'
                }
            }
        elif config.DebugMode == False:
            embed = {
                'title': 'Sneakers in stock!',
                'color': 5546086, # dark green:5546086, light green:8776060
                'thumbnail': {
                    'url': ShoeThumbnail
                },
                'fields': [
                    {
                        'name': (f'{ShoeNamePrimary} - {ShoeNameSecondary}'),
                        'value': (f'[store link]({config.PageURL})\n▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬')
                    }
                ],
                'footer': {
                    'text': (f'Made by MBlais.dev • {config.current_time} • SneakerBot v{config.SneakerBotVersion}'),
                    'icon_url': 'https://i.imgur.com/MbrG9HM.png'
                }
            }
        data = {
            'username': (f'SneakerBot v{config.SneakerBotVersion}'),
            'avatar_url': 'https://i.imgur.com/eVDSFTr.png',
            'embeds': [
                embed
            ],
        }
        headers = {
            'Content-Type': 'application/json'
        }
        # Fire the Discord webhook; any 2xx status counts as delivered
        result = requests.post(url, json=data, headers=headers)
        if 200 <= result.status_code < 300:
            print(f'Webhook sent {result.status_code}')
        else:
            print(f'Not sent with {result.status_code}, response:\n{result.json()}')
|
nilq/baby-python
|
python
|
import argparse
import os
import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from tqdm import tqdm
from net.models import LeNet_5 as LeNet
import util
os.makedirs('saves', exist_ok=True)
# Training settings
# (help strings fixed to match the actual defaults: lr=0.0001, seed=12345678)
parser = argparse.ArgumentParser(description='PyTorch MNIST pruning from deep compression paper')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                    help='input batch size for training (default: 100)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                    help='learning rate (default: 0.0001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=12345678, metavar='S',
                    help='random seed (default: 12345678)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--log', type=str, default='log.txt',
                    help='log file name')
parser.add_argument('--model', type=str, default='saves/initial_model',
                    help='path to model pretrained with sparsity-inducing regularizer')
parser.add_argument('--sensitivity', type=float, default=0.001,
                    help="pruning threshold set as the sensitivity value")
args = parser.parse_args()
# Control Seed
torch.manual_seed(args.seed)
# Select Device
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else 'cpu')
if use_cuda:
    print("Using CUDA!")
    torch.cuda.manual_seed(args.seed)
else:
    print('Not using CUDA!!!')
# Loader (MNIST is downloaded into ./data on first run)
kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor()])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.Compose([
        transforms.ToTensor()])),
    batch_size=args.test_batch_size, shuffle=False, **kwargs)
# Define which model to use
model = LeNet(mask=False).to(device)
# NOTE : `weight_decay` term denotes L2 regularization loss term
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Saved so the optimizer could be re-initialized between pruning rounds
# (currently unused in this script -- kept for compatibility)
initial_optimizer_state_dict = optimizer.state_dict()
def train(epochs):
    """Fine-tune the pruned model for `epochs` epochs on MNIST.

    Uses the module-level `model`, `optimizer`, `train_loader`, `device`
    and `args`. After backprop, gradients of weights that are already zero
    are zeroed as well, so pruned connections stay pruned.
    """
    model.train()
    pbar = tqdm(range(epochs), total=epochs)
    for epoch in pbar:
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            total_loss = loss
            total_loss.backward()
            # zero-out all the gradients corresponding to the pruned connections
            for name, p in model.named_parameters():
                if 'mask' in name:
                    continue
                tensor = p.data.cpu().numpy()
                grad_tensor = p.grad.data.cpu().numpy()
                grad_tensor = np.where(tensor==0, 0, grad_tensor)
                p.grad.data = torch.from_numpy(grad_tensor).to(device)
            optimizer.step()
            if batch_idx % args.log_interval == 0:
                done = batch_idx * len(data)
                percentage = 100. * batch_idx / len(train_loader)
                pbar.set_description(f'Train Epoch: {epoch} [{done:5}/{len(train_loader.dataset)} ({percentage:3.0f}%)] Loss: {loss.item():.6f} Total: {total_loss.item():.6f}')
def test():
    """Evaluate `model` on the test set and return accuracy in percent.

    Uses the module-level `model`, `test_loader` and `device`.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)')
    return accuracy
# Load the model pretrained with the sparsity-inducing regularizer
model.load_state_dict(torch.load(args.model+'.pth'))
# Initial training
print("--- Pruning ---")
# Magnitude pruning: zero out every weight with |value| < sensitivity.
# Mask tensors (if the model has any) are skipped.
for name, p in model.named_parameters():
    if 'mask' in name:
        continue
    tensor = p.data.cpu().numpy()
    new_mask = np.where(abs(tensor) < args.sensitivity, 0, tensor)
    p.data = torch.from_numpy(new_mask).to(device)
accuracy = test()
util.print_nonzeros(model)
print("--- Finetuning ---")
train(args.epochs)
accuracy = test()
# Persist the pruned + fine-tuned weights, tagged with the threshold used
torch.save(model.state_dict(), args.model+'_T_'+str(args.sensitivity)+'.pth')
|
nilq/baby-python
|
python
|
import random
import numpy as np
import tensorflow as tf
import tqdm
from pipeline import SEED
from pipeline.dataset.gc import get_client
# Deterministic shuffling across runs
random.seed(SEED)
# Shorthand aliases for the estimator mode keys
TRAIN = tf.estimator.ModeKeys.TRAIN
EVAL = tf.estimator.ModeKeys.EVAL
PREDICT = tf.estimator.ModeKeys.PREDICT
class MNISTDataset:
    """MNIST dataset loaded from BigQuery tables (train/valid/test)."""
    # Estimator mode -> BigQuery table name
    TABLES = {TRAIN: "train", EVAL: "valid", PREDICT: "test"}
    def get_data(self):
        """Fetch the whole table for `self.mode` as (features, labels) arrays."""
        def _string_to_float(_raw_image: str):
            # Each image is stored as a comma-separated string of pixel values
            # reshaped to 28x28 -- assumes 784 values per row; TODO confirm
            # against the ingestion pipeline.
            arr = np.asarray(_raw_image.split(","), "float")
            return arr.reshape([28, 28])
        mode = self.mode
        dataset_ref = self.client.dataset(self.dataset_id)
        print(f"Mode: {mode} -> Load data from table")
        rows = self.client.list_rows(dataset_ref.table(self.TABLES[self.mode]))
        arrows = rows.to_arrow()
        arrows_dict = arrows.to_pydict()
        # "key" holds the label, "image" the raw pixel string
        labels, images = arrows_dict["key"], arrows_dict["image"]
        labels = np.array(labels)
        images = np.array(
            [_string_to_float(image) for image in tqdm.tqdm(images)], "float"
        )
        # Result from BigQuery are sorted by key.
        data = list(zip(images, labels))
        random.shuffle(data)
        images, labels = zip(*data)
        feature = np.array(images, "float")
        label = np.array(labels, "int")
        if self.mode == PREDICT:
            # Cap the prediction split at 1000 samples
            return feature[:1000], label[:1000]
        return feature, label
    def __init__(self, mode: tf.estimator.ModeKeys, dataset_id: str):
        # Eagerly loads the entire split into memory at construction time
        self.mode = mode
        self.dataset_id = dataset_id
        self.client = get_client("bigquery")
        self.data = self.get_data()
    def data_generator(self):
        """Return a zero-arg generator function over (image, label) pairs."""
        def _gen():
            for image, label in zip(*self.data):
                yield image, label
        return _gen
    def get_input_fn(self, batch_size: int, shuffle=False):
        """Build an Estimator input_fn yielding ({"image": ...}, label) batches."""
        def _preprocess(image, label):
            # Scale pixels to [0, 1] and add the channel dimension
            image = image / 255.0
            image = tf.reshape(image, [28, 28, 1])
            return {"image": image}, label
        def _get_input_fn():
            output_types = (tf.float32, tf.int32)
            output_shapes = [28, 28]
            dataset = tf.data.Dataset.from_generator(
                self.data_generator(), output_types, output_shapes
            )
            if self.mode == TRAIN:
                # Repeat indefinitely while training
                dataset = dataset.repeat()
            if shuffle:
                dataset = dataset.shuffle(batch_size * 10)
            dataset = dataset.map(_preprocess)
            dataset = dataset.batch(batch_size).prefetch(8)
            # NOTE(review): make_one_shot_iterator is the TF1-style API --
            # verify the pipeline pins TF 1.x (or tf.compat.v1) before upgrading.
            iterator = dataset.make_one_shot_iterator()
            features, labels = iterator.get_next()
            return features, labels
        return _get_input_fn
|
nilq/baby-python
|
python
|
"""\
Demo app for the ARDrone.
This simple application allows to control the drone and see the drone's video
stream.
Copyright (c) 2011 Bastian Venthur
The license and distribution terms for this file may be
found in the file LICENSE in this distribution.
"""
import pygame
from pydrone import libardrone
if __name__ == '__main__':
pygame.init()
W, H = 320, 240
screen = pygame.display.set_mode((W, H))
drone = libardrone.ARDrone()
clock = pygame.time.Clock()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYUP:
drone.hover()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
drone.reset()
running = False
# takeoff / land
elif event.key == pygame.K_RETURN:
drone.takeoff()
elif event.key == pygame.K_SPACE:
drone.land()
# emergency
elif event.key == pygame.K_BACKSPACE:
drone.reset()
# forward / backward
elif event.key == pygame.K_w:
drone.move_forward()
elif event.key == pygame.K_s:
drone.move_backward()
# left / right
elif event.key == pygame.K_a:
drone.move_left()
elif event.key == pygame.K_d:
drone.move_right()
# up / down
elif event.key == pygame.K_UP:
drone.move_up()
elif event.key == pygame.K_DOWN:
drone.move_down()
# turn left / turn right
elif event.key == pygame.K_LEFT:
drone.turn_left()
elif event.key == pygame.K_RIGHT:
drone.turn_right()
# speed
elif event.key == pygame.K_1:
drone.speed = 0.1
elif event.key == pygame.K_2:
drone.speed = 0.2
elif event.key == pygame.K_3:
drone.speed = 0.3
elif event.key == pygame.K_4:
drone.speed = 0.4
elif event.key == pygame.K_5:
drone.speed = 0.5
elif event.key == pygame.K_6:
drone.speed = 0.6
elif event.key == pygame.K_7:
drone.speed = 0.7
elif event.key == pygame.K_8:
drone.speed = 0.8
elif event.key == pygame.K_9:
drone.speed = 0.9
elif event.key == pygame.K_0:
drone.speed = 1.0
try:
surface = pygame.image.fromstring(drone.image, (W, H), 'RGB')
# battery status
hud_color = (10, 10, 255)
if drone.navdata.get('drone_state', dict()).get('emergency_mask', 1):
hud_color = (255, 0, 0)
bat = drone.navdata.get(0, dict()).get('battery', 0)
f = pygame.font.Font(None, 20)
hud = f.render('Battery: %i%%' % bat, True, hud_color)
screen.blit(surface, (0, 0))
screen.blit(hud, (10, 10))
except:
pass
pygame.display.flip()
clock.tick()
pygame.display.set_caption("FPS: %.2f" % clock.get_fps())
print "Shutting down...",
drone.halt()
print "Ok."
|
nilq/baby-python
|
python
|
from Parameter import Parameter, registerParameterType
from ParameterTree import ParameterTree
from ParameterItem import ParameterItem
import parameterTypes as types
|
nilq/baby-python
|
python
|
# encoding: utf-8
import uuid
import os
import random
import json
from collections import Counter
from flask import request, abort, jsonify, g, url_for, current_app, session
from flask_restful import Resource, reqparse
from flask_socketio import (
emit,
disconnect
)
from app.ansibles.ansible_task import INVENTORY
from app.ansibles.ansible_core import Runner
from app import redis, socketio, api
from tasks.task import long_task
from app import redis
class LoginView(Resource):
    """REST resource for user login (currently returns canned data)."""
    def __init__(self):
        # Accept credentials from JSON body, query string or headers
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('username', type=str, location=[
            'json', 'args', 'headers'])
        self.reqparse.add_argument('password', type=str, location=[
            'json', 'args', 'headers'])
        self.args = self.reqparse.parse_args()
        super(LoginView, self).__init__()
    def get(self):
        # No session handling yet -- always reports the client as unlogged
        print("clients")
        return jsonify({'clients': "unlogin"})
    def post(self):
        # NOTE(review): credentials are never checked and a hard-coded token
        # is returned -- presumably a stub to be replaced with real auth.
        print(self.args)
        return jsonify({"user": "admin", "token": "dsdsdufsffjfjudss789h", "code": 200})
class UserInfoView(Resource):
    """REST resource returning the current user's info from a token."""
    def __init__(self):
        # Token may arrive in the JSON body, query string or headers
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('token', type=str, location=[
            'json', 'args', 'headers'])
        self.args = self.reqparse.parse_args()
        super(UserInfoView, self).__init__()
    def get(self):
        print(self.args)
        # NOTE(review): token validation is commented out below -- the view
        # currently answers with hard-coded data for any request.
        # if token == user["token"]:
        #     name = "admin"
        return jsonify({"token": "dsdsdufsffjfjudss789h", "code": 200, "name": "admin"})
    def post(self):
        print(self.args)
        return jsonify({"token": "dsdsdufsffjfjudss789h", "code": 200})
class LogoutView(Resource):
    """REST resource for logout (currently acknowledges without state change)."""
    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('method', type=str, location=[
            'json', 'args', 'headers'])
        self.args = self.reqparse.parse_args()
        super(LogoutView, self).__init__()
    def post(self):
        print(self.args)
        return jsonify({"method": "logout", "code": 200})
|
nilq/baby-python
|
python
|
import unittest
from random import uniform
from pysim import epcstd
class TestDataTypes(unittest.TestCase):
    """Pins the bit encodings and string forms of the EPC Gen2 enums."""
    # --- DivideRatio (DR) ---
    def test_divide_ratio_encoding(self):
        self.assertEqual(epcstd.DivideRatio.DR_8.code, "0")
        self.assertEqual(epcstd.DivideRatio.DR_643.code, "1")
    def test_divide_ratio_str(self):
        self.assertEqual(str(epcstd.DivideRatio.DR_8), '8')
        self.assertEqual(str(epcstd.DivideRatio.DR_643), '64/3')
    def test_divide_ratio_eval(self):
        self.assertAlmostEqual(epcstd.DivideRatio.DR_8.eval(), 8.0)
        self.assertAlmostEqual(epcstd.DivideRatio.DR_643.eval(), 64.0/3)
    # --- Session (S0..S3) ---
    def test_session_encoding(self):
        self.assertEqual(epcstd.Session.S0.code, "00")
        self.assertEqual(epcstd.Session.S1.code, "01")
        self.assertEqual(epcstd.Session.S2.code, "10")
        self.assertEqual(epcstd.Session.S3.code, "11")
    def test_session_number(self):
        self.assertEqual(epcstd.Session.S0.index, 0)
        self.assertEqual(epcstd.Session.S1.index, 1)
        self.assertEqual(epcstd.Session.S2.index, 2)
        self.assertEqual(epcstd.Session.S3.index, 3)
    def test_session_str(self):
        self.assertEqual(str(epcstd.Session.S0).upper(), "S0")
        self.assertEqual(str(epcstd.Session.S1).upper(), "S1")
        self.assertEqual(str(epcstd.Session.S2).upper(), "S2")
        self.assertEqual(str(epcstd.Session.S3).upper(), "S3")
    # --- Tag (backscatter) encoding: FM0 / Miller M2, M4, M8 ---
    def test_tag_encoding_encoding(self):
        self.assertEqual(epcstd.TagEncoding.FM0.code, '00')
        self.assertEqual(epcstd.TagEncoding.M2.code, '01')
        self.assertEqual(epcstd.TagEncoding.M4.code, '10')
        self.assertEqual(epcstd.TagEncoding.M8.code, '11')
    def test_tag_encoding_symbols_per_bit(self):
        self.assertEqual(epcstd.TagEncoding.FM0.symbols_per_bit, 1)
        self.assertEqual(epcstd.TagEncoding.M2.symbols_per_bit, 2)
        self.assertEqual(epcstd.TagEncoding.M4.symbols_per_bit, 4)
        self.assertEqual(epcstd.TagEncoding.M8.symbols_per_bit, 8)
    def test_tag_encoding_str(self):
        self.assertEqual(str(epcstd.TagEncoding.FM0).upper(), "FM0")
        self.assertEqual(str(epcstd.TagEncoding.M2).upper(), "M2")
        self.assertEqual(str(epcstd.TagEncoding.M4).upper(), "M4")
        self.assertEqual(str(epcstd.TagEncoding.M8).upper(), "M8")
    # --- Inventory flag (A/B) ---
    def test_inventory_flag_encoding(self):
        self.assertEqual(epcstd.InventoryFlag.A.code, '0')
        self.assertEqual(epcstd.InventoryFlag.B.code, '1')
    def test_inventory_flag_str(self):
        self.assertEqual(str(epcstd.InventoryFlag.A).upper(), "A")
        self.assertEqual(str(epcstd.InventoryFlag.B).upper(), "B")
    # --- Sel flag (ALL may be encoded as either 00 or 01) ---
    def test_sel_flag_encoding(self):
        self.assertIn(epcstd.SelFlag.ALL.code, ['00', '01'])
        self.assertEqual(epcstd.SelFlag.NOT_SEL.code, '10')
        self.assertEqual(epcstd.SelFlag.SEL.code, '11')
    def test_sel_flag_str(self):
        self.assertEqual(str(epcstd.SelFlag.ALL).lower(), "all")
        self.assertEqual(str(epcstd.SelFlag.SEL).lower(), "sl")
        self.assertEqual(str(epcstd.SelFlag.NOT_SEL).lower(), "~sl")
    # --- Memory banks ---
    def test_memory_bank_encoding(self):
        self.assertEqual(epcstd.MemoryBank.RESERVED.code, '00')
        self.assertEqual(epcstd.MemoryBank.EPC.code, '01')
        self.assertEqual(epcstd.MemoryBank.TID.code, '10')
        self.assertEqual(epcstd.MemoryBank.USER.code, '11')
    # --- Command codes (note the variable code lengths) ---
    def test_command_code_encoding(self):
        self.assertEqual(epcstd.CommandCode.QUERY.code, '1000')
        self.assertEqual(epcstd.CommandCode.QUERY_REP.code, '00')
        self.assertEqual(epcstd.CommandCode.ACK.code, '01')
        self.assertEqual(epcstd.CommandCode.REQ_RN.code, '11000001')
        self.assertEqual(epcstd.CommandCode.READ.code, '11000010')
    def test_command_code_str(self):
        self.assertEqual(str(epcstd.CommandCode.QUERY).lower(), "query")
        self.assertIn(str(epcstd.CommandCode.QUERY_REP).lower(),
                      ['query_rep', 'qrep', 'queryrep'])
        self.assertEqual(str(epcstd.CommandCode.ACK).lower(), 'ack')
        self.assertIn(str(epcstd.CommandCode.REQ_RN).lower(),
                      ['req_rn', 'reqrn'])
        self.assertEqual(str(epcstd.CommandCode.READ).lower(), 'read')
class TestEncodingFunctions(unittest.TestCase):
    """Pins the low-level bit-string encoding helpers of `epcstd`."""
    def test_encode_bool(self):
        self.assertEqual(epcstd.encode_bool(True), '1')
        self.assertEqual(epcstd.encode_bool(False), '0')
    def test_encode_int(self):
        self.assertEqual(epcstd.encode_int(0, 4), '0000')
        self.assertEqual(epcstd.encode_int(0xF, 4), '1111')
        self.assertEqual(epcstd.encode_byte(0xA5), '10100101')
        self.assertEqual(epcstd.encode_word(0xAB3C), '1010101100111100')
    def test_ebv(self):
        # EBV: 7 data bits per byte; the high bit flags a continuation byte
        self.assertEqual(epcstd.encode_ebv(0), '00000000')
        self.assertEqual(epcstd.encode_ebv(1), '00000001')
        self.assertEqual(epcstd.encode_ebv(127), '01111111')
        self.assertEqual(epcstd.encode_ebv(128), '1000000100000000')
        self.assertEqual(epcstd.encode_ebv(16383), '1111111101111111')
        self.assertEqual(epcstd.encode_ebv(16384), '100000011000000000000000')
class TestCommands(unittest.TestCase):
def test_query_command_encoding(self):
cmd1 = epcstd.Query(dr=epcstd.DivideRatio.DR_8,
m=epcstd.TagEncoding.FM0, trext=False,
sel=epcstd.SelFlag.ALL,
session=epcstd.Session.S0,
target=epcstd.InventoryFlag.A, q=0,
crc=0x00)
self.assertEqual(cmd1.encode(), '1000000000000000000000')
self.assertEqual(cmd1.bitlen, 22)
cmd2 = epcstd.Query(dr=epcstd.DivideRatio.DR_643,
m=epcstd.TagEncoding.M8, trext=True,
sel=epcstd.SelFlag.SEL,
session=epcstd.Session.S3,
target=epcstd.InventoryFlag.B, q=6,
crc=0x0B)
self.assertEqual(cmd2.encode(), '1000111111111011001011')
def test_query_command_str(self):
cmd = epcstd.Query(dr=epcstd.DivideRatio.DR_8,
m=epcstd.TagEncoding.FM0, trext=False,
sel=epcstd.SelFlag.ALL,
session=epcstd.Session.S0,
target=epcstd.InventoryFlag.A, q=13,
crc=0x1F)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.QUERY), string)
self.assertIn(str(epcstd.DivideRatio.DR_8), string)
self.assertIn(str(epcstd.TagEncoding.FM0), string)
self.assertIn(str(epcstd.SelFlag.ALL), string)
self.assertIn(str(epcstd.Session.S0), string)
self.assertIn(str(epcstd.InventoryFlag.A), string)
self.assertIn("13", string)
self.assertIn("1F", string)
def test_query_command_using_modelParams(self):
#
# 1) Setting some initial values for Query fields in readerParams
# and making sure they are passed to Query as default values
#
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
epcstd.stdParams.tag_encoding = epcstd.TagEncoding.FM0
epcstd.stdParams.sel = epcstd.SelFlag.SEL
epcstd.stdParams.session = epcstd.Session.S0
epcstd.stdParams.target = epcstd.InventoryFlag.A
epcstd.stdParams.Q = 3
epcstd.stdParams.trext = False
query1 = epcstd.Query()
def assert_query_params(query):
self.assertEqual(query.dr, epcstd.stdParams.divide_ratio)
self.assertEqual(query.m, epcstd.stdParams.tag_encoding)
self.assertEqual(query.sel, epcstd.stdParams.sel)
self.assertEqual(query.session, epcstd.stdParams.session)
self.assertEqual(query.target, epcstd.stdParams.target)
self.assertEqual(query.q, epcstd.stdParams.Q)
self.assertEqual(query.trext, epcstd.stdParams.trext)
assert_query_params(query1)
#
# 2) Altering values in readerParams and making sure they are
# passed to Query
#
epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_643
epcstd.stdParams.tag_encoding = epcstd.TagEncoding.M8
epcstd.stdParams.sel = epcstd.SelFlag.NOT_SEL
epcstd.stdParams.session = epcstd.Session.S3
epcstd.stdParams.target = epcstd.InventoryFlag.B
epcstd.stdParams.Q = 8
epcstd.stdParams.trext = True
query2 = epcstd.Query()
assert_query_params(query2)
def test_query_rep_command_encoding(self):
cmd1 = epcstd.QueryRep(session=epcstd.Session.S0)
self.assertEqual(cmd1.encode(), '0000')
self.assertEqual(cmd1.bitlen, 4)
cmd2 = epcstd.QueryRep(session=epcstd.Session.S3)
self.assertEqual(cmd2.encode(), '0011')
def test_query_rep_command_str(self):
cmd = epcstd.QueryRep(session=epcstd.Session.S1)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.QUERY_REP), string)
self.assertIn(str(epcstd.Session.S1), string)
def test_query_rep_using_modelparams(self):
def assert_fields_match_reader_params(query_rep):
self.assertEqual(query_rep.session, epcstd.stdParams.session)
# 1) Setting readerParams and checking they were passed to the
# command as the default params
epcstd.stdParams.session = epcstd.Session.S0
query_rep_1 = epcstd.QueryRep()
assert_fields_match_reader_params(query_rep_1)
# 2) Changing readerParams and checking the changed
# values were passed to new command as the default params
epcstd.stdParams.session = epcstd.Session.S3
query_rep_2 = epcstd.QueryRep()
assert_fields_match_reader_params(query_rep_2)
def test_ack_command_encoding(self):
cmd1 = epcstd.Ack(rn=0x0000)
self.assertEqual(cmd1.encode(), '010000000000000000')
self.assertEqual(cmd1.bitlen, 18)
cmd2 = epcstd.Ack(rn=0xFFFF)
self.assertEqual(cmd2.encode(), '011111111111111111')
def test_ack_command_str(self):
cmd = epcstd.Ack(rn=0xAB)
string = str(cmd)
self.assertIn(str(epcstd.CommandCode.ACK), string)
self.assertIn('0x00AB', string)
def test_req_rn_command_encoding(self):
cmd1 = epcstd.ReqRN(rn=0x0000, crc=0x0000)
cmd2 = epcstd.ReqRN(rn=0xAAAA, crc=0x5555)
self.assertEqual(cmd1.encode(),
'1100000100000000000000000000000000000000')
self.assertEqual(cmd1.bitlen, 40)
self.assertEqual(cmd2.encode(),
'1100000110101010101010100101010101010101')
def test_req_rn_command_str(self):
cmd1 = epcstd.ReqRN(rn=0x1234, crc=0xABCD)
cmd2 = epcstd.ReqRN(rn=0xAABB, crc=0xCCDD)
string1 = str(cmd1)
string2 = str(cmd2)
self.assertIn('0x1234', string1)
self.assertIn('0xABCD', string1)
self.assertIn('0xAABB', string2)
self.assertIn('0xCCDD', string2)
def test_read_command_encoding(self):
cmd1 = epcstd.Read(bank=epcstd.MemoryBank.RESERVED, word_ptr=0,
word_count=0, rn=0x0000, crc=0x0000)
cmd2 = epcstd.Read(bank=epcstd.MemoryBank.USER, word_ptr=0x80,
word_count=255, rn=0xAAAA, crc=0x5555)
self.assertEqual(cmd1.encode(), '11000010' + '0' * 50)
self.assertEqual(cmd1.bitlen, 58)
self.assertEqual(cmd2.encode(), '11000010' + '11' + '1000000100000000'
+ '1' * 8 + '1010' * 4 + '0101' * 4)
def test_read_using_modelParams(self):
def assert_fields_match_reader_params(cmd):
assert isinstance(cmd, epcstd.Read)
self.assertEqual(cmd.bank, epcstd.stdParams.read_default_bank)
self.assertEqual(cmd.word_ptr,
epcstd.stdParams.read_default_word_ptr)
self.assertEqual(cmd.word_count,
epcstd.stdParams.read_default_word_count)
# 1) Setting readerParams and checking they were passed to the
# command as the default params
epcstd.stdParams.read_default_bank = epcstd.MemoryBank.EPC
epcstd.stdParams.read_default_word_ptr = 0
epcstd.stdParams.read_default_word_count = 10
cmd1 = epcstd.Read()
assert_fields_match_reader_params(cmd1)
# 2) Changing readerParams and checking the changed
# values were passed to new command as the default params
epcstd.stdParams.read_default_bank = epcstd.MemoryBank.TID
epcstd.stdParams.read_default_word_ptr = 5
epcstd.stdParams.read_default_word_count = 23
cmd2 = epcstd.Read()
assert_fields_match_reader_params(cmd2)
def test_read_command_str(self):
cmd1 = epcstd.Read(bank=epcstd.MemoryBank.EPC, word_ptr=2,
word_count=5, rn=0xAABB, crc=0xCCDD)
cmd2 = epcstd.Read(bank=epcstd.MemoryBank.TID, word_ptr=3,
word_count=1, rn=0xABCD, crc=0xEFEF)
string1 = str(cmd1)
string2 = str(cmd2)
self.assertIn('EPC', string1.upper())
self.assertIn('0x02', string1)
self.assertIn('5', string1)
self.assertIn('0xAABB', string1)
self.assertIn('0xCCDD', string1)
self.assertIn('TID', string2.upper())
self.assertIn('0x03', string2)
self.assertIn('1', string2)
self.assertIn('0xABCD', string2)
self.assertIn('0xEFEF', string2)
class TestReplies(unittest.TestCase):
    """Bit lengths and string forms of tag replies (RN16, EPC, read data)."""
    def test_to_bytes(self):
        """to_bytes() accepts a hex string or an iterable of ints and
        raises ValueError for a bare int."""
        self.assertEqual(epcstd.to_bytes('1122'), [0x11, 0x22])
        self.assertEqual(epcstd.to_bytes((0xAB,)), [0xAB])
        with self.assertRaises(ValueError):
            epcstd.to_bytes(0xAB)
    def test_query_reply_bitlen(self):
        """QueryReply (RN16) is always 16 bits."""
        msg = epcstd.QueryReply(rn=0x0000)
        self.assertEqual(msg.bitlen, 16)
    def test_query_reply_str(self):
        """str() must contain this reply's RN16 and not another reply's."""
        msg1 = epcstd.QueryReply(rn=0xABCD)
        msg2 = epcstd.QueryReply(rn=0x1122)
        string1 = str(msg1)
        string2 = str(msg2)
        self.assertIn('ABCD', string1.upper())
        self.assertNotIn('1122', string1)
        self.assertIn('1122', string2)
        self.assertNotIn('ABCD', string2.upper())
    def test_ack_reply_bitlen(self):
        """AckReply is 16-bit PC + EPC + 16-bit CRC; the EPC may be a hex
        string or a list of bytes (64/48/24 EPC bits -> 96/80/56 total)."""
        msg1 = epcstd.AckReply(pc=0x0000, epc='0011223344556677', crc=0x0000)
        msg2 = epcstd.AckReply(pc=0x0000, epc='001122334455', crc=0x0000)
        msg3 = epcstd.AckReply(pc=0x0000, epc=[0x00, 0x11, 0x22], crc=0x0000)
        self.assertEqual(msg1.bitlen, 96)
        self.assertEqual(msg2.bitlen, 80)
        self.assertEqual(msg3.bitlen, 56)
    def test_ack_reply_str(self):
        """str() must contain this reply's PC, EPC and CRC and not the
        fields of a different reply."""
        msg1 = epcstd.AckReply(pc=0xABCD, epc='0011223344556677', crc=0x1234)
        msg2 = epcstd.AckReply(pc=0xDCBA, epc='001122334455', crc=0x4321)
        s1 = str(msg1)
        s2 = str(msg2)
        self.assertIn('ABCD', s1.upper())
        self.assertNotIn('DCBA', s1.upper())
        self.assertIn('1234', s1)
        self.assertNotIn('4321', s1)
        self.assertIn('0011223344556677', s1)
        self.assertIn('DCBA', s2.upper())
        self.assertIn('4321', s2)
        self.assertIn('001122334455', s2)
        self.assertNotIn('6677', s2)
    def test_req_rn_reply_bitlen(self):
        """ReqRnReply is 16-bit handle + 16-bit CRC = 32 bits."""
        msg = epcstd.ReqRnReply(rn=0x0000, crc=0x0000)
        self.assertEqual(msg.bitlen, 32)
    def test_req_rn_reply_str(self):
        """str() must contain this reply's RN and CRC, not another's."""
        msg1 = epcstd.ReqRnReply(rn=0xABCD, crc=0x1234)
        msg2 = epcstd.ReqRnReply(rn=0xDCBA, crc=0x4321)
        s1 = str(msg1)
        s2 = str(msg2)
        self.assertIn('ABCD', s1.upper())
        self.assertIn('1234', s1)
        self.assertNotIn('DCBA', s1.upper())
        self.assertNotIn('4321', s1)
        self.assertIn('DCBA', s2.upper())
        self.assertIn('4321', s2)
    def test_read_reply_bitlen(self):
        """ReadReply is 1 header bit + data + 16-bit RN + 16-bit CRC,
        i.e. 33 bits of overhead (32/48/16 data bits -> 65/81/49)."""
        msg1 = epcstd.ReadReply(data='00112233', rn=0x0000, crc=0x0000)
        msg2 = epcstd.ReadReply(data='001122334455', rn=0x0000, crc=0x0000)
        msg3 = epcstd.ReadReply(data=[0x00, 0x11], rn=0x0000, crc=0x0000)
        self.assertEqual(msg1.bitlen, 65)
        self.assertEqual(msg2.bitlen, 81)
        self.assertEqual(msg3.bitlen, 49)
    def test_read_reply_str(self):
        """str() must contain this reply's data, RN and CRC, not another's."""
        msg1 = epcstd.ReadReply(data='00112233', rn=0x1234, crc=0xABCD)
        msg2 = epcstd.ReadReply(data='AABBCC', rn=0x4321, crc=0xDCBA)
        s1 = str(msg1)
        s2 = str(msg2)
        self.assertIn('00112233', s1)
        self.assertIn('1234', s1)
        self.assertIn('ABCD', s1.upper())
        self.assertNotIn('AABBCC', s1.upper())
        self.assertNotIn('4321', s1)
        self.assertNotIn('DCBA', s1)
        self.assertIn('AABBCC', s2.upper())
        self.assertIn('4321', s2)
        self.assertIn('DCBA', s2.upper())
class TestReaderPreambles(unittest.TestCase):
    """Timing checks for reader-side preamble and SYNC signals."""

    def test_reader_preamble_durations(self):
        """Preamble fields: data0 equals tari, data1 = rtcal - tari,
        default delim is 12.5us, total = delim + tari + rtcal + trcal."""
        preamble = epcstd.ReaderPreamble(tari=6.25e-6, rtcal=18.75e-6,
                                         trcal=56.25e-6)
        checks = [(preamble.data0, preamble.tari),
                  (preamble.delim, 12.5e-6),
                  (preamble.data0, 6.25e-6),
                  (preamble.data1, 12.5e-6),
                  (preamble.rtcal, 18.75e-6),
                  (preamble.trcal, 56.25e-6),
                  (preamble.duration, 93.75e-6)]
        for actual, expected in checks:
            self.assertAlmostEqual(actual, expected, 9)

    def test_reader_preamble_str(self):
        """str() must include the delim, tari, rtcal and trcal values."""
        preamble = epcstd.ReaderPreamble(tari=12.5e-6, rtcal=33.45e-6,
                                         trcal=60.15e-6, delim=13.0e-6)
        text = str(preamble)
        for token in ("12.5", "33.45", "60.15", "13.0"):
            self.assertIn(token, text)

    def test_reader_sync_durations(self):
        """SYNC fields: data0 equals tari, data1 = rtcal - tari,
        total = delim + tari + rtcal."""
        sync = epcstd.ReaderSync(tari=12.5e-6, rtcal=31.25e-6, delim=13.0e-6)
        self.assertAlmostEqual(sync.data0, sync.tari, 9)
        self.assertAlmostEqual(sync.data0, 12.5e-6, 9)
        self.assertAlmostEqual(sync.data1, 18.75e-6, 9)
        self.assertAlmostEqual(sync.rtcal, 31.25e-6, 9)
        self.assertAlmostEqual(sync.delim, 13.0e-6)
        self.assertAlmostEqual(sync.duration, 56.75e-6, 9)

    def test_reader_sync_str(self):
        """str() must include the delim, tari and rtcal values."""
        sync = epcstd.ReaderSync(tari=25e-6, rtcal=75e-6, delim=12.0e-6)
        text = str(sync)
        for token in ("12.0", "25.0", "75.0"):
            self.assertIn(token, text)
class TestTagPreambles(unittest.TestCase):
    """Bit lengths and durations of tag-side (FM0 and Miller) preambles."""

    def test_tag_FM0_preamble_bitlen_and_duration(self):
        """FM0 preamble is 6 bits (18 with TRext); its duration scales
        inversely with BLF."""
        short_preamble = epcstd.FM0Preamble(extended=False)
        long_preamble = epcstd.FM0Preamble(extended=True)
        self.assertEqual(short_preamble.bitlen, 6)
        self.assertEqual(long_preamble.bitlen, 18)
        self.assertAlmostEqual(short_preamble.get_duration(blf=320e3),
                               1.875e-5)
        self.assertAlmostEqual(long_preamble.get_duration(blf=320e3),
                               5.625e-5)
        self.assertAlmostEqual(short_preamble.get_duration(blf=40e3), 15e-5)
        self.assertAlmostEqual(long_preamble.get_duration(blf=40e3), 45e-5)

    def test_tag_miller_preamble_bitlen_and_duration(self):
        """Miller preamble is 10 bits (22 with TRext) for any M; the
        duration additionally scales with M."""
        m2_short = epcstd.MillerPreamble(m=2, extended=False)
        m2_long = epcstd.MillerPreamble(m=2, extended=True)
        m4_short = epcstd.MillerPreamble(m=4)
        m8_long = epcstd.MillerPreamble(m=8, extended=True)
        self.assertEqual(m2_short.bitlen, 10)
        self.assertEqual(m2_long.bitlen, 22)
        self.assertEqual(m4_short.bitlen, 10)
        self.assertEqual(m8_long.bitlen, 22)
        self.assertAlmostEqual(m2_short.get_duration(blf=320e3), 6.25e-5)
        self.assertAlmostEqual(m2_long.get_duration(blf=320e3), 13.75e-5)
        self.assertAlmostEqual(m4_short.get_duration(blf=320e3), 12.5e-5)
        self.assertAlmostEqual(m8_long.get_duration(blf=320e3), 55e-5)
        self.assertAlmostEqual(m2_short.get_duration(blf=64e3), 31.25e-5)

    def test_tag_preamble_factory(self):
        """create_tag_preamble() must return FM0Preamble for FM0 and
        MillerPreamble for M2/M4/M8.

        BUGFIX: m4_preamble and m8_preamble were previously built with
        TagEncoding.M2 (copy-paste error), so the factory was never
        exercised for the M4 and M8 encodings.
        """
        fm0_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0)
        fm0_extended_preamble = epcstd.create_tag_preamble(
            epcstd.TagEncoding.FM0, True)
        m2_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2)
        m4_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M4)
        m8_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M8)
        self.assertIsInstance(fm0_preamble, epcstd.FM0Preamble)
        self.assertIsInstance(fm0_extended_preamble, epcstd.FM0Preamble)
        self.assertIsInstance(m2_preamble, epcstd.MillerPreamble)
        self.assertIsInstance(m4_preamble, epcstd.MillerPreamble)
        self.assertIsInstance(m8_preamble, epcstd.MillerPreamble)
        self.assertEqual(fm0_preamble.bitlen, 6)
        self.assertEqual(fm0_extended_preamble.bitlen, 18)

    def test_tag_preamble_has_str(self):
        """str() of a preamble must be human-readable (no raw hex dump)."""
        s1 = str(epcstd.FM0Preamble(True))
        s2 = str(epcstd.MillerPreamble(2, True))
        self.assertNotIn("0x", s1)
        self.assertNotIn("0x", s2)

    def test_tag_preamble_bitlen(self):
        """tag_preamble_bitlen() helper must agree with preamble objects
        (the third argument defaults to stdParams.trext = False here)."""
        epcstd.stdParams.trext = False
        fm0_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, False)
        fm0_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, True)
        m2_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
        m2_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
        self.assertEqual(fm0_normal.bitlen, epcstd.tag_preamble_bitlen(
            epcstd.TagEncoding.FM0))
        self.assertEqual(fm0_extended.bitlen, epcstd.tag_preamble_bitlen(
            epcstd.TagEncoding.FM0, True))
        self.assertEqual(m2_normal.bitlen, epcstd.tag_preamble_bitlen(
            epcstd.TagEncoding.M2))
        self.assertEqual(m2_extended.bitlen, epcstd.tag_preamble_bitlen(
            epcstd.TagEncoding.M2, True))

    def test_tag_preamble_duration(self):
        """tag_preamble_duration() helper must agree with preamble objects
        at both a slow and a fast BLF."""
        fm0_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, False)
        fm0_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, True)
        m2_normal = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
        m2_extended = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
        blf_slow = epcstd.get_blf(epcstd.DivideRatio.DR_8, 25.0e-6 * 9)
        blf_fast = epcstd.get_blf(epcstd.DivideRatio.DR_643, 6.25e-6 * 9)
        cases = [
            (fm0_normal, epcstd.TagEncoding.FM0, False),
            (fm0_extended, epcstd.TagEncoding.FM0, True),
            (m2_normal, epcstd.TagEncoding.M2, False),
            (m2_extended, epcstd.TagEncoding.M2, True),
        ]
        for preamble, encoding, extended in cases:
            for blf in (blf_slow, blf_fast):
                self.assertEqual(
                    preamble.get_duration(blf),
                    epcstd.tag_preamble_duration(blf, encoding, extended))
class TestReaderFrames(unittest.TestCase):
    """Durations of reader frames (preamble/SYNC plus encoded command)."""

    def setUp(self):
        # This query encodes as 1000011011010001101010
        # (ten '1' bits and twelve '0' bits).
        self.query = epcstd.Query(
            dr=epcstd.DivideRatio.DR_8, m=epcstd.TagEncoding.M8, trext=False,
            sel=epcstd.SelFlag.SEL, session=epcstd.Session.S1,
            target=epcstd.InventoryFlag.A, q=3, crc=0xAA)
        # This QueryRep encodes as 0011.
        self.query_rep = epcstd.QueryRep(session=epcstd.Session.S3)
        # A fast and a slow preamble (for Query) and SYNC (for QueryRep).
        self.fast_preamble = epcstd.ReaderPreamble(
            tari=6.25e-6, rtcal=18.75e-6, trcal=56.25e-6)
        self.slow_preamble = epcstd.ReaderPreamble(
            tari=25e-6, rtcal=75e-6, trcal=225e-6)
        self.fast_sync = epcstd.ReaderSync(tari=12.5e-6, rtcal=31.25e-6)
        self.slow_sync = epcstd.ReaderSync(tari=25e-6, rtcal=62.5e-6)

    def test_query_frame_fast_preamble_duration(self):
        frame = epcstd.ReaderFrame(preamble=self.fast_preamble,
                                   command=self.query)
        self.assertAlmostEqual(frame.duration, 293.75e-6, 9)
        self.assertAlmostEqual(frame.body_duration, 200e-6, 9)

    def test_query_frame_slow_preamble_duration(self):
        frame = epcstd.ReaderFrame(preamble=self.slow_preamble,
                                   command=self.query)
        self.assertAlmostEqual(frame.duration, 1137.5e-6, 9)
        self.assertAlmostEqual(frame.body_duration, 800.0e-6, 9)

    def test_query_rep_frame_fast_sync_duration(self):
        frame = epcstd.ReaderFrame(preamble=self.fast_sync,
                                   command=self.query_rep)
        self.assertAlmostEqual(frame.body_duration, 62.5e-6, 9)
        self.assertAlmostEqual(frame.duration, 118.75e-6, 9)

    def test_query_rep_frame_slow_sync_duration(self):
        frame = epcstd.ReaderFrame(preamble=self.slow_sync,
                                   command=self.query_rep)
        self.assertAlmostEqual(frame.body_duration, 125e-6, 9)
        self.assertAlmostEqual(frame.duration, 225e-6, 9)
class TestTagFrames(unittest.TestCase):
    """Durations of tag frames (tag preamble + reply) at slow/fast BLF."""
    def setUp(self):
        # A 32-bit-EPC ACK reply, a bare RN16 reply, and two BLFs in Hz.
        self.ack_reply = epcstd.AckReply(epc="ABCDEF01", pc=0, crc=0)
        self.rn16_reply = epcstd.QueryReply(rn=0)
        self.slow_blf = 120e3
        self.fast_blf = 640e3
    def test_tag_fm0_frame_duration(self):
        """FM0 frames: body duration is bitlen/BLF; the extended preamble
        adds 12 bits to the frame total but not to the body."""
        pn = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, extended=False)
        pe = epcstd.create_tag_preamble(epcstd.TagEncoding.FM0, extended=True)
        ne_ack_frame = epcstd.TagFrame(preamble=pn, reply=self.ack_reply)
        ex_ack_frame = epcstd.TagFrame(preamble=pe, reply=self.ack_reply)
        ex_rn16_frame = epcstd.TagFrame(preamble=pe, reply=self.rn16_reply)
        self.assertAlmostEqual(ne_ack_frame.get_body_duration(self.slow_blf),
                               0.00053333333, 8)
        self.assertAlmostEqual(ne_ack_frame.get_duration(self.slow_blf),
                               0.00059166667, 8)
        self.assertAlmostEqual(ex_ack_frame.get_body_duration(self.slow_blf),
                               0.00053333333, 8)
        self.assertAlmostEqual(ex_ack_frame.get_duration(self.slow_blf),
                               0.00069166667, 8)
        self.assertAlmostEqual(ex_rn16_frame.get_body_duration(self.slow_blf),
                               0.00013333333, 8)
        self.assertAlmostEqual(ex_rn16_frame.get_duration(self.slow_blf),
                               0.00029166667, 8)
        self.assertAlmostEqual(ex_rn16_frame.get_body_duration(self.fast_blf),
                               2.5e-05, 8)
        self.assertAlmostEqual(ex_rn16_frame.get_duration(self.fast_blf),
                               5.46875e-05)
    def test_tag_m2_frame_duration(self):
        """M2 frames: each bit lasts 2/BLF (double the FM0 body duration);
        the extended preamble again only lengthens the frame total."""
        preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, False)
        ext_preamble = epcstd.create_tag_preamble(epcstd.TagEncoding.M2, True)
        frame = epcstd.TagFrame(preamble, self.rn16_reply)
        ext_frame = epcstd.TagFrame(ext_preamble, self.rn16_reply)
        self.assertAlmostEqual(frame.get_body_duration(self.slow_blf),
                               0.0002666666666666667, 8)
        self.assertAlmostEqual(frame.get_duration(self.slow_blf),
                               0.00045, 8)
        self.assertAlmostEqual(frame.get_body_duration(self.fast_blf),
                               5e-05, 8)
        self.assertAlmostEqual(frame.get_duration(self.fast_blf),
                               8.4375e-05, 8)
        self.assertAlmostEqual(ext_frame.get_body_duration(self.slow_blf),
                               frame.get_body_duration(self.slow_blf), 8)
        self.assertAlmostEqual(ext_frame.get_duration(self.slow_blf),
                               0.00065, 8)
class TestReaderFrameAccessors(unittest.TestCase):
    """Checks that epcstd.reader_frame_duration() matches ReaderFrame
    durations for Ack, QueryRep and Query — for command objects, for
    their encoded bit strings, and when falling back to stdParams."""
    def setUp(self):
        # Slow and fast reader timings; Query needs trcal, so both a
        # SYNC and a full preamble are built for each timing set.
        self.slow_tari = 12.5e-6
        self.slow_rtcal = 37.5e-6
        self.slow_trcal = 112.5e-6
        self.fast_tari = 6.25e-6
        self.fast_rtcal = 15.625e-6
        self.fast_trcal = 46.875e-6
        self.slow_sync = epcstd.ReaderSync(self.slow_tari, self.slow_rtcal)
        self.fast_sync = epcstd.ReaderSync(self.fast_tari, self.fast_rtcal)
        self.slow_preamble = epcstd.ReaderPreamble(
            self.slow_tari, self.slow_rtcal, self.slow_trcal)
        self.fast_preamble = epcstd.ReaderPreamble(
            self.fast_tari, self.fast_rtcal, self.fast_trcal)
        self.ack = epcstd.Ack(0xAAAA)
        self.query_rep = epcstd.QueryRep(epcstd.Session.S1)
        self.query = epcstd.Query()
        # Reference frames against which the helper results are compared.
        self.slow_ack_frame = epcstd.ReaderFrame(self.slow_sync, self.ack)
        self.fast_ack_frame = epcstd.ReaderFrame(self.fast_sync, self.ack)
        self.slow_query_rep_frame = epcstd.ReaderFrame(
            self.slow_sync, self.query_rep)
        self.fast_query_rep_frame = epcstd.ReaderFrame(
            self.fast_sync, self.query_rep)
        self.slow_query_frame = epcstd.ReaderFrame(
            self.slow_preamble, self.query)
        self.fast_query_frame = epcstd.ReaderFrame(
            self.fast_preamble, self.query)
    def test_reader_frame_duration_return_equals_sync_frame_getter(self):
        """Without trcal, the helper must match SYNC-framed commands."""
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal, command=self.ack),
            self.slow_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal, command=self.ack),
            self.fast_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal,
                command=self.query_rep),
            self.slow_query_rep_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal,
                command=self.query_rep),
            self.fast_query_rep_frame.duration, 8)
    def test_reader_frame_duration_return_equals_query_frame_getter(self):
        """With trcal given, the helper must match preamble-framed Query."""
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal,
                trcal=self.slow_trcal, command=self.query),
            self.slow_query_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal,
                trcal=self.fast_trcal, command=self.query),
            self.fast_query_frame.duration, 8)
    def test_reader_frame_duration_recognizes_encoded_sync_commands(self):
        """The helper must also accept already-encoded bit strings for
        SYNC-framed commands (Ack, QueryRep)."""
        encoded_ack = self.ack.encode()
        encoded_query_rep = self.query_rep.encode()
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal,
                command=encoded_ack),
            self.slow_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal,
                command=encoded_ack),
            self.fast_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal,
                command=encoded_query_rep),
            self.slow_query_rep_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal,
                command=encoded_query_rep),
            self.fast_query_rep_frame.duration, 8)
    def test_reader_frame_duration_recognizes_encoded_query_command(self):
        """The helper must also accept an already-encoded Query string."""
        encoded_query = self.query.encode()
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.slow_tari, rtcal=self.slow_rtcal,
                trcal=self.slow_trcal, command=encoded_query),
            self.slow_query_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(
                tari=self.fast_tari, rtcal=self.fast_rtcal,
                trcal=self.fast_trcal, command=encoded_query),
            self.fast_query_frame.duration, 8)
    def test_reader_frame_duration_uses_default_modelParams(self):
        """Omitting tari/rtcal/trcal must fall back to stdParams values."""
        #
        # 1) Setting stdParams to the slow frame type
        #
        epcstd.stdParams.tari = self.slow_tari
        epcstd.stdParams.rtcal = self.slow_rtcal
        epcstd.stdParams.trcal = self.slow_trcal
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.ack),
            self.slow_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.query_rep),
            self.slow_query_rep_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.query),
            self.slow_query_frame.duration, 8)
        #
        # 2) Setting stdParams to the fast frame type
        #
        epcstd.stdParams.tari = self.fast_tari
        epcstd.stdParams.rtcal = self.fast_rtcal
        epcstd.stdParams.trcal = self.fast_trcal
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.ack),
            self.fast_ack_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.query_rep),
            self.fast_query_rep_frame.duration, 8)
        self.assertAlmostEqual(
            epcstd.reader_frame_duration(self.query),
            self.fast_query_frame.duration, 8)
class TestCommandsDurationEstimations(unittest.TestCase):
"""
Test-cases for functions ``command_duration``, ``query_duration``,
``query_rep_duration``, etc.
"""
    def setUp(self):
        # Two parameter sets: 'slow' uses large timing/field values,
        # 'fast' uses small ones, so the two frames differ clearly.
        self.slow = dict(
            tari=25.0e-6, rtcal=75.0e-6, trcal=225.0e-6, delim=12.5e-6,
            dr=epcstd.DivideRatio.DR_8, m=epcstd.TagEncoding.M8, trext=True,
            sel=epcstd.SelFlag.SEL, session=epcstd.Session.S3,
            target=epcstd.InventoryFlag.B, q=15, rn=0xFFFF,
            bank=epcstd.MemoryBank.TID, word_ptr=0xF, word_cnt=15,
            crc5=0x1F, crc16=0xFFFF)
        self.fast = dict(
            tari=6.25e-6, rtcal=15.625e-6, trcal=17.1875e-6, delim=12.5e-6,
            dr=epcstd.DivideRatio.DR_643, m=epcstd.TagEncoding.FM0, trext=False,
            sel=epcstd.SelFlag.ALL, session=epcstd.Session.S0,
            target=epcstd.InventoryFlag.A, q=0, rn=0x0000,
            bank=epcstd.MemoryBank.EPC, word_ptr=0x0, word_cnt=1,
            crc5=0x00, crc16=0x0000)
        # Reference preambles (used by Query frames) and SYNCs (all other
        # commands) built from the same timing values.
        self.slow['preamble'] = epcstd.ReaderPreamble(
            self.slow['tari'], self.slow['rtcal'], self.slow['trcal'],
            self.slow['delim'])
        self.fast['preamble'] = epcstd.ReaderPreamble(
            self.fast['tari'], self.fast['rtcal'], self.fast['trcal'],
            self.fast['delim'])
        self.slow['sync'] = epcstd.ReaderSync(
            self.slow['tari'], self.slow['rtcal'], self.slow['delim'])
        self.fast['sync'] = epcstd.ReaderSync(
            self.fast['tari'], self.fast['rtcal'], self.fast['delim'])
@staticmethod
def get_command_duration(command_code, params):
return epcstd.command_duration(
command_code=command_code, tari=params['tari'],
rtcal=params['rtcal'], trcal=params['trcal'], delim=params['delim'],
dr=params['dr'], m=params['m'], trext=params['trext'],
sel=params['sel'], session=params['session'],
target=params['target'], q=params['q'], rn=params['rn'],
bank=params['bank'], word_ptr=params['word_ptr'],
word_count=params['word_cnt'], crc5=params['crc5'],
crc16=params['crc16'])
@staticmethod
def set_default_parameters(par):
epcstd.stdParams.tari = par['tari']
epcstd.stdParams.rtcal = par['rtcal']
epcstd.stdParams.trcal = par['trcal']
epcstd.stdParams.delim = par['delim']
epcstd.stdParams.divide_ratio = par['dr']
epcstd.stdParams.tag_encoding = par['m']
epcstd.stdParams.trext = par['trext']
epcstd.stdParams.sel = par['sel']
epcstd.stdParams.session = par['session']
epcstd.stdParams.target = par['target']
epcstd.stdParams.Q = par['q']
epcstd.stdParams.default_rn = par['rn']
epcstd.stdParams.read_default_bank = par['bank']
epcstd.stdParams.read_default_word_ptr = par['word_ptr']
epcstd.stdParams.read_default_word_count = par['word_cnt']
epcstd.stdParams.default_crc5 = par['crc5']
epcstd.stdParams.default_crc16 = par['crc16']
    def test_query_duration(self):
        """query_duration() and command_duration(QUERY) must both equal
        the duration of a reference Query frame, for both parameter sets."""
        slow_query = epcstd.Query(
            self.slow['dr'], self.slow['m'], self.slow['trext'],
            self.slow['sel'], self.slow['session'], self.slow['target'],
            self.slow['q'], self.slow['crc5'])
        fast_query = epcstd.Query(
            self.fast['dr'], self.fast['m'], self.fast['trext'],
            self.fast['sel'], self.fast['session'], self.fast['target'],
            self.fast['q'], self.fast['crc5'])
        slow_frame = epcstd.ReaderFrame(self.slow['preamble'], slow_query)
        fast_frame = epcstd.ReaderFrame(self.fast['preamble'], fast_query)
        self.assertAlmostEqual(
            slow_frame.duration,
            self.get_command_duration(epcstd.CommandCode.QUERY, self.slow),
            8, "command_duration(QUERY, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.query_duration(
                tari=self.slow['tari'], rtcal=self.slow['rtcal'],
                trcal=self.slow['trcal'], delim=self.slow['delim'],
                dr=self.slow['dr'], m=self.slow['m'], trext=self.slow['trext'],
                sel=self.slow['sel'], session=self.slow['session'],
                target=self.slow['target'], q=self.slow['q'],
                crc=self.slow['crc5']),
            8, "query_duration(slow params) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            self.get_command_duration(epcstd.CommandCode.QUERY, self.fast),
            8, "command_duration(QUERY, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.query_duration(
                tari=self.fast['tari'], rtcal=self.fast['rtcal'],
                trcal=self.fast['trcal'], delim=self.fast['delim'],
                dr=self.fast['dr'], m=self.fast['m'], trext=self.fast['trext'],
                sel=self.fast['sel'], session=self.fast['session'],
                target=self.fast['target'], q=self.fast['q'],
                crc=self.fast['crc5']),
            8, "query_duration(fast params) doesn't match frame")
def test_query_duration_with_default_parameters(self):
slow_query = epcstd.Query(
self.slow['dr'], self.slow['m'], self.slow['trext'],
self.slow['sel'], self.slow['session'], self.slow['target'],
self.slow['q'], self.slow['crc5'])
fast_query = epcstd.Query(
self.fast['dr'], self.fast['m'], self.fast['trext'],
self.fast['sel'], self.fast['session'], self.fast['target'],
self.fast['q'], self.fast['crc5'])
slow_frame = epcstd.ReaderFrame(self.slow['preamble'], slow_query)
fast_frame = epcstd.ReaderFrame(self.fast['preamble'], fast_query)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY), 8,
"command_duration(QUERY, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.query_duration(), 8,
"query_duration(default=slow) doesnt' match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.QUERY), 8,
"command_duration(QUERY, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.query_duration(), 8,
"query_duration(default=fast) doesn't match frame")
    def test_query_rep_duration(self):
        """query_rep_duration() and command_duration(QUERY_REP) must both
        equal the duration of a reference QueryRep frame."""
        slow_qrep = epcstd.QueryRep(self.slow['session'])
        fast_qrep = epcstd.QueryRep(self.fast['session'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_qrep)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_qrep)
        self.assertAlmostEqual(
            slow_frame.duration,
            self.get_command_duration(epcstd.CommandCode.QUERY_REP, self.slow),
            8, "command_duration(QUERY_REP, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.query_rep_duration(
                tari=self.slow['tari'], rtcal=self.slow['rtcal'],
                trcal=self.slow['trcal'], delim=self.slow['delim'],
                session=self.slow['session']),
            8, "query_rep_duration(slow) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            self.get_command_duration(epcstd.CommandCode.QUERY_REP, self.fast),
            8, "command_duration(QUERY_REP, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.query_rep_duration(
                tari=self.fast['tari'], rtcal=self.fast['rtcal'],
                trcal=self.fast['trcal'], delim=self.fast['delim'],
                session=self.fast['session']),
            8, "query_rep_duration(fast) doesn't match frame")
    def test_query_rep_duration_with_default_parameters(self):
        """query_rep_duration() and command_duration(QUERY_REP) without
        arguments must use stdParams defaults and match reference frames."""
        slow_qrep = epcstd.QueryRep(self.slow['session'])
        fast_qrep = epcstd.QueryRep(self.fast['session'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_qrep)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_qrep)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.QUERY_REP), 8,
            "command_duration(QUERY_REP, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration, epcstd.query_rep_duration(), 8,
            "query_rep_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.QUERY_REP), 8,
            "command_duration(QUERY_REP, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration, epcstd.query_rep_duration(), 8,
            "query_rep_duration(default=fast) doesn't match frame")
    def test_ack_duration(self):
        """ack_duration() and command_duration(ACK) must both equal the
        duration of a reference Ack frame."""
        slow_ack = epcstd.Ack(self.slow['rn'])
        fast_ack = epcstd.Ack(self.fast['rn'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_ack)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_ack)
        self.assertAlmostEqual(
            slow_frame.duration,
            self.get_command_duration(epcstd.CommandCode.ACK, self.slow),
            8, "command_duration(ACK, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.ack_duration(
                tari=self.slow['tari'], rtcal=self.slow['rtcal'],
                trcal=self.slow['trcal'], delim=self.slow['delim'],
                rn=self.slow['rn']),
            8, "ack_duration(slow) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            self.get_command_duration(epcstd.CommandCode.ACK, self.fast),
            8, "command_duration(ACK, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.ack_duration(
                tari=self.fast['tari'], rtcal=self.fast['rtcal'],
                trcal=self.fast['trcal'], delim=self.fast['delim'],
                rn=self.fast['rn']),
            8, "ack_duration(fast) doesn't match frame")
    def test_ack_duration_with_default_parameters(self):
        """ack_duration() and command_duration(ACK) without arguments
        must use stdParams defaults and match reference frames."""
        slow_ack = epcstd.Ack(self.slow['rn'])
        fast_ack = epcstd.Ack(self.fast['rn'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_ack)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_ack)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.ACK), 8,
            "command_duration(ACK, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration, epcstd.ack_duration(), 8,
            "ack_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.ACK), 8,
            "command_duration(ACK, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration, epcstd.ack_duration(), 8,
            "ack_duration(default=fast) doesn't match frame")
    def test_reqrn_duration(self):
        """reqrn_duration() and command_duration(REQ_RN) must both equal
        the duration of a reference ReqRN frame."""
        slow_reqrn = epcstd.ReqRN(self.slow['rn'], self.slow['crc16'])
        fast_reqrn = epcstd.ReqRN(self.fast['rn'], self.fast['crc16'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_reqrn)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_reqrn)
        self.assertAlmostEqual(
            slow_frame.duration,
            self.get_command_duration(epcstd.CommandCode.REQ_RN, self.slow),
            8, "command_duration(REQ_RN, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.reqrn_duration(
                tari=self.slow['tari'], rtcal=self.slow['rtcal'],
                trcal=self.slow['trcal'], delim=self.slow['delim'],
                rn=self.slow['rn'], crc=self.slow['crc16']),
            8, "reqrn_duration(slow) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            self.get_command_duration(epcstd.CommandCode.REQ_RN, self.fast),
            8, "command_duration(REQ_RN, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.reqrn_duration(
                tari=self.fast['tari'], rtcal=self.fast['rtcal'],
                trcal=self.fast['trcal'], delim=self.fast['delim'],
                rn=self.fast['rn'], crc=self.fast['crc16']),
            8, "reqrn_duration(fast) doesn't match frame")
    def test_reqrn_duration_with_default_parameters(self):
        """reqrn_duration() and command_duration(REQ_RN) without arguments
        must use stdParams defaults and match reference frames."""
        slow_reqrn = epcstd.ReqRN(self.slow['rn'], self.slow['crc16'])
        fast_reqrn = epcstd.ReqRN(self.fast['rn'], self.fast['crc16'])
        slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_reqrn)
        fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_reqrn)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.REQ_RN), 8,
            "command_duration(REQ_RN, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.duration, epcstd.reqrn_duration(), 8,
            "reqrn_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.duration,
            epcstd.command_duration(epcstd.CommandCode.REQ_RN), 8,
            "command_duration(REQ_RN, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.duration, epcstd.reqrn_duration(), 8,
            "reqrn_duration(default=fast) doesn't match frame")
def test_read_duration(self):
slow_read = epcstd.Read(self.slow['bank'], self.slow['word_ptr'],
self.slow['word_cnt'], self.slow['rn'],
self.slow['crc16'])
fast_read = epcstd.Read(self.fast['bank'], self.fast['word_ptr'],
self.fast['word_cnt'], self.fast['rn'],
self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_read)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_read)
self.assertAlmostEqual(
slow_frame.duration,
self.get_command_duration(epcstd.CommandCode.READ, self.slow),
8, "command_duration(READ, slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration,
epcstd.read_duration(
tari=self.slow['tari'], rtcal=self.slow['rtcal'],
trcal=self.slow['trcal'], delim=self.slow['delim'],
bank=self.slow['bank'], word_ptr=self.slow['word_ptr'],
word_count=self.slow['word_cnt'], rn=self.slow['rn'],
crc=self.slow['crc16']),
8, "read_duration(slow params) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
self.get_command_duration(epcstd.CommandCode.READ, self.fast),
8, "command_duration(READ, fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration,
epcstd.read_duration(
tari=self.fast['tari'], rtcal=self.fast['rtcal'],
trcal=self.fast['trcal'], delim=self.fast['delim'],
bank=self.fast['bank'], word_ptr=self.fast['word_ptr'],
word_count=self.fast['word_cnt'], rn=self.fast['rn'],
crc=self.fast['crc16']),
8, "read_duration(fast params) doesn't match frame")
def test_read_duration_with_default_parameters(self):
slow_read = epcstd.Read(self.slow['bank'], self.slow['word_ptr'],
self.slow['word_cnt'], self.slow['rn'],
self.slow['crc16'])
fast_read = epcstd.Read(self.fast['bank'], self.fast['word_ptr'],
self.fast['word_cnt'], self.fast['rn'],
self.fast['crc16'])
slow_frame = epcstd.ReaderFrame(self.slow['sync'], slow_read)
fast_frame = epcstd.ReaderFrame(self.fast['sync'], fast_read)
self.set_default_parameters(self.slow)
self.assertAlmostEqual(
slow_frame.duration,
epcstd.command_duration(epcstd.CommandCode.READ), 8,
"command_duration(READ, default=slow) doesn't match frame")
self.assertAlmostEqual(
slow_frame.duration, epcstd.read_duration(), 8,
"read_duration(default=slow) doesn't match frame")
self.set_default_parameters(self.fast)
self.assertAlmostEqual(
fast_frame.duration,
epcstd.command_duration(epcstd.CommandCode.READ), 8,
"command_duration(READ, default=fast) doesn't match frame")
self.assertAlmostEqual(
fast_frame.duration, epcstd.read_duration(), 8,
"read_duration(default=fast) doesn't match frame")
class TestTagFrameAccessors(unittest.TestCase):
    """Checks ``tag_frame_duration`` against ``TagFrame.get_duration``."""

    def setUp(self):
        # Every (encoding, trext) preamble flavour, a couple of reply types
        # and several backscatter link frequencies to sweep over.
        self.preambles = []
        for encoding in epcstd.TagEncoding:
            for trext in (True, False):
                self.preambles.append(
                    epcstd.create_tag_preamble(encoding, trext))
        self.replies = [epcstd.QueryReply(), epcstd.AckReply(epc="01234567")]
        self.blfs = [40e3, 160e3, 360e3]

    def _iter_cases(self):
        # Yield every (preamble, reply, blf) combination under test.
        for preamble in self.preambles:
            for reply in self.replies:
                for blf in self.blfs:
                    yield preamble, reply, blf

    def test_get_tag_frame_duration_equals_tag_frame_getter(self):
        for preamble, reply, blf in self._iter_cases():
            frame = epcstd.TagFrame(preamble, reply)
            self.assertAlmostEqual(
                epcstd.tag_frame_duration(
                    reply, blf, preamble.encoding, preamble.extended),
                frame.get_duration(blf), 8)

    def test_get_tag_frame_duration_uses_default_modelParams(self):
        epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
        for preamble, reply, blf in self._iter_cases():
            # Mirror the case parameters into the module-wide defaults so
            # the no-argument call resolves to the same configuration.
            epcstd.stdParams.trext = preamble.extended
            epcstd.stdParams.tag_encoding = preamble.encoding
            epcstd.stdParams.trcal = (
                epcstd.stdParams.divide_ratio.eval() / blf)
            frame = epcstd.TagFrame(preamble, reply)
            self.assertAlmostEqual(
                epcstd.tag_frame_duration(reply),
                frame.get_duration(blf), 8, "frame = {}".format(frame))
class TestRepliesDurationEstimations(unittest.TestCase):
    """
    Test-cases for functions ``reply_duration``, ``query_reply_duration``,
    ``ack_reply_duration``, etc.

    Each test checks a tag reply duration in two ways against the reference
    ``TagFrame.get_duration()`` value: via the generic ``reply_duration``
    dispatcher and via the reply-specific helper. "slow" and "fast" describe
    two opposite link configurations (longest vs. shortest symbols).
    """
    # Build the two link configurations and derive BLF and preamble for each.
    def setUp(self):
        self.slow = dict(dr=epcstd.DivideRatio.DR_8, trcal=225.0e-6,
                         encoding=epcstd.TagEncoding.M8, trext=True,
                         epc_bytelen=12, word_cnt=15)
        self.fast = dict(dr=epcstd.DivideRatio.DR_643, trcal=17.875e-6,
                         encoding=epcstd.TagEncoding.FM0, trext=False,
                         epc_bytelen=4, word_cnt=1)
        for par in [self.slow, self.fast]:
            par['blf'] = epcstd.get_blf(par['dr'], par['trcal'])
            par['preamble'] = epcstd.create_tag_preamble(
                par['encoding'], par['trext'])
    # Call the generic dispatcher with every parameter taken from ``par``.
    @staticmethod
    def get_reply_duration(reply_type, par):
        return epcstd.reply_duration(
            reply_type=reply_type, dr=par['dr'], trcal=par['trcal'],
            encoding=par['encoding'], trext=par['trext'],
            epc_bytelen=par['epc_bytelen'], words_count=par['word_cnt'])
    # Mirror ``par`` into the module-wide defaults (stdParams) so the
    # no-argument estimator calls resolve to the same configuration.
    @staticmethod
    def set_default_parameters(par):
        epcstd.stdParams.divide_ratio = par['dr']
        epcstd.stdParams.trcal = par['trcal']
        epcstd.stdParams.tag_encoding = par['encoding']
        epcstd.stdParams.trext = par['trext']
        epcstd.stdParams.default_epc = "FF" * par['epc_bytelen']
        epcstd.stdParams.read_default_word_count = par['word_cnt']
    # --- QueryReply (RN16) -------------------------------------------------
    def test_query_reply_duration(self):
        reply = epcstd.QueryReply(0x0000)
        slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            self.get_reply_duration(epcstd.ReplyType.QUERY_REPLY, self.slow),
            8, "reply_duration(QUERY_REPLY, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.query_reply_duration(
                dr=self.slow['dr'], trcal=self.slow['trcal'],
                encoding=self.slow['encoding'], trext=self.slow['trext']),
            8, "query_reply_duration(slow params) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            self.get_reply_duration(epcstd.ReplyType.QUERY_REPLY, self.fast),
            8, "reply_duration(QUERY_REPLY, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.query_reply_duration(
                self.fast['dr'], self.fast['trcal'], self.fast['encoding'],
                self.fast['trext']),
            8, "query_reply_duration(fast params) doesn't match frame")
    def test_query_reply_duration_with_default_parameters(self):
        reply = epcstd.QueryReply()
        slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reply_duration(epcstd.ReplyType.QUERY_REPLY), 8,
            "reply_duration(QUERY_REPLY, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.query_reply_duration(), 8,
            "query_reply_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reply_duration(epcstd.ReplyType.QUERY_REPLY), 8,
            "reply_duration(QUERY_REPLY, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.query_reply_duration(), 8,
            "query_reply_duration(default=fast) doesn't match frame")
    # --- AckReply (PC + EPC + CRC16) ---------------------------------------
    def test_ack_reply_duration(self):
        slow_reply = epcstd.AckReply(epc=("FF" * self.slow['epc_bytelen']))
        fast_reply = epcstd.AckReply(epc=("FF" * self.fast['epc_bytelen']))
        slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            self.get_reply_duration(epcstd.ReplyType.ACK_REPLY, self.slow),
            8, "reply_duration(ACK_REPLY, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.ack_reply_duration(
                dr=self.slow['dr'], trcal=self.slow['trcal'],
                encoding=self.slow['encoding'], trext=self.slow['trext'],
                epc_bytelen=self.slow['epc_bytelen']),
            8, "ack_reply_duration(slow params) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            self.get_reply_duration(epcstd.ReplyType.ACK_REPLY, self.fast),
            8, "reply_duration(ACK_REPLY, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.ack_reply_duration(
                dr=self.fast['dr'], trcal=self.fast['trcal'],
                encoding=self.fast['encoding'], trext=self.fast['trext'],
                epc_bytelen=self.fast['epc_bytelen']),
            8, "ack_reply_duration(fast params) doesn't match frame")
    def test_ack_reply_duration_with_default_parameters(self):
        slow_reply = epcstd.AckReply(epc=("FF" * self.slow['epc_bytelen']))
        fast_reply = epcstd.AckReply(epc=("FF" * self.fast['epc_bytelen']))
        slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reply_duration(epcstd.ReplyType.ACK_REPLY), 8,
            "reply_duration(ACK_REPLY, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.ack_reply_duration(), 8,
            "ack_reply_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reply_duration(epcstd.ReplyType.ACK_REPLY), 8,
            "reply_duration(ACK_REPLY, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.ack_reply_duration(), 8,
            "ack_reply_duration(default=fast) doesn't match frame")
    # --- ReqRnReply --------------------------------------------------------
    def test_reqrn_reply_duration(self):
        reply = epcstd.ReqRnReply()
        slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            self.get_reply_duration(epcstd.ReplyType.REQRN_REPLY, self.slow),
            8, "reply_duration(REQRN_REPLY, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reqrn_reply_duration(
                dr=self.slow['dr'], trcal=self.slow['trcal'],
                encoding=self.slow['encoding'], trext=self.slow['trext']),
            8, "reqrn_reply_duration(slow params) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            self.get_reply_duration(epcstd.ReplyType.REQRN_REPLY, self.fast),
            8, "reply_duration(REQRN_REPLY, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reqrn_reply_duration(
                dr=self.fast['dr'], trcal=self.fast['trcal'],
                encoding=self.fast['encoding'], trext=self.fast['trext']),
            8, "reqrn_reply_duration(fast params) doesn't match frame")
    def test_reqrn_reply_duration_with_default_parameters(self):
        reply = epcstd.ReqRnReply()
        slow_frame = epcstd.TagFrame(self.slow['preamble'], reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], reply)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reply_duration(epcstd.ReplyType.REQRN_REPLY), 8,
            "reply_duration(REQRN_REPLY, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reqrn_reply_duration(), 8,
            "reqrn_reply_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reply_duration(epcstd.ReplyType.REQRN_REPLY), 8,
            "reply_duration(REQRN_REPLY, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reqrn_reply_duration(), 8,
            "reqrn_reply_duration(default=fast) doesn't match frame")
    # --- ReadReply (memory words + RN + CRC) -------------------------------
    def test_read_reply_duration(self):
        slow_reply = epcstd.ReadReply("FFFF" * self.slow['word_cnt'])
        fast_reply = epcstd.ReadReply("FFFF" * self.fast['word_cnt'])
        slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            self.get_reply_duration(epcstd.ReplyType.READ_REPLY, self.slow),
            8, "reply_duration(READ_REPLY, slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.read_reply_duration(
                dr=self.slow['dr'], trcal=self.slow['trcal'],
                encoding=self.slow['encoding'], trext=self.slow['trext'],
                words_count=self.slow['word_cnt']),
            8, "read_reply_duration(slow params) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            self.get_reply_duration(epcstd.ReplyType.READ_REPLY, self.fast),
            8, "reply_duration(READ_REPLY, fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.read_reply_duration(
                dr=self.fast['dr'], trcal=self.fast['trcal'],
                encoding=self.fast['encoding'], trext=self.fast['trext'],
                words_count=self.fast['word_cnt']),
            8, "read_reply_duration(fast params) doesn't match frame")
    def test_read_reply_duration_with_default_parameters(self):
        slow_reply = epcstd.ReadReply("FFFF" * self.slow['word_cnt'])
        fast_reply = epcstd.ReadReply("FFFF" * self.fast['word_cnt'])
        slow_frame = epcstd.TagFrame(self.slow['preamble'], slow_reply)
        fast_frame = epcstd.TagFrame(self.fast['preamble'], fast_reply)
        self.set_default_parameters(self.slow)
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.reply_duration(epcstd.ReplyType.READ_REPLY), 8,
            "reply_duration(READ_REPLY, default=slow) doesn't match frame")
        self.assertAlmostEqual(
            slow_frame.get_duration(self.slow['blf']),
            epcstd.read_reply_duration(), 8,
            "read_reply_duration(default=slow) doesn't match frame")
        self.set_default_parameters(self.fast)
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.reply_duration(epcstd.ReplyType.READ_REPLY), 8,
            "reply_duration(READ_REPLY, default=fast) doesn't match frame")
        self.assertAlmostEqual(
            fast_frame.get_duration(self.fast['blf']),
            epcstd.read_reply_duration(), 8,
            "read_reply_duration(default=fast) doesn't match frame")
class TestFrequencyToleranceEstimator(unittest.TestCase):
    """
    Test-cases for ``get_frt`` (frequency tolerance lookup).

    The node/interval tables below appear to mirror the EPC Gen2
    frequency-tolerance tables (TRcal in microseconds -> FrT) — confirm
    against the standard if they are ever changed.
    """
    # How many random TRcal points to probe inside each interval.
    NUM_RANDOM_CHECKS = 5
    # Check FrT exactly at the table node TRcal values (given in us).
    def assert_frt_node_values(self, node_values, dr, temp):
        for trcal, frt in node_values:
            self.assertAlmostEqual(
                epcstd.get_frt(trcal*1e-6, dr, temp), frt, 8,
                "trcal={} (table node)".format(trcal))
    # Check FrT just inside each (lb, rb) interval: near both bounds
    # (offset by ~1.1% to stay off the nodes) and at random inner points.
    # NOTE: ``uniform`` is presumably random.uniform imported at module
    # level — not visible in this chunk.
    def assert_frt_interval_values(self, interval_values, dr, temp):
        for lb, rb, frt in interval_values:
            low_trcal = lb * 1.011 * 1e-6
            top_trcal = rb * 0.989 * 1e-6
            self.assertAlmostEqual(
                epcstd.get_frt(low_trcal, dr, temp), frt, 8,
                "trcal={} (interval left bound)".format(low_trcal))
            self.assertAlmostEqual(
                epcstd.get_frt(top_trcal, dr, temp), frt, 8,
                "trcal={} (interval right bound)".format(top_trcal))
            for i in range(TestFrequencyToleranceEstimator.NUM_RANDOM_CHECKS):
                trcal = uniform(low_trcal, top_trcal)
                self.assertAlmostEqual(
                    epcstd.get_frt(trcal, dr, temp), frt, 8,
                    "trcal={} (interval random internal point)".format(trcal))
    def test_tolerance_for_dr643_nominal_temp(self):
        node_values = [(33.3, 0.15), (66.7, 0.1), (83.3, 0.1)]
        intervals = [(33.3, 66.7, 0.22), (66.7, 83.3, 0.12),
                     (83.3, 133.3, 0.1), (133.3, 200.0, 0.07),
                     (200.0, 225.0, 0.05)]
        self.assert_frt_node_values(
            node_values, epcstd.DivideRatio.DR_643, epcstd.TempRange.NOMINAL)
        self.assert_frt_interval_values(
            intervals, epcstd.DivideRatio.DR_643, epcstd.TempRange.NOMINAL)
    def test_tolerance_for_dr643_extended_temp(self):
        node_values = [(33.3, 0.15), (66.7, 0.15), (83.3, 0.1)]
        intervals = [(33.3, 66.7, 0.22), (66.7, 83.3, 0.15),
                     (83.3, 133.3, 0.12), (133.3, 200.0, 0.07),
                     (200.0, 225.0, 0.05)]
        self.assert_frt_node_values(
            node_values, epcstd.DivideRatio.DR_643, epcstd.TempRange.EXTENDED)
        self.assert_frt_interval_values(
            intervals, epcstd.DivideRatio.DR_643, epcstd.TempRange.EXTENDED)
    def test_tolerance_for_dr8_nominal_temp(self):
        node_values = [(25.0, 0.10), (31.25, 0.10), (50.0, 0.07)]
        intervals = [(17.2, 25.0, 0.19), (25.0, 31.25, 0.12),
                     (31.25, 50.0, 0.10), (50.0, 75.0, 0.07),
                     (75.0, 200.0, 0.04)]
        self.assert_frt_node_values(
            node_values, epcstd.DivideRatio.DR_8, epcstd.TempRange.NOMINAL)
        self.assert_frt_interval_values(
            intervals, epcstd.DivideRatio.DR_8, epcstd.TempRange.NOMINAL)
    def test_tolerance_for_dr8_extended_temp(self):
        node_values = [(25.0, 0.15), (31.25, 0.10), (50.0, 0.07)]
        intervals = [(17.2, 25.0, 0.19), (25.0, 31.25, 0.15),
                     (31.25, 50.0, 0.10), (50.0, 75.0, 0.07),
                     (75.0, 200.0, 0.04)]
        self.assert_frt_node_values(
            node_values, epcstd.DivideRatio.DR_8, epcstd.TempRange.EXTENDED)
        self.assert_frt_interval_values(
            intervals, epcstd.DivideRatio.DR_8, epcstd.TempRange.EXTENDED)
    # With no arguments, get_frt must read temp_range/divide_ratio/trcal
    # from the module-wide stdParams.
    def test_get_frt_uses_readerParams(self):
        epcstd.stdParams.temp_range = epcstd.TempRange.NOMINAL
        epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_8
        epcstd.stdParams.trcal = 31.25e-6
        self.assertAlmostEqual(epcstd.get_frt(), 0.10, 3)
        epcstd.stdParams.temp_range = epcstd.TempRange.EXTENDED
        epcstd.stdParams.divide_ratio = epcstd.DivideRatio.DR_643
        epcstd.stdParams.trcal = 66.7e-6
        self.assertAlmostEqual(epcstd.get_frt(), 0.15, 3)
class TestLinkTimings(unittest.TestCase):
    """
    Test-cases for the link timeout estimators (T1..T7): the per-timeout
    ``link_tN_min``/``link_tN_max`` functions and the universal
    ``min_link_t``/``max_link_t`` getters, with both explicit parameters
    and defaults taken from ``stdParams``.

    FIX: the out-of-range index checks previously put two calls inside a
    single ``assertRaises`` block; the second call was never executed
    because the first one raised. Each call now gets its own block.
    """
    # Small record holding the seven timeouts, indexable as ts[1]..ts[7].
    class Timeouts:
        t1 = None
        t2 = None
        t3 = None
        t4 = None
        t5 = None
        t6 = None
        t7 = None
        def __getitem__(self, index):
            ts = [self.t1, self.t2, self.t3, self.t4, self.t5, self.t6, self.t7]
            if 1 <= index <= 7:
                return ts[index - 1]
            else:
                raise IndexError("timeout index must be between 1 and 7")
        def __setitem__(self, key, value):
            if key == 1:
                self.t1 = value
            elif key == 2:
                self.t2 = value
            elif key == 3:
                self.t3 = value
            elif key == 4:
                self.t4 = value
            elif key == 5:
                self.t5 = value
            elif key == 6:
                self.t6 = value
            elif key == 7:
                self.t7 = value
            else:
                raise IndexError("timeout index must be between 1 and 7")
    # Pair of Timeouts records: lower and upper bounds.
    class TBounds:
        t_min = None
        t_max = None
        def __init__(self):
            self.t_min = TestLinkTimings.Timeouts()
            self.t_max = TestLinkTimings.Timeouts()
    def setUp(self):
        # Two opposite link setups plus the hand-computed expected bounds.
        self.temp = epcstd.TempRange.NOMINAL
        self.slow_tari = 25.0e-6
        self.slow_rtcal = self.slow_tari * 3
        self.slow_trcal = self.slow_rtcal * 3
        self.slow_dr = epcstd.DivideRatio.DR_8
        self.slow_blf = epcstd.get_blf(self.slow_dr, self.slow_trcal)
        self.slow_frt = epcstd.get_frt(self.slow_trcal, self.slow_dr,
                                       self.temp)
        self.fast_tari = 6.25e-6
        self.fast_rtcal = self.fast_tari * 2.5
        self.fast_trcal = self.fast_rtcal * 1.1
        self.fast_dr = epcstd.DivideRatio.DR_643
        self.fast_blf = epcstd.get_blf(self.fast_dr, self.fast_trcal)
        self.fast_frt = epcstd.get_frt(self.fast_trcal, self.fast_dr,
                                       self.temp)
        self.expected_timeouts = {
            "slow": self.TBounds(),
            "fast": self.TBounds()
        }
        t_slow_min = self.expected_timeouts["slow"].t_min
        t_slow_max = self.expected_timeouts["slow"].t_max
        t_fast_min = self.expected_timeouts["fast"].t_min
        t_fast_max = self.expected_timeouts["fast"].t_max
        t_slow_min.t1 = 281.25e-6 * (1.0 - self.slow_frt) - 2e-6
        t_slow_min.t2 = 84.375e-06
        t_slow_min.t3 = 0.0
        t_slow_min.t4 = 150e-6
        t_slow_min.t5 = t_slow_min.t1
        t_slow_min.t6 = t_slow_min.t1
        t_slow_min.t7 = 562.5e-6
        t_fast_min.t1 = 15.625e-6 * (1.0 - self.fast_frt) - 2e-6
        t_fast_min.t2 = 2.4169921875e-06
        t_fast_min.t3 = 0.0
        t_fast_min.t4 = 31.25e-6
        t_fast_min.t5 = t_fast_min.t1
        t_fast_min.t6 = t_fast_min.t1
        t_fast_min.t7 = 250.0e-6
        t_slow_max.t1 = 281.25e-6 * (1.0 + self.slow_frt) + 2e-6
        t_slow_max.t2 = 562.5e-6
        t_slow_max.t5 = 20e-3
        t_slow_max.t6 = 20e-3
        t_slow_max.t7 = 20e-3
        t_fast_max.t1 = 15.625e-6 * (1.0 + self.fast_frt) + 2e-6
        t_fast_max.t2 = 16.11328125e-06
        t_fast_max.t5 = 20e-3
        t_fast_max.t6 = 20e-3
        t_fast_max.t7 = 20e-3
    #
    # HELPERS FOR TIMEOUTS CHECKS
    #
    def assertTimeoutsEqual(self, actual, expected, num_digits=8,
                            prefix="", suffix=""):
        # Compare the timeouts that have an expected value; None = skip.
        for i in range(1, 8):
            if expected[i] is not None:
                self.assertAlmostEqual(actual[i], expected[i], num_digits,
                                       "{} T{}({})".format(prefix, i, suffix))
    # Gather all minimal timeouts via the per-timeout functions.
    def build_t_min(self, rtcal=None, trcal=None, dr=None, temp=None):
        ts = self.Timeouts()
        ts.t1 = epcstd.link_t1_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
        ts.t2 = epcstd.link_t2_min(trcal=trcal, dr=dr)
        ts.t3 = epcstd.link_t3()
        ts.t4 = epcstd.link_t4(rtcal=rtcal)
        ts.t5 = epcstd.link_t5_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
        ts.t6 = epcstd.link_t6_min(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
        ts.t7 = epcstd.link_t7_min(trcal=trcal, dr=dr)
        return ts
    # Gather maximal timeouts (T3/T4 have no finite maximum helpers).
    def build_t_max(self, rtcal=None, trcal=None, dr=None, temp=None):
        ts = self.Timeouts()
        ts.t1 = epcstd.link_t1_max(rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
        ts.t2 = epcstd.link_t2_max(trcal=trcal, dr=dr)
        ts.t5 = epcstd.link_t5_max()
        ts.t6 = epcstd.link_t6_max()
        ts.t7 = epcstd.link_t7_max()
        return ts
    # Same as build_t_min, but through the universal min_link_t getter.
    def build_t_min_with_universal_getter(
            self, rtcal=None, trcal=None, dr=None, temp=None):
        ts = self.Timeouts()
        for i in range(1, 8):
            ts[i] = epcstd.min_link_t(i, rtcal=rtcal, trcal=trcal, dr=dr,
                                      temp=temp)
        return ts
    # Same as build_t_max, but through the universal max_link_t getter.
    def build_t_max_with_universal_getter(
            self, rtcal=None, trcal=None, dr=None, temp=None):
        ts = self.Timeouts()
        for i in [1, 2, 5, 6, 7]:
            ts[i] = epcstd.max_link_t(i, rtcal=rtcal, trcal=trcal, dr=dr,
                                      temp=temp)
        return ts
    # Mirror explicit link parameters into the module-wide stdParams.
    @staticmethod
    def set_default_modelParams(rtcal, trcal, dr, temp):
        epcstd.stdParams.rtcal = rtcal
        epcstd.stdParams.trcal = trcal
        epcstd.stdParams.divide_ratio = dr
        epcstd.stdParams.temp_range = temp
    #
    # TESTS
    #
    def test_get_pri_with_explicit_parameters(self):
        self.assertAlmostEqual(
            epcstd.get_pri(trcal=self.slow_trcal, dr=self.slow_dr),
            1.0 / self.slow_blf, 8)
        self.assertAlmostEqual(
            epcstd.get_pri(trcal=self.fast_trcal, dr=self.fast_dr),
            1.0 / self.fast_blf, 8)
    def test_get_pri_with_implicit_parameters_from_readerParams(self):
        epcstd.stdParams.trcal = self.slow_trcal
        epcstd.stdParams.divide_ratio = self.slow_dr
        self.assertAlmostEqual(epcstd.get_pri(), 1.0 / self.slow_blf, 8)
        epcstd.stdParams.trcal = self.fast_trcal
        epcstd.stdParams.divide_ratio = self.fast_dr
        self.assertAlmostEqual(epcstd.get_pri(), 1.0 / self.fast_blf, 8)
    def test_custom_get_tX_min_with_explicit_parameters(self):
        actual_timeouts = {
            "slow": self.build_t_min(self.slow_rtcal, self.slow_trcal,
                                     self.slow_dr, self.temp),
            "fast": self.build_t_min(self.fast_rtcal, self.fast_trcal,
                                     self.fast_dr, self.temp)
        }
        for key in ["slow", "fast"]:
            self.assertTimeoutsEqual(
                actual_timeouts[key], self.expected_timeouts[key].t_min,
                prefix=key, suffix="min")
    def test_custom_get_tX_max_with_explicit_parameters(self):
        actual_timeouts = {
            "slow": self.build_t_max(self.slow_rtcal, self.slow_trcal,
                                     self.slow_dr, self.temp),
            "fast": self.build_t_max(self.fast_rtcal, self.fast_trcal,
                                     self.fast_dr, self.temp)
        }
        for key in ["slow", "fast"]:
            self.assertTimeoutsEqual(
                actual_timeouts[key], self.expected_timeouts[key].t_max,
                prefix=key, suffix="max")
    def test_custom_get_tX_with_implicit_parameters_from_modelParams(self):
        # Setting up slow link parameters
        self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
                                     self.slow_dr, self.temp)
        t_min = self.build_t_min()  # leaving all parameters None
        t_max = self.build_t_max()  # leaving all parameters None
        self.assertTimeoutsEqual(t_min, self.expected_timeouts["slow"].t_min,
                                 prefix="default slow", suffix="min")
        self.assertTimeoutsEqual(t_max, self.expected_timeouts["slow"].t_max,
                                 prefix="default slow", suffix="max")
        # Setting up fast link parameters
        self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
                                     self.fast_dr, self.temp)
        t_min = self.build_t_min()  # leaving all parameters None
        t_max = self.build_t_max()  # leaving all parameters None
        self.assertTimeoutsEqual(t_min, self.expected_timeouts["fast"].t_min,
                                 prefix="default fast", suffix="min")
        self.assertTimeoutsEqual(t_max, self.expected_timeouts["fast"].t_max,
                                 prefix="default fast", suffix="max")
    def test_universal_get_t_min_with_explicit_parameters(self):
        actual_timeouts = {
            "slow": self.build_t_min_with_universal_getter(
                self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp),
            "fast": self.build_t_min_with_universal_getter(
                self.fast_rtcal, self.fast_trcal, self.fast_dr, self.temp),
        }
        for key in ['slow', 'fast']:
            self.assertTimeoutsEqual(
                actual_timeouts[key], self.expected_timeouts[key].t_min,
                num_digits=8, prefix=key, suffix="min")
        # Check that get_t_min works for n=1..7 only.
        # BUGFIX: each out-of-range call gets its own assertRaises block —
        # a second statement after a raising call would never execute.
        with self.assertRaises(ValueError):
            epcstd.min_link_t(
                0, self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp)
        with self.assertRaises(ValueError):
            epcstd.min_link_t(
                8, self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp)
    def test_universal_get_t_max_with_explicit_parameters(self):
        actual_timeouts = {
            "slow": self.build_t_max_with_universal_getter(
                self.slow_rtcal, self.slow_trcal, self.slow_dr, self.temp),
            "fast": self.build_t_max_with_universal_getter(
                self.fast_rtcal, self.fast_trcal, self.fast_dr, self.temp),
        }
        for key in ["slow", "fast"]:
            self.assertTimeoutsEqual(
                actual_timeouts[key], self.expected_timeouts[key].t_max,
                num_digits=8, prefix=key, suffix="max")
        # T3 has no upper bound, so max_link_t(3) reports +infinity.
        self.assertAlmostEqual(
            epcstd.max_link_t(3, self.slow_rtcal, self.slow_trcal, self.slow_dr,
                              self.temp), float('inf'))
        # Check that get_t_max works for n=1..7, n != 3 only.
        # BUGFIX: split into one assertRaises block per call (see above).
        with self.assertRaises(ValueError):
            epcstd.max_link_t(0, self.slow_rtcal, self.slow_trcal, self.slow_dr,
                              self.temp)
        with self.assertRaises(ValueError):
            epcstd.max_link_t(8, self.slow_rtcal, self.slow_trcal, self.slow_dr,
                              self.temp)
    def test_universal_get_t_min_with_parameters_from_modelParams(self):
        # Setting up slow parameters
        self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
                                     self.slow_dr, self.temp)
        slow = self.build_t_min_with_universal_getter()
        self.assertTimeoutsEqual(slow, self.expected_timeouts["slow"].t_min,
                                 prefix="slow", suffix="min")
        # Setting up fast parameters
        self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
                                     self.fast_dr, self.temp)
        fast = self.build_t_min_with_universal_getter()
        self.assertTimeoutsEqual(fast, self.expected_timeouts['fast'].t_min,
                                 prefix="fast", suffix="min")
    def test_universal_get_t_max_with_parameters_from_modelParams(self):
        # Setting up slow parameters
        self.set_default_modelParams(self.slow_rtcal, self.slow_trcal,
                                     self.slow_dr, self.temp)
        slow = self.build_t_max_with_universal_getter()
        self.assertTimeoutsEqual(slow, self.expected_timeouts["slow"].t_max,
                                 prefix="slow", suffix="max")
        # Setting up fast parameters
        self.set_default_modelParams(self.fast_rtcal, self.fast_trcal,
                                     self.fast_dr, self.temp)
        fast = self.build_t_max_with_universal_getter()
        self.assertTimeoutsEqual(fast, self.expected_timeouts['fast'].t_max,
                                 prefix="fast", suffix="max")
class TestElementaryTimings(unittest.TestCase):
    """
    Test-case for ``get_elementary_timings``: with a fixed fast link
    configuration, every command/reply/timeout duration it reports must
    match the hand-computed values in ``self.fast_t``.
    """
    def setUp(self):
        # Reader link parameters (fast configuration).
        self.fast_tari = 6.25e-6
        self.fast_rtcal = 15.625e-6
        self.fast_trcal = 17.875e-6
        self.fast_trext = False
        self.fast_temp = epcstd.TempRange.NOMINAL
        self.fast_q = 4
        self.fast_dr = epcstd.DivideRatio.DR_643
        self.fast_m = epcstd.TagEncoding.FM0
        self.fast_target = epcstd.InventoryFlag.A
        self.fast_sel = epcstd.SelFlag.ALL
        self.fast_session = epcstd.Session.S0
        # Read command parameters and payloads.
        self.fast_bank = epcstd.MemoryBank.TID
        self.fast_word_ptr = 0
        self.fast_word_count = 4
        self.fast_data = "89ABCDEF"
        self.fast_epc = "00112233445566778899AABB"
        self.fast_pc = 0x0000
        self.fast_rn = 0x0000
        self.fast_crc = 0x0000
        # Expected durations (seconds) for each timing key the estimator
        # reports, pre-computed for the parameters above.
        self.fast_t = {
            "Query": 199.125e-6,
            "QueryRep": 59.375e-6,
            "ACK": 150.0e-6,
            "ReqRN": 293.75e-6,
            "Read": 412.5e-6,
            "T1(min)": 11.28125e-6,
            "T1(max)": 19.96875e-6,
            "T2(min)": 2.51367188e-6,
            "T2(max)": 16.7578125e-6,
            "T3(min)": 0.0e-6,
            "T3(max)": float("inf"),
            "T4(min)": 31.25e-6,
            "T4(max)": float("inf"),
            "RN16": 19.27148438e-6,
            "Response": 113.11523438e-6,
            "Handle": 32.67773438e-6,
            "Data": 60.328125e-6
        }
    def test_get_elementary_timings(self):
        # NOTE(review): dr is passed as the literal DR_643 rather than
        # self.fast_dr — same value, but inconsistent with the other args.
        d = epcstd.get_elementary_timings(
            tari=self.fast_tari, rtcal=self.fast_rtcal, trcal=self.fast_trcal,
            temp=self.fast_temp, dr=epcstd.DivideRatio.DR_643,
            m=self.fast_m, trext=self.fast_trext, sel=self.fast_sel,
            session=self.fast_session, target=self.fast_target, q=self.fast_q,
            bank=self.fast_bank, word_ptr=self.fast_word_ptr,
            word_count=self.fast_word_count, rn=self.fast_rn,
            crc=self.fast_crc, epc=self.fast_epc, mem=self.fast_data,
            pc=self.fast_pc)
        # Every expected key must be present and match to 8 decimal places.
        for k, v in self.fast_t.items():
            self.assertIn(k, d, "key {} not found in timings".format(k))
            self.assertAlmostEqual(v, d[k], 8, "error in {}".format(k))
|
nilq/baby-python
|
python
|
from __future__ import print_function
from builtins import str
import argparse
import os
import sys
import re
from .version import VERSION
from .utils import get_local_ip, DelimiterArgParser
import atexit
def add_parser_args(parser, config_type):
    """Register all MRQ command-line options on an argparse parser.

    Always adds the general options (tracing, MongoDB, Redis, job TTLs,
    naming, config file, ...). Depending on ``config_type``, also adds
    tool-specific options:

      * ``"run"``       -- task path & arguments for mrq-run
      * ``"dashboard"`` -- HTTP auth and bind IP/port for mrq-dashboard
      * ``"worker"``    -- greenlets, processes, queues, scheduler, admin
                           server, ... for mrq-worker

    Any other value adds only the general options. The parser is mutated
    in place; nothing is returned.

    BUGFIX: several multi-part help strings were concatenated without a
    separating space ("[1-N]to have...", "'sequential',to dequeue...") or
    with a doubled space; the fragments now join cleanly.
    """
    # General arguments
    parser.add_argument(
        '--trace_greenlets',
        action='store_true',
        default=False,
        help='Collect stats about each greenlet execution time and switches.')
    parser.add_argument(
        '--trace_memory',
        action='store_true',
        default=False,
        help='Collect stats about memory for each task. Incompatible with --greenlets > 1')
    parser.add_argument(
        '--trace_io',
        action='store_true',
        default=False,
        help='Collect stats about all I/O operations')
    parser.add_argument(
        '--print_mongodb',
        action='store_true',
        default=False,
        help='Print all MongoDB requests')
    parser.add_argument(
        '--trace_memory_type',
        action='store',
        default="",
        help='Create a .png object graph in trace_memory_output_dir ' +
             'with a random object of this type.')
    parser.add_argument(
        '--trace_memory_output_dir',
        action='store',
        default="memory_traces",
        help='Directory where to output .pngs with object graphs')
    parser.add_argument(
        '--profile',
        action='store_true',
        default=False,
        help='Run profiling on the whole worker')
    parser.add_argument(
        '--mongodb_jobs', '--mongodb',
        action='store',
        default="mongodb://127.0.0.1:27017/mrq",
        help='MongoDB URI for the jobs, scheduled_jobs & workers database')
    parser.add_argument(
        '--mongodb_logs',
        action='store',
        default="1",
        # Fixed doubled space between the two concatenated fragments.
        help='MongoDB URI for the logs database.' +
             ' "0" will disable remote logs, "1" will use main MongoDB.')
    parser.add_argument(
        '--mongodb_logs_size',
        action='store',
        default=16 *
        1024 *
        1024,
        type=int,
        help='If provided, sets the log collection to capped to that amount of bytes.')
    parser.add_argument(
        '--no_mongodb_ensure_indexes',
        action='store_true',
        default=False,
        help='If provided, skip the creation of MongoDB indexes at worker startup.')
    parser.add_argument(
        '--redis',
        action='store',
        default="redis://127.0.0.1:6379",
        help='Redis URI')
    parser.add_argument(
        '--redis_prefix',
        action='store',
        default="mrq",
        help='Redis key prefix')
    parser.add_argument(
        '--redis_max_connections',
        action='store',
        type=int,
        default=1000,
        help='Redis max connection pool size')
    parser.add_argument(
        '--redis_timeout',
        action='store',
        type=float,
        default=30,
        help='Redis connection pool timeout to wait for an available connection')
    parser.add_argument(
        '--name',
        default=None,
        action='store',
        help='Specify a different name')
    parser.add_argument(
        '--quiet',
        default=False,
        action='store_true',
        help='Don\'t output task logs')
    parser.add_argument(
        '--config',
        '-c',
        default=None,
        action="store",
        help='Path of a config file')
    parser.add_argument(
        '--worker_class',
        default="mrq.worker.Worker",
        action="store",
        help='Path to a custom worker class')
    parser.add_argument(
        '--version',
        '-v',
        default=False,
        action="store_true",
        help='Prints current MRQ version')
    parser.add_argument(
        '--no_import_patch',
        default=False,
        action='store_true',
        help='(DEPRECATED) Skips patching __import__ to fix gevent bug #108')
    parser.add_argument(
        '--add_network_latency',
        default="0",
        action='store',
        type=str,
        help='Adds random latency to the network calls, zero to N seconds. Can be a range (1-2)')
    parser.add_argument(
        '--default_job_result_ttl',
        default=7 * 24 * 3600,
        action='store',
        type=float,
        help='Seconds the results are kept in MongoDB when status is success')
    parser.add_argument(
        '--default_job_abort_ttl',
        default=24 * 3600,
        action='store',
        type=float,
        help='Seconds the tasks are kept in MongoDB when status is abort')
    parser.add_argument(
        '--default_job_cancel_ttl',
        default=24 * 3600,
        action='store',
        type=float,
        help='Seconds the tasks are kept in MongoDB when status is cancel')
    parser.add_argument(
        '--default_job_timeout',
        default=3600,
        action='store',
        type=float,
        help='In seconds, delay before interrupting the job')
    parser.add_argument(
        '--default_job_max_retries',
        default=3,
        action='store',
        type=int,
        help='Set the status to "maxretries" after retrying that many times')
    parser.add_argument(
        '--default_job_retry_delay',
        default=3,
        action='store',
        type=int,
        help='Seconds before a job in retry status is requeued again')
    parser.add_argument(
        '--use_large_job_ids',
        action='store_true',
        default=False,
        help='Do not use compacted job IDs in Redis. For compatibility with 0.1.x only')

    # mrq-run-specific arguments
    if config_type == "run":
        parser.add_argument(
            '--queue',
            action='store',
            default="",
            help='Queue the task on this queue instead of running it right away')
        parser.add_argument(
            'taskpath',
            action='store',
            help='Task to run')
        parser.add_argument(
            'taskargs',
            action='store',
            default='{}',
            nargs='*',
            help='JSON-encoded arguments, or "key value" pairs')

    # Dashboard-specific arguments
    elif config_type == "dashboard":
        parser.add_argument(
            '--dashboard_httpauth',
            default="",
            action="store",
            help='HTTP Auth for the Dashboard. Format is user:pass')
        parser.add_argument(
            '--dashboard_queue',
            default=None,
            action="store",
            help='Default queue for dashboard actions.')
        parser.add_argument(
            '--dashboard_port',
            default=5555,
            action="store",
            type=int,
            help='Use this port for mrq-dashboard. 5555 by default.')
        parser.add_argument(
            '--dashboard_ip',
            default="0.0.0.0",
            action="store",
            type=str,
            help='Bind the dashboard to this IP. Default is "0.0.0.0", use "127.0.0.1" to restrict access.')

    # Worker-specific args
    elif config_type == "worker":
        parser.add_argument(
            '--max_jobs',
            default=0,
            type=int,
            action='store',
            help='Gevent: max number of jobs to do before quitting.' +
                 ' Temp workaround for memory leaks')
        parser.add_argument(
            '--max_memory',
            default=0,
            type=int,
            action='store',
            # Fixed missing space between the concatenated fragments.
            help='Max memory (in Mb) after which the process will be shut down. Use with --processes [1-N]' +
                 ' to have supervisord automatically respawn the worker when this happens')
        parser.add_argument(
            '--greenlets',
            '--gevent',  # deprecated
            '-g',
            default=1,
            type=int,
            action='store',
            help='Max number of greenlets to use')
        parser.add_argument(
            '--processes',
            '-p',
            default=0,
            type=int,
            action='store',
            help='Number of processes to launch with supervisord')
        default_template = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            "supervisord_templates/default.conf"
        ))
        parser.add_argument(
            '--supervisord_template',
            default=default_template,
            action='store',
            help='Path of supervisord template to use')
        parser.add_argument(
            '--scheduler',
            default=False,
            action='store_true',
            help='Run the scheduler')
        parser.add_argument(
            '--scheduler_interval',
            default=60,
            action='store',
            type=float,
            help='Seconds between scheduler checks')
        parser.add_argument(
            '--report_interval',
            default=10,
            action='store',
            type=float,
            help='Seconds between worker reports to MongoDB')
        parser.add_argument(
            '--report_file',
            default="",
            action='store',
            type=str,
            help='Filepath of a json dump of the worker status. Disabled if none')
        parser.add_argument(
            'queues',
            nargs='*',
            default=["default"],
            help='The queues to listen on (default: \'default\')')
        parser.add_argument(
            '--subqueues_refresh_interval',
            default=10,
            action='store',
            type=float,
            help="Seconds between worker refreshes of the known subqueues")
        parser.add_argument(
            '--paused_queues_refresh_interval',
            default=10,
            action='store',
            type=float,
            help="Seconds between worker refreshes of the paused queues list")
        parser.add_argument(
            '--subqueues_delimiter',
            default='/',
            help='Delimiter between main queue and subqueue names',
            action=DelimiterArgParser)
        parser.add_argument(
            '--admin_port',
            default=0,
            action="store",
            type=int,
            help='Start an admin server on this port, if provided. Incompatible with --processes')
        parser.add_argument(
            '--admin_ip',
            default="127.0.0.1",
            action="store",
            type=str,
            help='IP for the admin server to listen on. Use "0.0.0.0" to allow access from outside')
        parser.add_argument(
            '--local_ip',
            default=get_local_ip(),
            action="store",
            type=str,
            help='Overwrite the local IP, to be displayed in the dashboard.')
        parser.add_argument(
            '--max_latency',
            default=1.,
            type=float,
            action='store',
            help='Max seconds while worker may sleep waiting for a new job. ' +
                 'Can be < 1.')
        parser.add_argument(
            '--dequeue_strategy',
            default="sequential",
            type=str,
            action='store',
            # Fixed missing space between the concatenated fragments.
            help='Strategy for dequeuing multiple queues. Default is \'sequential\',' +
                 ' to dequeue them in command-line order.')
def get_config(
        sources=(
            "file",
            "env"),
        env_prefix="MRQ_",
        file_path=None,
        parser=None,
        extra=None,
        config_type=None):
    """Return a config dict merged from several possible sources.

    Sources are merged in the order given by ``sources``; later sources
    override earlier ones. Supported source names:

    * ``"file"``: a python config file (explicit ``file_path``, a ``--config``
      command-line argument, or a ``mrq-config.py`` in the current directory).
      Only module variables starting with an uppercase letter are kept, and
      their names are lowercased.
    * ``"env"``: environment variables prefixed with ``env_prefix``.
    * ``"args"``: command-line arguments that differ from the parser defaults.

    ``extra``, if given, is merged last and always wins.
    """
    if not parser:
        parser = argparse.ArgumentParser()

    add_parser_args(parser, config_type)

    # Remember each option's declared type so env-var strings can be coerced.
    parser_types = {action.dest: action.type for action in parser._actions if action.dest}

    if config_type in ["run"]:
        # "run" needs a positional task name; feed a placeholder to get defaults.
        default_config = parser.parse_args(["notask"]).__dict__
    else:
        default_config = parser.parse_args([]).__dict__

    # Keys that can't be passed from the command line
    default_config["tasks"] = {}
    default_config["scheduled_tasks"] = {}

    # Only keep values different from defaults, i.e. actually passed on the
    # command line.
    from_args = {}
    if "args" in sources:
        for k, v in parser.parse_args().__dict__.items():
            if default_config[k] != v:
                from_args[k] = v

    # If we were given another config file, use it
    if file_path is not None:
        config_file = file_path
    elif from_args.get("config"):
        config_file = from_args.get("config")
    # If a mrq-config.py file is in the current directory, use it!
    elif os.path.isfile(os.path.join(os.getcwd(), "mrq-config.py")):
        config_file = os.path.join(os.getcwd(), "mrq-config.py")
    else:
        config_file = None

    from_file = {}
    if config_file and "file" in sources:
        # Import the config file as a module from its own directory.
        sys.path.insert(0, os.path.dirname(config_file))
        config_module = __import__(os.path.basename(config_file.replace(".py", "")))
        sys.path.pop(0)
        for k, v in config_module.__dict__.items():
            # We only keep variables starting with an uppercase character.
            if k[0].isupper():
                from_file[k.lower()] = v

    # Merge the config in the order given by the user
    merged_config = default_config
    config_keys = set(list(default_config.keys()) + list(from_file.keys()))
    for part in sources:
        for name in config_keys:
            if part == "env":
                value = os.environ.get(env_prefix + name.upper())
                if value:
                    if name == "queues":
                        # Queue lists are whitespace-separated in env vars.
                        # FIX: raw string — "\s" is an invalid escape sequence
                        # (SyntaxWarning on Python 3.12+).
                        value = re.split(r"\s+", value)
                    if parser_types.get(name):
                        value = parser_types[name](value)
                    merged_config[name] = value
            elif part == "args" and name in from_args:
                merged_config[name] = from_args[name]
            elif part == "file" and name in from_file:
                merged_config[name] = from_file[name]

    if extra:
        merged_config.update(extra)

    if merged_config["profile"]:
        import cProfile
        profiler = cProfile.Profile()
        profiler.enable()

        def print_profiling():
            profiler.print_stats(sort="cumulative")

        # Dump profiling stats when the process exits.
        atexit.register(print_profiling)

    if merged_config["version"]:
        print("MRQ version: %s" % VERSION)
        print("Python version: %s" % sys.version)
        sys.exit(1)

    if "no_import_patch" in from_args:
        print("WARNING: --no_import_patch will be deprecated in MRQ 1.0!")

    return merged_config
|
nilq/baby-python
|
python
|
"""add column source_file_dir
Revision ID: 3880a3a819d5
Revises: 2579e237c51a
Create Date: 2019-11-12 16:49:05.040791
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3880a3a819d5'
down_revision = '2579e237c51a'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add publishment.source_file_dir and audit columns to publishment_staticfile."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Relative directory (to the source project root) holding the published
    # files. NOTE: the Chinese `comment=` strings are written into the DB
    # schema and are deliberately left untouched.
    op.add_column('publishment', sa.Column('source_file_dir', sa.String(length=64), nullable=True, default='target', comment='发布文件的相对目录(相对于源项目的根目录)'))
    # Standard audit columns: creation/update timestamps and users, plus a
    # soft-delete flag (0 = active, 1 = deleted).
    op.add_column('publishment_staticfile', sa.Column('created_at', sa.DateTime(), nullable=False, comment='创建时间'))
    op.add_column('publishment_staticfile', sa.Column('created_by', sa.String(length=32), nullable=False, comment='创建人'))
    op.add_column('publishment_staticfile', sa.Column('is_deleted', sa.Integer(), nullable=False, comment='是否删除:0表示正常,1表示已删除'))
    op.add_column('publishment_staticfile', sa.Column('last_updated_at', sa.DateTime(), nullable=False, comment='最后更新时间'))
    op.add_column('publishment_staticfile', sa.Column('last_updated_by', sa.String(length=32), nullable=False, comment='最后更新人'))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the audit columns and publishment.source_file_dir."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Columns are dropped in reverse order of their addition in upgrade().
    op.drop_column('publishment_staticfile', 'last_updated_by')
    op.drop_column('publishment_staticfile', 'last_updated_at')
    op.drop_column('publishment_staticfile', 'is_deleted')
    op.drop_column('publishment_staticfile', 'created_by')
    op.drop_column('publishment_staticfile', 'created_at')
    op.drop_column('publishment', 'source_file_dir')
    # ### end Alembic commands ###
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from django.db.models import Count
from django.db.models.functions import TruncYear
from .models import Reading
def annual_reading_counts(kind="all"):
    """
    Return a list of dicts, one per year of reading, in year order.

    Each dict looks like this (when kind is 'all'):
        {'year': datetime.date(2003, 1, 1),
         'book': 12,        # present if kind is 'all' or 'book'
         'periodical': 18,  # present if kind is 'all' or 'periodical'
         'total': 30,       # present only if kind is 'all'
        }

    A Reading is counted under the year of its end_date.
    kind is one of 'book', 'periodical' or 'all' (both kinds).
    """
    wanted_kinds = ["book", "periodical"] if kind == "all" else [kind]

    # Intermediate tally keyed by year string ('2003'), e.g.:
    #   {'2003': {'year': date(2003, 1, 1), 'book': 12, 'periodical': 18}}
    # OrderedDict preserves the year ordering of the querysets below.
    per_year = OrderedDict()

    for publication_kind in wanted_kinds:
        readings_by_year = (
            Reading.objects.exclude(end_date__isnull=True)
            .filter(publication__kind=publication_kind)
            .annotate(year=TruncYear("end_date"))
            .values("year")
            .annotate(count=Count("id"))
            .order_by("year")
        )
        for row in readings_by_year:
            key = row["year"].strftime("%Y")
            if key not in per_year:
                per_year[key] = {"year": row["year"]}
            per_year[key][publication_kind] = row["count"]

    # Flatten into the final list, filling totals and zeroes for kinds with
    # no Readings in a given year.
    results = []
    for data in per_year.values():
        entry = {"year": data["year"]}
        if kind == "all":
            entry["total"] = 0
        for publication_kind in wanted_kinds:
            entry[publication_kind] = data.get(publication_kind, 0)
            if kind == "all":
                entry["total"] += entry[publication_kind]
        results.append(entry)

    return results
|
nilq/baby-python
|
python
|
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from .utils import *
from CPAC.vmhc import *
from nipype.interfaces.afni import preprocess
from CPAC.registration.registration import apply_transform
from CPAC.image_utils import spatial_smoothing
from CPAC.utils.utils import check_prov_for_regtool
def smooth_func_vmhc(wf, cfg, strat_pool, pipe_num, opt=None):
    '''
    Node Block:
    {"name": "smooth_func_vmhc",
     "config": "None",
     "switch": ["voxel_mirrored_homotopic_connectivity", "run"],
     "option_key": ["post_processing", "spatial_smoothing",
                    "smoothing_method"],
     "option_val": ["AFNI", "FSL"],
     "inputs": [["desc-cleaned_bold",
                 "desc-brain_bold",
                 "desc-preproc_bold",
                 "bold"],
                "space-bold_desc-brain_mask"],
     "outputs": ["desc-sm_bold",
                 "fwhm"]}
    '''
    # NOTE(review): the docstring above appears to be machine-read by the
    # C-PAC node-block engine — kept byte-identical; confirm before editing.
    # Kernel width (FWHM) from the pipeline config; `opt` selects the
    # smoothing implementation (AFNI or FSL per option_val above).
    fwhm = cfg.post_processing['spatial_smoothing']['fwhm']

    smooth = spatial_smoothing(f'smooth_symmetric_{pipe_num}',
                               fwhm, opt=opt)

    # Feed the first available functional file from the preference-ordered
    # list of input resources.
    node, out = strat_pool.get_data(["desc-cleaned_bold",
                                     "desc-brain_bold",
                                     "desc-preproc_bold",
                                     "bold"])
    wf.connect(node, out, smooth, 'inputspec.in_file')

    node, out = strat_pool.get_data("space-bold_desc-brain_mask")
    wf.connect(node, out, smooth, 'inputspec.mask')

    # 'fwhm' output for iterable
    outputs = {
        "desc-sm_bold": (smooth, 'outputspec.out_file'),
        "fwhm": (smooth, 'fwhm_input.fwhm')
    }

    return (wf, outputs)
def warp_timeseries_to_sym_template(wf, cfg, strat_pool, pipe_num, opt=None):
    '''
    Node Block:
    {"name": "transform_timeseries_to_sym_template",
     "config": ["voxel_mirrored_homotopic_connectivity"],
     "switch": ["run"],
     "option_key": "None",
     "option_val": "None",
     "inputs": [["desc-cleaned-sm_bold",
                 "desc-brain-sm_bold",
                 "desc-preproc-sm_bold",
                 "desc-sm_bold"],
                "from-bold_to-symtemplate_mode-image_xfm",
                "T1w_brain_template_symmetric"],
     "outputs": ["space-symtemplate_desc-sm_bold"]}
    '''
    # NOTE(review): the docstring above appears to be machine-read by the
    # C-PAC node-block engine — kept byte-identical; confirm before editing.
    # Determine which registration tool (ANTs/FSL) produced the transform so
    # the matching apply-transform interface and settings are used.
    xfm_prov = strat_pool.get_cpac_provenance(
        'from-bold_to-symtemplate_mode-image_xfm')
    reg_tool = check_prov_for_regtool(xfm_prov)

    num_cpus = cfg.pipeline_setup['system_config'][
        'max_cores_per_participant']
    num_ants_cores = cfg.pipeline_setup['system_config']['num_ants_threads']

    apply_xfm = apply_transform(f'warp_ts_to_sym_template_{pipe_num}',
                                reg_tool, time_series=True, num_cpus=num_cpus,
                                num_ants_cores=num_ants_cores,
                                mem_gb=5.0)

    # Interpolation comes from the tool-specific registration config section.
    if reg_tool == 'ants':
        apply_xfm.inputs.inputspec.interpolation = cfg.registration_workflows[
            'functional_registration']['func_registration_to_template'][
            'ANTs_pipelines']['interpolation']
    elif reg_tool == 'fsl':
        apply_xfm.inputs.inputspec.interpolation = cfg.registration_workflows[
            'functional_registration']['func_registration_to_template'][
            'FNIRT_pipelines']['interpolation']

    # smoothed BOLD
    # report_fetched=True also returns WHICH resource matched, so the output
    # key below can be derived from it.
    connect, resource = strat_pool.get_data(["desc-cleaned-sm_bold",
                                             "desc-brain-sm_bold",
                                             "desc-preproc-sm_bold",
                                             "desc-sm_bold"],
                                            report_fetched=True)
    node, out = connect
    wf.connect(node, out, apply_xfm, 'inputspec.input_image')

    node, out = strat_pool.get_data("T1w_brain_template_symmetric")
    wf.connect(node, out, apply_xfm, 'inputspec.reference')

    node, out = strat_pool.get_data("from-bold_to-symtemplate_mode-image_xfm")
    wf.connect(node, out, apply_xfm, 'inputspec.transform')

    outputs = {
        f'space-symtemplate_{resource}':
            (apply_xfm, 'outputspec.output_image')
    }

    return (wf, outputs)
def vmhc(wf, cfg, strat_pool, pipe_num, opt=None):
    '''Compute Voxel-Mirrored Homotopic Connectivity.

    VMHC is the map of brain functional homotopy, the high degree of
    synchrony in spontaneous activity between geometrically corresponding
    interhemispheric (i.e., homotopic) regions.

    Node Block:
    {"name": "vmhc",
     "config": ["voxel_mirrored_homotopic_connectivity"],
     "switch": ["run"],
     "option_key": "None",
     "option_val": "None",
     "inputs": [["space-symtemplate_desc-cleaned-sm_bold",
                 "space-symtemplate_desc-brain-sm_bold",
                 "space-symtemplate_desc-preproc-sm_bold",
                 "space-symtemplate_desc-sm_bold"]],
     "outputs": ["vmhc"]}
    '''
    # NOTE(review): the docstring above appears to be machine-read by the
    # C-PAC node-block engine — kept byte-identical; confirm before editing.
    # write out a swapped version of the file
    # copy and L/R swap file
    # Mirror the symmetric-template-space BOLD across the midline by
    # flipping the x axis.
    copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
                                name=f'copy_and_L_R_swap_{pipe_num}',
                                mem_gb=3.0)
    copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')

    node, out = strat_pool.get_data(["space-symtemplate_desc-cleaned-sm_bold",
                                     "space-symtemplate_desc-brain-sm_bold",
                                     "space-symtemplate_desc-preproc-sm_bold",
                                     "space-symtemplate_desc-sm_bold"])
    wf.connect(node, out, copy_and_L_R_swap, 'in_file')

    # calculate correlation between original and swapped images
    # Voxelwise Pearson correlation (polort=-1: no detrending) between the
    # original and mirrored time series yields the VMHC map.
    pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
                                  name=f'pearson_correlation_{pipe_num}',
                                  mem_gb=3.0)
    pearson_correlation.inputs.pearson = True
    pearson_correlation.inputs.polort = -1
    pearson_correlation.inputs.outputtype = 'NIFTI_GZ'

    wf.connect(node, out, pearson_correlation, 'xset')
    wf.connect(copy_and_L_R_swap, 'out_file',
               pearson_correlation, 'yset')

    outputs = {
        'vmhc': (pearson_correlation, 'out_file')
    }

    return (wf, outputs)
|
nilq/baby-python
|
python
|
##MIT License
##
##Copyright (c) 2019 Jacob Nudel
##Copyright (c) 2019 Yashu Seth
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in all
##copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
##SOFTWARE.
###############################################################################
#based on this blog post: https://yashuseth.blog/2018/07/22/pytorch-neural-network-for-tabular-data-with-categorical-embeddings/
#and this repo: https://github.com/yashu-seth/pytorch-tabular
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.utils.data import Dataset, DataLoader
class TabularDataset(Dataset):
    """PyTorch Dataset over a pandas frame of mixed tabular columns.

    Splits the frame into target, continuous, and (label-encoded)
    categorical arrays so a tabular model can consume them directly.
    """

    def __init__(self, data, cat_cols=None, output_col=None):
        """
        Parameters
        ----------
        data: pandas data frame
            Must contain every continuous, categorical and output column
            to be used.
        cat_cols: List of strings
            Names of the categorical columns; these are routed through the
            model's embedding layers and must be label encoded beforehand.
        output_col: string
            Name of the output variable column. When omitted, targets
            default to zeros.
        """
        self.n = data.shape[0]

        # Target column as an (n, 1) float32 array; zeros when no target given.
        self.y = (data[output_col].astype(np.float32).values.reshape(-1, 1)
                  if output_col else np.zeros((self.n, 1)))

        self.cat_cols = cat_cols if cat_cols else []
        # Everything that is neither categorical nor the target is continuous.
        excluded = self.cat_cols + [output_col]
        self.cont_cols = [col for col in data.columns if col not in excluded]

        # (n, 1) zero placeholders keep downstream shapes stable when one of
        # the column groups is absent.
        self.cont_X = (data[self.cont_cols].astype(np.float32).values
                       if self.cont_cols else np.zeros((self.n, 1)))
        self.cat_X = (data[self.cat_cols].astype(np.int64).values
                      if self.cat_cols else np.zeros((self.n, 1)))

    def __len__(self):
        """Total number of samples."""
        return self.n

    def __getitem__(self, idx):
        """Return one sample as [target, continuous, categorical]."""
        return [self.y[idx], self.cont_X[idx], self.cat_X[idx]]
class FeedForwardNN(nn.Module):
    """Feed-forward network for tabular data with categorical embeddings.

    Categorical features pass through per-feature embedding tables; their
    concatenation (after dropout) is joined with batch-normalized continuous
    features and fed through a stack of Linear/ReLU/BatchNorm/Dropout blocks,
    ending in a sigmoid output.
    """

    def __init__(self, emb_dims, no_of_cont, lin_layer_sizes,
                 output_size, emb_dropout, lin_layer_dropouts):
        """
        Parameters
        ----------
        emb_dims: List of two element tuples
            One (cardinality, embedding_dim) tuple per categorical feature.
        no_of_cont: Integer
            Number of continuous features in the data.
        lin_layer_sizes: List of integers
            Width of each hidden linear layer, in order.
        output_size: Integer
            Size of the final output.
        emb_dropout: Float
            Dropout applied to the concatenated embeddings.
        lin_layer_dropouts: List of floats
            Dropout applied after each hidden linear layer.
        """
        super().__init__()

        # One embedding table per categorical feature.
        self.emb_layers = nn.ModuleList(
            [nn.Embedding(cardinality, dim) for cardinality, dim in emb_dims])
        self.no_of_embs = sum(dim for _, dim in emb_dims)
        self.no_of_cont = no_of_cont

        # Linear stack; the first layer consumes embeddings + continuous input.
        widths = [self.no_of_embs + self.no_of_cont] + list(lin_layer_sizes)
        self.lin_layers = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(widths, widths[1:])])
        for layer in self.lin_layers:
            nn.init.kaiming_normal_(layer.weight.data)

        # Output layer + sigmoid activation.
        self.output_layer = nn.Linear(lin_layer_sizes[-1], output_size)
        nn.init.kaiming_normal_(self.output_layer.weight.data)
        self.out_act = nn.Sigmoid()

        # Batch norm for the raw continuous inputs and after each hidden layer.
        self.first_bn_layer = nn.BatchNorm1d(self.no_of_cont)
        self.bn_layers = nn.ModuleList(
            [nn.BatchNorm1d(width) for width in lin_layer_sizes])

        # Dropout layers. NOTE: the attribute name "droput_layers" (sic) is
        # preserved because it is part of the state_dict key namespace.
        self.emb_dropout_layer = nn.Dropout(emb_dropout)
        self.droput_layers = nn.ModuleList(
            [nn.Dropout(rate) for rate in lin_layer_dropouts])

    def forward(self, cont_data, cat_data):
        """Run a batch of continuous + categorical features through the net."""
        have_embs = self.no_of_embs != 0

        if have_embs:
            embedded = [layer(cat_data[:, idx])
                        for idx, layer in enumerate(self.emb_layers)]
            out = self.emb_dropout_layer(torch.cat(embedded, 1))

        if self.no_of_cont != 0:
            normed = self.first_bn_layer(cont_data)
            out = torch.cat([out, normed], 1) if have_embs else normed

        for linear, dropout, bnorm in zip(self.lin_layers,
                                          self.droput_layers,
                                          self.bn_layers):
            out = dropout(bnorm(F.relu(linear(out))))

        return self.out_act(self.output_layer(out))
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as f
from torch.autograd import Variable
import os
import numpy as np
from tqdm import tqdm
class ONE_HOT_MLP(nn.Module):
    """CNN encoder over a one-hot matrix followed by an MLP regressor.

    The convolutional stack reduces a (1, 124, 32) one-hot input to a
    12800-dim feature vector; the MLP regresses a single scalar from that
    vector concatenated with the extra input ``t``.
    """

    def __init__(self, hidden):
        super(ONE_HOT_MLP, self).__init__()
        # Feature extractor; shapes noted as channels x height x width.
        self.cnn = nn.Sequential(
            # in: 1 x 124 x 32
            nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2, inplace=True),
            # 32 x 62 x 16
            nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            # 64 x 61 x 15
            nn.Conv2d(64, 128, kernel_size=5, stride=3, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            # out: 128 x 20 x 5
        )
        # MLP input = flattened CNN features (128*20*5 = 128*10*5*2) + scalar t.
        self.mlp = nn.Sequential(nn.Linear(128 * 10 * 5 * 2 + 1, hidden),
                                 nn.Tanh(),
                                 nn.Linear(hidden, 1))
        # Re-initialize the MLP: small gaussian weights, zero biases.
        for param in self.mlp.parameters():
            torch.nn.init.normal_(param, mean=0, std=0.1)
        torch.nn.init.constant_(self.mlp[0].bias, val=0.)
        torch.nn.init.constant_(self.mlp[2].bias, val=0.)

    def forward(self, x, t):
        """Regress a scalar from one-hot image ``x`` and extra input ``t``."""
        features = self.cnn(x)
        flat = features.reshape(features.shape[0], -1)
        scalar = t.reshape(t.shape[0], -1)
        # t comes first in the concatenation, matching the MLP's input layout.
        return self.mlp(torch.cat((scalar, flat), dim=1))
def _train_property_mlp(model, optimizer, train_loader, epoch_num, model_file_name):
    """Train one ONE_HOT_MLP, keeping the best checkpoint on disk.

    Runs up to ``epoch_num`` epochs of MSE training; after each epoch the
    whole model is saved if it achieved the lowest average loss so far.
    NOTE(review): the early stop below fires only when the best epoch is
    exactly 25 epochs back — behavior preserved from the original code.
    """
    mse = torch.nn.MSELoss()
    # Large sentinel losses so any real epoch immediately becomes the best;
    # +10 slack keeps the argmin bookkeeping in range.
    total_loss_list = np.ones(epoch_num + 10) * 5000000
    for epoch in range(epoch_num):
        total_loss = 0
        model.train()
        for i, data in enumerate(tqdm(train_loader, 0)):
            # data = (one_hot_part_a, one_hot_part_b, t, label)
            one_hot = torch.cat((data[0], data[1]), dim=1)
            one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
            one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
            optimizer.zero_grad()
            t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
            label = data[3].cuda().reshape(data[3].shape[0], 1).type(torch.cuda.FloatTensor)
            prediction = model(one_hot, t)
            loss = mse(prediction, label)
            loss.backward()
            total_loss += loss.data.item() / 1000
            optimizer.step()
        print('====> Epoch: {} Average loss: {:.4f}'.format(
            epoch, total_loss / len(train_loader.dataset)))
        total_loss_list[epoch] = total_loss / len(train_loader.dataset)
        if np.argmin(total_loss_list) == epoch:
            torch.save(model, model_file_name)
            print('best result, saving the model to ' + model_file_name)
        elif np.argmin(total_loss_list) == epoch - 25:
            print('Finish: Training process over due to useless training')
            break


def train_one_hot_mlp(params, hidden, device):
    """Train ONE_HOT_MLP regressors for thermal conductivity and heat capacity.

    Loads the two pre-built DataLoaders from Data/, trains one model per
    property (sequentially, on the CUDA device selected by ``device``), and
    saves the best checkpoints under Model_pkl/.

    Parameters
    ----------
    params: dict with 'VAE_epoch_num' — the epoch budget per model.
    hidden: hidden width of the MLP head (also encoded in the file names).
    device: CUDA device index, exported via CUDA_VISIBLE_DEVICES.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
    thermal_conductivity_train_loader = torch.load('Data/thermal_conductivity_vae_mlp_train_loader.pkl')
    heat_capacity_train_loader = torch.load('Data/heat_capacity_vae_mlp_train_loader.pkl')
    heat_capacity_one_hot_mlp = ONE_HOT_MLP(hidden).cuda()
    thermal_conductivity_one_hot_mlp = ONE_HOT_MLP(hidden).cuda()
    thermal_conductivity_optimizer = optim.Adam(
        thermal_conductivity_one_hot_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    heat_capacity_optimizer = optim.Adam(
        heat_capacity_one_hot_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    # The two training runs were verbatim duplicates — factored into one helper.
    _train_property_mlp(
        thermal_conductivity_one_hot_mlp, thermal_conductivity_optimizer,
        thermal_conductivity_train_loader, params['VAE_epoch_num'],
        'Model_pkl/ONE_HOT_MLP_thermal_conductivity_hidden_' + str(hidden) + '.pkl')
    _train_property_mlp(
        heat_capacity_one_hot_mlp, heat_capacity_optimizer,
        heat_capacity_train_loader, params['VAE_epoch_num'],
        'Model_pkl/ONE_HOT_MLP_heat_capacity_hidden_' + str(hidden) + '.pkl')
|
nilq/baby-python
|
python
|
import os
import string
from tqdm import tqdm
#from google.colab import drive
#conda install -c huggingface transformers
import matplotlib.pyplot as plt
#% matplotlib inline
import pandas as pd
import seaborn as sns
import numpy as np
import random
import torch
from torch.utils.data import Dataset, DataLoader, random_split, RandomSampler, SequentialSampler
from PoemGenerator.data_2 import PoemDataset
#torch.manual_seed(42)
#random.seed(42)
#np.random.seed(42)
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config, GPT2LMHeadModel
from transformers import AdamW, get_linear_schedule_with_warmup
import nltk
# Import-time side effect: fetch the 'punkt' tokenizer models required by
# nltk.word_tokenize below (no-op if already present locally).
nltk.download('punkt')
# Plot how long the lines of our poems are; SHOULD be bounded from 3 to roughly
# 15 since that's what we did in our cleaning step....
# However, certain mismatches between this tokenizer and what we did for cleaning may make it longer (which is okay(
def plot_poem_length_distributions(df):
    """Print stats and plot the distribution of poem lengths in NLTK tokens.

    ``df`` is an iterable of poem strings. Lengths SHOULD fall roughly in the
    3–15 range enforced by the cleaning step, but mismatches between this
    tokenizer and the cleaning tokenizer may push some counts higher (which
    is okay).
    """
    # NOTE: to measure individual LINES instead of whole poems, split each
    # poem on '\n' and tokenize per non-empty line before counting.
    doc_lengths = np.array([len(nltk.word_tokenize(poem)) for poem in df])

    print('Average length (of poems): ', np.average(doc_lengths))
    print('Max length (of poems): ', np.max(doc_lengths))
    sns.distplot(doc_lengths)
    plt.show()
#Model configuration here
def configure_model(tokenizer, num_embed=768, num_layers=6, num_head=4, activation_fn='gelu'):
    """Build a GPT-2 LM-head model sized by the given hyperparameters.

    The parameters map directly onto GPT2Config fields:
      num_embed     -> n_embd  (embedding/hidden dimensionality; HF default 768)
      num_layers    -> n_layer (number of transformer layers; HF default 12)
      num_head      -> n_head  (attention heads per layer; HF default 12)
      activation_fn -> activation_function, one of
                       ["relu", "silu", "gelu", "tanh", "gelu_new"]

    The token embedding matrix is resized to the tokenizer's vocabulary,
    which may include extra special tokens.
    """
    configuration = GPT2Config(n_embd=num_embed,
                               n_layer=num_layers,
                               n_head=num_head,
                               activation_function=activation_fn)
    model = GPT2LMHeadModel.from_pretrained("gpt2", config=configuration)
    model.resize_token_embeddings(len(tokenizer))
    return model
def train_model(model, train_dataloader, validation_dataloader, epochs, optimizer, log_period, tokenizer, device, output_dir):
    """Fine-tune the GPT-2 model, validating and checkpointing every epoch.

    Parameters
    ----------
    model : the GPT2LMHeadModel to train (moved to ``device`` by the caller).
    train_dataloader / validation_dataloader : loaders yielding
        (input_ids, attention_mask) batches.
    epochs : number of passes over the training data.
    optimizer : optimizer stepping the model's parameters.
    log_period : print a generated sample every ``log_period`` training batches.
    tokenizer : used to decode samples; saved alongside each checkpoint.
    device : torch device the batches are moved to.
    output_dir : directory under which per-epoch checkpoints are written.
    """
    training_stats = []
    outer_bar = tqdm(range(epochs), unit="epoch")
    for epoch_i in range(0, epochs):
        # ========================================
        #               Training
        # ========================================
        print("")
        print('Training...')
        total_train_loss = 0
        total_train_ppl = 0
        model.train()
        for step, batch in tqdm(enumerate(train_dataloader)):
            # Causal language modelling: labels are the input ids themselves.
            b_input_ids = batch[0].to(device)
            b_labels = batch[0].to(device)
            b_masks = batch[1].to(device)
            model.zero_grad()
            outputs = model(b_input_ids,
                            labels=b_labels,
                            attention_mask=b_masks,
                            token_type_ids=None
                            )
            loss = outputs[0]
            batch_loss = loss.item()
            total_train_loss += batch_loss
            # Perplexity = exp(cross-entropy loss).
            total_train_ppl += torch.exp(loss)
            # Get sample and save the model every x batches
            if step % log_period == 0 and not step == 0:
                model.eval()
                # Sample from a random BOS token to eyeball generation quality.
                sample_outputs = model.generate(
                    bos_token_id=random.randint(1, 30000),
                    do_sample=True,
                    top_k=50,
                    max_length=200,
                    top_p=0.95,
                    num_return_sequences=1
                )
                for i, sample_output in enumerate(sample_outputs):
                    print("{}: {}".format(i, tokenizer.decode(sample_output, skip_special_tokens=True)))
                model.train()
            loss.backward()
            optimizer.step()
        # Calculate the average loss over all of the batches.
        avg_train_loss = total_train_loss / len(train_dataloader)
        avg_train_ppl = total_train_ppl / len(train_dataloader)
        print("")
        print(" Average training loss: {0:.2f}".format(avg_train_loss))
        # ========================================
        #               Validation
        # ========================================
        print("")
        print("Running Validation...")
        model.eval()
        total_eval_loss = 0
        total_eval_perp = 0
        # Evaluate data for one epoch
        for batch in validation_dataloader:
            b_input_ids = batch[0].to(device)
            b_labels = batch[0].to(device)
            b_masks = batch[1].to(device)
            with torch.no_grad():
                outputs = model(b_input_ids,
                                # token_type_ids=None,
                                attention_mask=b_masks,
                                labels=b_labels)
                loss = outputs[0]
            batch_loss = loss.item()
            batch_perp = torch.exp(loss)
            total_eval_perp += batch_perp
            total_eval_loss += batch_loss
        avg_val_loss = total_eval_loss / len(validation_dataloader)
        avg_val_ppl = total_eval_perp / len(validation_dataloader)
        print(" Validation Loss: {0:.2f}".format(avg_val_loss))
        # Record all statistics from this epoch.
        training_stats.append(
            {
                'epoch': epoch_i + 1,
                'Training Loss': avg_train_loss,
                'Training Perplexity': avg_train_ppl,
                'Valid. Loss': avg_val_loss,
                'Valid. Perplexity': avg_val_ppl
            })
        # They can then be reloaded using `from_pretrained()`
        # Save the model
        # Checkpoint directory name encodes both losses for easy comparison.
        f_name = 'T_Loss_'+ str(round(avg_train_loss, 3)) + '_V_Loss_' + str(round(avg_val_loss, 3))
        true_output_dir = os.path.join(output_dir, f_name)
        # Unwrap DataParallel (if wrapped) before saving.
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(true_output_dir)
        tokenizer.save_pretrained(true_output_dir)
        outer_bar.update(1)
    display_training_summary(training_stats, epochs)
def display_training_summary(training_stats, epoch):
    """Print the per-epoch stats table and plot loss/perplexity curves.

    ``training_stats`` is a list of per-epoch dicts keyed by 'epoch',
    'Training Loss', 'Valid. Loss', etc.; ``epoch`` is the total number of
    epochs, forwarded to the plots for their x-axis ticks.
    """
    # Display summary of training progress
    # FIX: use the full option name — the bare 'precision' shorthand was
    # deprecated and removed in pandas 2.0 ('display.precision' also works
    # on older pandas versions).
    pd.set_option('display.precision', 2)
    df_stats = pd.DataFrame(data=training_stats)
    df_stats = df_stats.set_index('epoch')
    print(df_stats)
    plot_loss_perplexity(df_stats, 'l', epoch)
    plot_loss_perplexity(df_stats, 'p', epoch)
def plot_loss_perplexity(df_stats, l_or_p, epochs):
    """Plot training vs. validation curves for loss ('l') or perplexity ('p').

    ``df_stats`` is the epoch-indexed stats frame built by
    display_training_summary; ``epochs`` sets the x-axis tick range.
    """
    metric = {'l': 'Loss', 'p': 'Perplexity'}.get(l_or_p, '')
    train_col = 'Training ' + metric
    valid_col = 'Valid. ' + metric

    # Styling for the figure.
    sns.set(style='darkgrid')
    sns.set(font_scale=1.5)
    plt.rcParams["figure.figsize"] = (12,6)

    plt.plot(df_stats[train_col], 'b-o', label="Training")
    plt.plot(df_stats[valid_col], 'g-o', label="Validation")

    # Echo the raw numbers alongside the plot.
    print('\n==================')
    print(metric)
    print(df_stats[train_col])
    print(df_stats[valid_col])
    print('==================')

    plt.title("Training & Validation " + metric)
    plt.xlabel("Epoch")
    plt.ylabel(metric)
    plt.legend()
    plt.xticks(range(1, epochs))
    plt.show()
#Generate a sequence of tokens
def generate(model, tokenizer, device, prompt="<|startoftext|>", isval = True):
    """Sample one poem from the model, seeded with ``prompt``.

    Uses combined top-k (k=50) and nucleus (p=0.95) sampling; tweaking the
    filters (or using only one of them) may yield more coherent poems.
    Output length is bounded to 12–60 tokens (3–15 words per line times 4
    lines). When ``isval`` is true the decoded sample is also printed.
    """
    model.eval()

    # Encode the prompt and add a batch dimension.
    encoded_prompt = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0).to(device)

    sample_outputs = model.generate(
        encoded_prompt,
        #bos_token_id=random.randint(1,30000),
        do_sample=True,
        top_k=50,        # keep only the 50 most likely next tokens
        max_length = 60, # 15 max words * 4 number of lines
        min_length = 12, # 3 words minimum * 4 number of lines
        top_p=0.95       # smallest token set whose mass exceeds 95%
        #num_return_sequences=4 #Uncomment this for multiple, independently sampled outputs
    )

    for i, sample_output in enumerate(sample_outputs):
        output = tokenizer.decode(sample_output, skip_special_tokens=True)
        if isval:
            print("{}: {}\n\n".format(i, output))
        return output
def main():
    """Train a GPT-2 poem generator end to end.

    Loads and cleans the poem CSV, builds train/validation loaders,
    fine-tunes the model, then samples a poem from it.
    """
    # Training hyperparameters.
    batch_size = 2
    epochs = 3
    learning_rate = 1e-3
    log_period = 100
    save_dir = './model_save/'
    # Create output directory if needed
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Model-size hyperparameters (see configure_model).
    num_embedded = 768
    num_layers = 6
    num_head = 4 # [4,6,8]
    activation_function = 'gelu'
    df = pd.read_csv('data/clean_poems.csv')
    # Simple cleaning
    df.drop_duplicates('Poem', inplace=True) # Drop any duplicate poems
    df['Poem'] = df['Poem'].str.translate(str.maketrans('', '', string.punctuation)) # Get rid of punctuation
    df['Poem'] = df['Poem'].apply(str.lower) # Make everything lower-case
    df['Poem'] = df['Poem'].str.replace('\n', ' ')
    print('Read ', len(df['Poem']), ' examples')
    #df.to_csv('data/clean_poems.csv', index=False)
    # Create a smaller DF to work with for testing puposes
    data_percentage = 1.0
    df = df.sample(frac=data_percentage, replace=False)
    print('Shrank examples to ', len(df['Poem']), ' examples')
    poems = df.Poem
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2', bos_token='<|startoftext|>', eos_token='<|endoftext|>',
                                              pad_token='<|pad|>')
    # NOTE(review): max_length is set to the embedding size (768) here, which
    # conflates two unrelated hyperparameters — confirm this is intentional.
    dataset = PoemDataset(poems, tokenizer, max_length=num_embedded)
    # Split into training and validation sets ~ 90% Train, 10% Validation
    train_size = int(0.9 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
    print('{:>5,} training samples'.format(train_size))
    print('{:>5,} validation samples'.format(val_size))
    # Create dataloaders for the datasets
    train_dataloader = DataLoader(
        train_dataset,
        sampler=RandomSampler(train_dataset), # Select batches randomly
        batch_size=batch_size)
    validation_dataloader = DataLoader(
        val_dataset,
        sampler=SequentialSampler(val_dataset), # Pull out batches sequentially.
        batch_size=batch_size)
    # Move model to GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = configure_model(tokenizer, num_embed=num_embedded, num_layers=num_layers, num_head=num_head, activation_fn=activation_function)
    # Create optimizer
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    model = model.to(device)
    #Train the model
    train_model(model, train_dataloader, validation_dataloader, epochs, optimizer, log_period, tokenizer, device, save_dir)
    #Generate with the model
    generate(model, tokenizer, device, 'I love my cat ')


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import json
import unittest
from mock import Mock, patch
import delighted
# Headers expected on every GET to the Delighted API: JSON accept header,
# HTTP basic auth ('YWJjMTIz' is the base64 of the 'abc123' test api key
# set in DelightedTestCase.setUp), and the client's user-agent string.
get_headers = {
    'Accept': 'application/json',
    'Authorization': 'Basic YWJjMTIz',
    'User-Agent': "Delighted Python %s" % delighted.__version__
}
# POST requests additionally declare a JSON content type.
post_headers = get_headers.copy()
post_headers.update({'Content-Type': 'application/json'})
class DelightedTestCase(unittest.TestCase):
    """Base test case that mocks ``requests.request`` for the Delighted client.

    Each test runs with a dummy api key and a patched ``requests.request``;
    the helpers below queue canned HTTP responses and assert on the calls
    the client actually made.
    """

    def setUp(self):
        super(DelightedTestCase, self).setUp()
        delighted.api_key = 'abc123'
        self.request_patcher = patch('requests.request')
        self.request_mock = self.request_patcher.start()

    def tearDown(self):
        super(DelightedTestCase, self).tearDown()
        self.request_patcher.stop()

    def mock_response(self, status_code, headers, data, links=None):
        """Queue a single canned HTTP response."""
        response = delighted.http_response.HTTPResponse(status_code, headers, data, links)
        self.mock_multiple_responses([response])

    def mock_multiple_responses(self, responses):
        """Queue several canned HTTP responses, returned in order."""
        queued = []
        for canned in responses:
            fake = Mock()
            fake.status_code = canned.status_code
            fake.headers = canned.headers
            fake.text = json.dumps(canned.body)
            fake.links = canned.links
            queued.append(fake)
        self.request_mock.side_effect = queued

    def mock_error(self, mock):
        """Make the given mocked requests module raise on any request."""
        mock.exceptions.RequestException = Exception
        mock.request.side_effect = mock.exceptions.RequestException()

    def check_call(self, meth, url, headers, post_data, get_params):
        """Assert exactly one request was made with the given parameters."""
        body = json.dumps(post_data) if post_data is not None else None
        self.request_mock.assert_called_once_with(meth, url,
                                                  headers=headers,
                                                  data=body,
                                                  params=get_params)

    def check_multiple_call(self, calls):
        """Assert one request per entry in ``calls`` ({'meth','url','kwargs'})."""
        self.assertEqual(self.request_mock.call_count, len(calls))
        for call in calls:
            # JSON-encode the body the same way the client does before comparing.
            if call['kwargs']['data'] is not None:
                call['kwargs']['data'] = json.dumps(call['kwargs']['data'])
            self.request_mock.assert_any_call(call['meth'], call['url'], **call['kwargs'])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from typing import Any, List
import configargparse
import paleomix
SUPPRESS = configargparse.SUPPRESS
class ArgumentDefaultsHelpFormatter(configargparse.ArgumentDefaultsHelpFormatter):
    """Modified ArgumentDefaultsHelpFormatter that excludes several constants (True,
    False, None) and uses a custom presentation of the default value.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        # Enable wordwrapping
        kwargs.setdefault("width", 79)
        super().__init__(*args, **kwargs)

    def _get_help_string(self, action: configargparse.Action):
        # The following values look silly as part of a help string
        if isinstance(action.default, bool) or action.default in [None, [], ()]:
            return action.help
        # The subclass does not allow modification to the defaults string, so instead
        # we access the logic by simply checking if the result was modified.
        if super()._get_help_string(action) == action.help:
            # Parent chose not to show a default (e.g. SUPPRESS) -- keep as-is.
            return action.help
        assert action.help is not None
        # Append the default in brackets instead of the parent's "(default: X)".
        return action.help + " [%(default)s]"
class ArgumentParser(configargparse.ArgumentParser):
    """Supports keys with underscores instead of dashes, for backwards compatibility
    with old paleomix config files, provided that these do not use per-host sections.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        kwargs.setdefault("formatter_class", ArgumentDefaultsHelpFormatter)
        # Workaround for configargparse (1.2.3) not considering abbreviations when
        # applying options from config files, resulting in config file options
        # overriding abbreviated options supplied on the command-line.
        kwargs.setdefault("allow_abbrev", False)
        super().__init__(*args, **kwargs)
        self.add_argument(
            "-v",
            "--version",
            action="version",
            version="%(prog)s v" + paleomix.__version__,
        )

    def get_possible_config_keys(self, *args: Any, **kwargs: Any) -> List[str]:
        """Return config keys, adding underscore variants of dashed keys."""
        keys = super().get_possible_config_keys(*args, **kwargs)
        # Iterate over a snapshot: appending to `keys` while iterating it would
        # also visit (and needlessly re-normalize) the items just appended.
        for key in list(keys):
            key = key.strip("-").replace("-", "_")
            if key not in keys:
                keys.append(key)
        return keys

    def convert_item_to_command_line_arg(self, action, key, value):
        # Ignore empty options from old config files
        if action and value == "=":
            return []
        return super().convert_item_to_command_line_arg(action, key, value)

    def add_subparsers(self, *args: Any, **kwargs: Any):
        subparsers = super().add_subparsers(*args, **kwargs)
        # Hack to hide aliases from subcommand help text, since aliases are only used
        # for deprecated commands/command-names
        subparsers._ChoicesPseudoAction = _ChoicesPseudoAction
        return subparsers
class _ChoicesPseudoAction(configargparse.Action):
    """Replacement pseudo-action for rendering subcommand choices in help text.

    Accepts and deliberately discards the ``aliases`` argument so that
    aliases (used only for deprecated command names, per add_subparsers
    above) do not appear in the help listing.
    """

    def __init__(self, name, aliases, help):
        # `aliases` is intentionally unused -- that is the whole point.
        super(_ChoicesPseudoAction, self).__init__(
            option_strings=[],
            dest=name,
            help=help,
            metavar=name,
        )
|
nilq/baby-python
|
python
|
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python3 -I nixpkgs=../../pkgs
# SPDX-FileCopyrightText: 2020 Daniel Fullmer and robotnix contributors
# SPDX-License-Identifier: MIT
import json
import os
import urllib.request
def save(filename, data):
    """Serialize *data* as deterministic, pretty-printed JSON to *filename*.

    Keys are sorted so repeated runs produce byte-identical files.
    """
    # Use a context manager so the handle is flushed and closed even on error
    # (the original left the file object dangling).
    with open(filename, 'w') as fp:
        json.dump(data, fp, sort_keys=True, indent=2, separators=(',', ': '))
def fetch_metadata():
    """Build a {device: metadata} dict from LineageOS hudson's published files.

    Combines lineage-build-targets (variant/branch per device) with
    updater/devices.json (vendor, display name, lineage_recovery flag).
    """
    metadata = {}
    lineage_build_targets_str = urllib.request.urlopen("https://github.com/LineageOS/hudson/raw/master/lineage-build-targets").read().decode()
    for line in lineage_build_targets_str.split("\n"):
        line = line.strip()
        # Skip blank lines and comments.
        if line == "":
            continue
        if line.startswith("#"):
            continue
        # Each remaining line: <device> <variant> <branch> <update period>;
        # the update period is parsed but not used.
        device, variant, branch, updatePeriod = line.split()
        metadata[device] = {
            'variant': variant,
            'branch': branch,
        }
    ###
    devices = json.load(urllib.request.urlopen("https://github.com/LineageOS/hudson/raw/master/updater/devices.json"))
    for data in devices:
        # Only enrich devices that actually have a build target.
        if data['model'] not in metadata:
            continue
        vendor = data['oem']
        vendor = vendor.lower()
        # Workaround name inconsistency with LG
        if vendor == 'lg':
            vendor = 'lge'
        # Workaround google device names source tree inconsistency
        if data['model'] == 'shamu':
            vendor = 'moto'
        if data['model'] == 'flox':
            vendor = 'asus'
        metadata[data['model']].update({
            'vendor': vendor,
            'name': data['name'],
            'lineage_recovery': data.get('lineage_recovery', False)
        })
    return metadata
if __name__ == '__main__':
    # Fetch the device list from LineageOS hudson and persist it locally.
    save('device-metadata.json', fetch_metadata())
|
nilq/baby-python
|
python
|
from .transformer import TransformerXL
|
nilq/baby-python
|
python
|
# encoding: utf-8
from hoopa import const
from hoopa.middlewares.stats import StatsMiddleware
NAME = "hoopa"
# Maximum number of concurrent worker coroutines
WORKER_NUMBERS = 1
# Delay between requests; may also be a list of two ints, in which case the
# delay is a random float drawn between the two values
DOWNLOAD_DELAY = 3
# Pending timeout: requests pending longer than this are put back into the
# waiting queue
PENDING_THRESHOLD = 100
# Keep running even after all tasks are finished
RUN_FOREVER = False
# Queues
# Scheduler queue: memory (default), redis, or mq
QUEUE_CLS = const.MemoryQueue
# Delete the queues (including data sets and the dedup queue)
CLEAN_QUEUE = False
# Request priority; only meaningful when the queue is redis
PRIORITY = None
# Downloader implementation: aiohttp or httpx
DOWNLOADER_CLS = const.AiohttpDownloader
HTTP_CLIENT_KWARGS = None
# Download middlewares
MIDDLEWARES = [
    StatsMiddleware
]
# Default dedup filter (dedup queue is not deleted); the effective filter is
# decided according to the queue type
DUPEFILTER_CLS = const.MemoryDupeFilter
# Whether to delete the dedup queue
CLEAN_DUPEFILTER = None
# Connection settings for the dedup backend
DUPEFILTER_SETTING = None
# Stats collector; chosen according to the queue by default, may be overridden
STATS_CLS = const.MemoryStatsCollector
# Redis connection settings
# REDIS_SETTING = "redis://127.0.0.1:6379/0?encoding=utf-8"
REDIS_SETTING = {
    'host': '127.0.0.1',
    'port': 6379,
    'db': 0,
    'password': ''
}
# MQ
MQ_MAXSIZE = 10
MQ_URI = "amqp://guest:guest@127.0.0.1/"
MQ_API_PORT = 15672
# Miscellaneous
# Serialization backend: pickle, ujson, orjson
SERIALIZATION = "ujson"
# Logging
LOG_LEVEL = "INFO"
LOG_WRITE_FILE = False
|
nilq/baby-python
|
python
|
from pathlib import Path
# Source directory (the one containing this file) and its sibling build
# directory "bld".
SRC = Path(__file__).parent
BLD = SRC.parent / "bld"
|
nilq/baby-python
|
python
|
from collections import Counter
class Square(Counter):
    """Creates a special purpose counter than can store only one value, and wipes itself when zero."""

    def __init__(self):
        """Object should be initialized empty."""
        pass

    def __setitem__(self, key, cnt):
        """Update the count, enforcing the single-key invariant.

        Raises:
            KeyError: if the counter already holds a *different* key.
        """
        # Bug fix: the original used `self.keys().isdisjoint(key)`, which
        # iterates the *characters* of a string key (so a second multi-char
        # key sharing a character slipped through) and raises TypeError for
        # non-iterable keys.  A direct membership test implements the
        # intended "same key or empty" check.
        if len(self) > 0 and key not in self:
            raise KeyError(f"Square already contains key '{list(self.keys()).pop()}', can't modify with key '{key}'")
        # If count is being set to zero, remove the key altogether.
        if cnt == 0:
            super().__delitem__(key)
        # Otherwise just assign the value as usual.
        else:
            super().__setitem__(key, cnt)
|
nilq/baby-python
|
python
|
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View
# model
from timepredictor.models import PrediccionTiempos, UltimosGps
# Create your views here.
class MapHandler(View):
    '''This class manages the map where the bus and stops are shown'''

    def __init__(self, **kwargs):
        # Forward kwargs so Django's as_view(**initkwargs) keeps working;
        # the original override dropped them and skipped View.__init__.
        super(MapHandler, self).__init__(**kwargs)
        # Parameters passed to the HTML template.
        self.context = {}

    def get(self, request):
        """Render the map template."""
        template = "timePredictor.html"
        return render(request, template, self.context)
class GetEstimatedTimes(View):
    '''This class requests to the database the estimated times for next stops for a bus'''

    def __init__(self, **kwargs):
        # Forward kwargs so Django's as_view(**initkwargs) keeps working;
        # the original override dropped them and skipped View.__init__.
        super(GetEstimatedTimes, self).__init__(**kwargs)
        self.context = {}

    def get(self, request, licencePlate):
        """Return the bus header and its per-stop arrival estimates as JSON.

        Predictions are ordered newest-first by prediction timestamp.
        """
        stops = PrediccionTiempos.objects.filter(patente=licencePlate).order_by('-tiempo_tstamp')
        response = {}
        busDesc = 'bus'
        stopsDesc = 'stops'
        response[stopsDesc] = []
        for stop in stops:
            # Fill the bus header once, from the first row seen.
            if busDesc not in response:
                response[busDesc] = {
                    'licencePlate': stop.patente,
                    'AuthRoute': stop.servicio
                }
            response[stopsDesc].append(
                {'stopCode': stop.codigo,
                 'arrivedEstimatedTime': stop.tiempo_tstamp,
                 'distanceOnRoute': stop.distancia,
                 'arrivedEstimatedTimeInSecs': stop.tiempo})
        return JsonResponse(response, safe=False)
class GetBusPosition(View):
    '''This class requests to the database the position of a bus '''

    def __init__(self, **kwargs):
        """The constructor; context holds the parameters given to the html template."""
        # Forward kwargs so Django's as_view(**initkwargs) keeps working;
        # the original override dropped them and skipped View.__init__.
        super(GetBusPosition, self).__init__(**kwargs)
        self.context = {}

    def get(self, request, licencePlate):
        """Return the latest GPS records for the given licence plate as JSON."""
        positions = UltimosGps.objects.filter(patente=licencePlate)
        response = []
        for aPosition in positions:
            response.append({
                'licencePlate': aPosition.patente,
                'authRoute': aPosition.servicio,
                'userRoute': aPosition.servicio_usuario,
                'distOnroute': aPosition.dist_en_ruta,
                'distToRoute': aPosition.dist_a_ruta,
                'InstVelocity': aPosition.velocidad_instantanea,
                'velocity2GPS': aPosition.velocidad_2gps,
                'velocity4GPS': aPosition.velocidad_4gps,
                'operator': aPosition.operador,
                'latitude': aPosition.latitud,
                'longitude': aPosition.longitud,
                'time': aPosition.tiempo,
                'orientation': aPosition.orientacion,
                'type': aPosition.tipo,
                'capacity': aPosition.capacidad})
        return JsonResponse(response, safe=False)
class GetActiveBuses(View):
    '''This class requests to the database the buses are doing a trip '''

    def __init__(self, **kwargs):
        # Forward kwargs so Django's as_view(**initkwargs) keeps working;
        # the original override dropped them and skipped View.__init__.
        super(GetActiveBuses, self).__init__(**kwargs)
        self.context = {}

    def get(self, request):
        """Return [plate, auth route, user route] triples for distinct buses.

        NOTE(review): distinct(*fields) is only supported on PostgreSQL in
        Django -- confirm the configured database backend.
        """
        activeBuses = UltimosGps.objects.order_by('patente', 'servicio').distinct('patente', 'servicio')
        response = []
        for activeBus in activeBuses:
            response.append([
                activeBus.patente,
                activeBus.servicio,
                activeBus.servicio_usuario])
        return JsonResponse(response, safe=False)
|
nilq/baby-python
|
python
|
"""
CLI Module.
Handles CLI for the Repository Updater
"""
from os import environ
from sys import argv
import click
import crayons
from . import APP_FULL_NAME, APP_VERSION
from .github import GitHub
from .repository import Repository
@click.command()
@click.option(
    "--token",
    hide_input=True,
    prompt="GitHub access token",
    help="GitHub access token",
    metavar="<TOKEN>",
)
@click.option(
    "--repository",
    prompt="Home Assistant Addons repository to update",
    help="The Home Assistant Addons repository to update",
    metavar="<orgname/reponame>",
)
@click.option("--addon", help="Update a single/specific add-on", metavar="<TARGET>")
@click.option("--force", is_flag=True, help="Force an update of the add-on repository")
@click.version_option(APP_VERSION, prog_name=APP_FULL_NAME)
def repository_updater(token, repository, addon, force):
    """Community Home Assistant Add-ons Repository Updater."""
    # Print a banner before doing any work.
    click.echo(crayons.blue(APP_FULL_NAME, bold=True))
    click.echo(crayons.blue("-" * 51, bold=True))
    github = GitHub(token)
    click.echo(
        "Authenticated with GitHub as %s"
        % crayons.yellow(github.get_user().name, bold=True)
    )
    # `repository` is rebound from the CLI string to a Repository wrapper.
    repository = Repository(github, repository, addon, force)
    repository.update()
    repository.cleanup()
def git_askpass():
    """
    Git credentials helper.

    Short & sweet script for use with git clone and fetch credentials.
    Requires GIT_USERNAME and GIT_PASSWORD environment variables,
    intended to be called by Git via GIT_ASKPASS.
    """
    # Git invokes GIT_ASKPASS with the prompt text as the sole argument;
    # answer the username/password prompts from the environment.
    # `raise SystemExit` replaces `exit()`, which is only available when the
    # optional `site` module has been loaded.
    if argv[1] == "Username for 'https://github.com': ":
        print(environ["GIT_USERNAME"])
        raise SystemExit()
    if argv[1] == "Password for 'https://" "%(GIT_USERNAME)s@github.com': " % environ:
        print(environ["GIT_PASSWORD"])
        raise SystemExit()
    # Unrecognized prompt: signal failure to Git.
    raise SystemExit(1)
|
nilq/baby-python
|
python
|
'''Functions to calculate seismic noise in suspended optics.
'''
from __future__ import division
import numpy as np
from scipy.interpolate import PchipInterpolator as interp1d
def seismic_suspension_fitered(sus, in_trans):
    """Seismic displacement noise for single suspended test mass.

    :sus: gwinc suspension structure
    :in_trans: input translational displacement spectrum

    :returns: tuple of displacement noise power spectrum at :f:, and
    horizontal and vertical components.
    """
    in_trans_sq = in_trans**2

    # horizontal noise total: table transfer function applied to input motion
    horiz = (abs(sus.hTable)**2) * in_trans_sq
    # vertical noise total, cross-coupled into the readout direction by theta
    vert = (abs(sus.VHCoupling.theta * sus.vTable)**2) * in_trans_sq

    return horiz + vert, horiz, vert
def seismic_BSC_ISI(f):
    """Rough seismic noise spectra on aLIGO BSC ISI table.

    :f: frequency array in Hz

    :returns: tuple of displacement noise power spectrum at :f: for
    translational and rotational DOFs.
    """
    freq_knots = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 30, 300])
    # amplitude knots for translational and rotational DOFs
    trans_knots = np.array([3e-6, 1e-6, 2e-7, 2e-7, 8e-10, 1e-11, 3e-13, 3e-14, 3e-14])
    rot_knots = np.array([1e-8, 3e-8, 2e-8, 1e-8, 4e-10, 1e-11, 3e-13, 3e-14, 3e-14])

    def log_interp(amplitude_knots):
        # interpolate in log-amplitude space, then undo the log
        return 10**(interp1d(freq_knots, np.log10(amplitude_knots))(f))

    return log_interp(trans_knots), log_interp(rot_knots)
def seismic_BSC_ISI_6D(f):
    """Rough seismic noise spectra on aLIGO BSC ISI table with a 6D seismometer.

    This largely follows Mow-Lowry and Martynov, arXiv:1801.01468.

    :f: frequency array in Hz

    :returns: tuple of displacement noise power spectrum at :f: for
    translational and rotational DOFs.
    """
    # FIXME: merge this with above, using flag
    SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 100, 300])
    # Sensor self-noise knots (translational), interpolated in log space.
    SEI_T_self = np.array([1e-7, 1e-9, 3e-11, 6e-12, 3e-13, 1e-13, 3e-14, 1e-14, 1e-14])/1000
    nt_self = 10**(interp1d(SEI_F, np.log10(SEI_T_self))(f))
    # Residual ground motion: low-noise-model ground spectrum suppressed by a
    # 4th-order low-pass blend with 0.01 Hz corner.
    nt_gnd = 10*seismic_ground_NLNM(f)
    blend_t = np.abs(100/(1+1j*f/0.01)**4)
    nt = np.sqrt(nt_self**2 + (blend_t * nt_gnd)**2)
    # Same construction for the rotational DOFs.
    SEI_R_self = np.array([2e-11, 5e-12, 1e-12, 6e-13, 3e-13, 2e-13, 6e-14, 2e-14, 2e-14])/1000
    nr_self = 10**(interp1d(SEI_F, np.log10(SEI_R_self))(f))
    nr_gnd = np.abs(1e-7/(1+1j*f/0.001))
    blend_r = np.abs(100/(1+1j*f/0.01)**4)
    nr = np.sqrt(nr_self**2 + (blend_r * nr_gnd)**2)
    return nt, nr
def seismic_ground_NLNM(f):
    """The Peterson new generic ground motion low noise model.

    :f: frequency array in Hz

    :returns: displacement noise amplitude spectrum at :f:
    """
    # Model knots: period [s], constant term A [dB], and slope B per decade.
    periods = np.array([
        1.00e-02, 1.00e-01, 1.70e-01, 4.00e-01, 8.00e-01, 1.24e+00,
        2.40e+00, 4.30e+00, 5.00e+00, 6.00e+00, 1.00e+01, 1.20e+01,
        1.56e+01, 2.19e+01, 3.16e+01, 4.50e+01, 7.00e+01, 1.01e+02,
        1.54e+02, 3.28e+02, 6.00e+02, 1.00e+04])
    consts = np.array([
        -156.72, -162.36, -166.7, -170.0, -166.4, -168.6, -159.98,
        -141.1, -71.36, -97.26, -132.18, -205.27, -37.65, -114.37,
        -160.58, -187.5, -216.47, -185.0, -168.34, -217.43, -258.28,
        -346.88])
    slopes = np.array([
        5.64, 5.64, 0.0, -8.3, 28.9, 52.48, 29.81,
        0.0, -99.77, -66.49, -31.57, 36.16, -104.33, -47.1,
        -16.28, 0.0, 15.7, 0.0, -7.61, 11.9, 26.6,
        48.75])
    # Acceleration PSD in dB interpolated over period, converted to
    # displacement amplitude: 10**(dB/20) / (2*pi*f)**2.
    accel_db = np.interp(1/f, periods, consts + slopes*np.log10(periods))
    return 10**(accel_db/20) / (2 * np.pi * f)**2
def seismic_ground_NHNM(f):
    """The Peterson new generic ground motion high noise model.

    :f: frequency array in Hz

    :returns: displacement noise amplitude spectrum at :f:
    """
    # Model knots: period [s], constant term A [dB], and slope B per decade.
    periods = np.array([
        1.00e-01, 2.20e-01, 3.20e-01, 8.00e-01, 3.80e+00,
        4.60e+00, 6.30e+00, 7.90e+00, 1.54e+01, 2.00e+01,
        3.54e+02,
    ])
    consts = np.array([
        -108.73, -150.34, -122.31, -116.85, -108.48,
        -74.66, 0.66, -93.37, 73.54, -151.52,
        -206.66,
    ])
    slopes = np.array([
        -17.23, -80.50, -23.87, 32.51, 18.08,
        -32.95, -127.18, -22.42, -162.98, 10.01,
        31.63,
    ])
    # Acceleration PSD in dB interpolated over period, converted to
    # displacement amplitude: 10**(dB/20) / (2*pi*f)**2.
    accel_db = np.interp(1/f, periods, consts + slopes*np.log10(periods))
    return 10**(accel_db/20) / (2 * np.pi * f)**2
|
nilq/baby-python
|
python
|
def func(num):
    """Return triple the given number.

    Bug fix: the original ignored its parameter and returned the module
    global ``x * 3`` instead; use ``num``.
    """
    return num * 3

x = 2
func(x)
|
nilq/baby-python
|
python
|
# Serialized model artifacts
CHECKPOINT = 'model/checkpoint.bin'
MODEL_PATH = 'model/model.bin'
# Training data
input_path = 'input/train.csv'
# Optimizer / LR-scheduler settings
LR = 0.01
scheduler_threshold = 0.001
scheduler_patience = 2
scheduler_decay_factor = 0.5
# Network dimensions (presumably a recurrent encoder -- confirm against the
# model code that consumes these)
embed_dims = 128
hidden_dims = 128
num_layers = 1
bidirectional = False
dropout = 0.2
out_dims = 128
# Training loop
Batch_Size = 64
Epochs = 100
# Decision threshold and loss margin (presumably for a similarity/contrastive
# objective -- confirm against the training code)
similarity_thresh = 0.75
margin = 0.25
|
nilq/baby-python
|
python
|
import pygame
# game
from cgame import CGame
def main():
    """Run the game, printing (but otherwise swallowing) any error."""
    try:
        CGame().run()
    except Exception as e:
        # NOTE(review): only the message is printed -- the traceback is lost.
        # Consider traceback.print_exc()/logging.exception for debugging.
        print(e)

# Initialize pygame before constructing any game objects.
if __name__ == '__main__':
    pygame.init()
    main()
|
nilq/baby-python
|
python
|
from flask import Flask, Request, jsonify, request
from src.driver import FirestoreDriverImpl
from src.interactor import SolverInteractor
from src.repository import RoomRepositoryImpl
from src.rest import BaseException, ClientException, SolverResource
# Wire the solver use-case to its Firestore-backed room repository once at
# module import time, so a single instance is reused across requests.
solver_resource = SolverResource(
    solver_usecase=SolverInteractor(
        room_repository=RoomRepositoryImpl(
            firestore_driver=FirestoreDriverImpl()
        )
    )
)
def solve(request: Request):
    """HTTP handler: solve a setlist for a room.

    Expects a JSON body with 'room_id' and 'time_limit'; 'c_weight',
    'timeout' and 'num_unit_step' are optional tuning parameters.
    Returns (body, status, headers) tuples with permissive CORS headers.
    """
    # CORS preflight request.
    if request.method == 'OPTIONS':
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '3600',
        }
        return '', 204, headers
    headers = {'Access-Control-Allow-Origin': '*'}
    try:
        if "content-type" not in request.headers:
            raise ClientException("Require HTTP header 'Content-Type'")
        content_type = request.headers["content-type"]
        if content_type == "application/json":
            request_json = request.get_json(silent=True)
            if request_json and "room_id" in request_json and "time_limit" in request_json:
                room_id = request_json["room_id"]
                time_limit = request_json["time_limit"]
                # Optional parameters with defaults.
                c_weight = request_json["c_weight"] if "c_weight" in request_json else 3
                timeout = request_json["timeout"] if "timeout" in request_json else 3000
                num_unit_step = request_json["num_unit_step"] if "num_unit_step" in request_json else 10
            else:
                raise ClientException("JSON is invalid. Missing a 'room_id' or 'time_limit' property.")
        else:
            raise ClientException(f"Unknown content type: {content_type}")
        return solver_resource.solve(room_id, time_limit, c_weight, timeout, num_unit_step), 200, headers
    except BaseException as e:
        # NOTE: BaseException here is src.rest.BaseException (it shadows the
        # builtin, per the import at the top of this file).
        return jsonify({"error": e.message}), e.code, headers
# Local development server; exposes the handler on a Flask route.
if __name__ == "__main__":
    app = Flask(__name__)

    @app.route("/setlist_solver", methods=["POST"])
    def index():
        return solve(request)

    app.run("127.0.0.1", 8000, debug=True)
|
nilq/baby-python
|
python
|
import netifaces
import time
from collections import namedtuple
from aplus import Promise
from openmtc_server.exc import InterfaceNotFoundException
from openmtc_server.transportdomain.NetworkManager import NetworkManager
Interface = namedtuple("Interface", ("name", "addresses", "hwaddress"))
Address = namedtuple("Address", ("address", "family"))
class GEventNetworkManager(NetworkManager):
    """NetworkManager that watches netifaces for interface/address changes.

    A background polling task snapshots interfaces and their addresses each
    iteration, diffs against the previous snapshot, and fires
    interface_created/removed and address_created/removed events on the API.
    """

    def __init__(self, config, *args, **kw):
        super(GEventNetworkManager, self).__init__(*args, **kw)
        self._api = None
        self.config = config
        # Loop condition for start_polling(); cleared by stop().
        self.polling = True
        self.logger.info("GEventNetworkManager loaded")

    def initialize(self, api):
        self._api = api
        self.logger.info("GEventNetworkManager initialized")
        self.start()

    def start(self):
        # self.api.register_connectivity_handler(self.connectivity_request)
        self.polling = True
        # Run the polling loop as a background task on the plugin API.
        self._api.run_task(self.start_polling)
        self.logger.info("GEventNetworkManager started")

    def stop(self):
        # The polling loop observes this flag and exits after its next sleep.
        self.polling = False
        self.logger.info("GEventNetworkManager stopped")

    def connectivity_request(self):
        """Handles connectivity requests"""
        # please note: normally we get an rcat argument, default: rcat=0
        with Promise() as p:
            # Pick the first non-loopback interface.
            blacklist = ['lo']
            interfaces = netifaces.interfaces()
            interface = next((x for x in interfaces if (x not in blacklist)),
                             None)
            if interface is None:
                p.reject(InterfaceNotFoundException(
                    "No interfaces found matching request"))
            else:
                p.fulfill((self._get_interface(interface), 0))
        return p

    def start_polling(self, timeout=1):
        """Poll netifaces information and check for differences, for as long as
        self.polling == True.

        :param timeout: Amount of time to wait between polling
        """
        last_interfaces = cur_interfaces = netifaces.interfaces()
        cur_interfaces_copy = list(cur_interfaces)
        # Per-interface address snapshot from the previous iteration.
        last_ifaddresses = {}
        for iface in last_interfaces:
            last_ifaddresses[iface] = netifaces.ifaddresses(iface)
        self.logger.debug("polling started")
        while self.polling:
            try:
                cur_interfaces = netifaces.interfaces()
                cur_interfaces_copy = list(cur_interfaces)
                # Symmetric difference: interfaces added or removed since last poll.
                intersection = set(last_interfaces) ^ set(cur_interfaces)
                if len(intersection) > 0:
                    self.logger.debug("difference detected")
                    self.logger.debug("last interfaces: %s", last_interfaces)
                    self.logger.debug("current interfaces: %s", cur_interfaces)
                    for isetface in intersection:
                        if isetface in cur_interfaces:
                            # new interface
                            self.logger.debug("Firing %s event for %s",
                                              "interface_created", isetface)
                            self._api.events.interface_created.fire(
                                self._create_interface(
                                    isetface, netifaces.ifaddresses(isetface)))
                        else:
                            # removed interface: report its last-known addresses
                            self.logger.debug("Firing %s event for %s",
                                              "interface_removed", isetface)
                            self._api.events.interface_removed.fire(
                                self._create_interface(
                                    isetface, last_ifaddresses[isetface]))
                # Diff the address sets of interfaces that persisted.
                for iface in cur_interfaces:
                    cur_ifaddresses = netifaces.ifaddresses(iface)
                    if (iface in last_ifaddresses and
                            last_ifaddresses[iface] != cur_ifaddresses):
                        self._check_ifaddresses_diff(last_ifaddresses[iface],
                                                     cur_ifaddresses, iface)
                    last_ifaddresses[iface] = cur_ifaddresses
            except Exception as e:
                self.logger.exception("Something went wrong during polling: %s",
                                      e)
            finally:
                # updating last stuff to current stuff
                last_interfaces = cur_interfaces_copy
                time.sleep(timeout)
        self.logger.debug("polling done")

    def get_interfaces(self):
        """Returns all known network interfaces

        :return Promise([Interface]): a promise for a list of interfaces
        """
        with Promise() as p:
            interfaces = []
            for iface in netifaces.interfaces():
                interfaces.append(self._get_interface(iface))
            # check if array has duplicates
            # does this even work with namedtuple(s)?
            # interfaces = list(set(interfaces))
            p.fulfill(interfaces)
        return p

    def get_interface(self, name):
        """Returns an Interface object identified by name

        :param name: name of interface
        :return Promise(Interface): a promise for an interface
        :raise InterfaceNotFoundException: if interface was not found
        """
        with Promise() as p:
            if name not in netifaces.interfaces():
                p.reject(InterfaceNotFoundException("%s was not found" % name))
            else:
                p.fulfill(self._get_interface(name))
        return p

    def get_addresses(self, interface=None):
        """Get addresses of a given interface or all addresses if :interface: is
        None

        :param interface: name of interface
        :return: Promise([Address]): a promise for a list of addresses
        """
        with Promise() as p:
            p.fulfill(self._get_addresses(interface))
        return p

    def _get_addresses_from_ifaddresses(self, ifaddresses):
        """Get addresses of a given interface

        :param ifaddresses: raw addresses of interface (from netifaces)
        :return: list of addresses
        """
        addresses = []
        for family in ifaddresses:
            if family != netifaces.AF_LINK:  # no hwaddr
                for addr in ifaddresses[family]:
                    a = addr["addr"]
                    if family == netifaces.AF_INET6:
                        # strip the "%scope" suffix from link-local addresses
                        a = self._remove_ipv6_special_stuff(a)
                    addresses.append(
                        Address(address=a, family=family))
        return addresses

    def _get_addresses(self, iface=None):
        """Get addresses of a given interface

        :param iface: name of interface; None means all interfaces
        :return: list of addresses
        """
        if iface is None:
            interfaces = netifaces.interfaces()
        else:
            interfaces = [iface]
        addresses = []
        for interface in interfaces:
            n_addresses = netifaces.ifaddresses(interface)
            addresses += self._get_addresses_from_ifaddresses(n_addresses)
        # check if array has duplicates
        # addresses = list(set(addresses))
        return addresses

    def _create_interface(self, name, ifaddresses):
        """Create Interface tuple based on given interfaces addresses. (function
        independent of netifaces)

        :param name: interface name
        :param ifaddresses: raw address dict in netifaces layout
        :return: Interface namedtuple
        """
        addresses = self._get_addresses_from_ifaddresses(ifaddresses)
        try:
            hwaddress = ifaddresses[netifaces.AF_LINK][0]["addr"]
        except (IndexError, KeyError):
            self.logger.debug("No hardware address found for %s!", name)
            hwaddress = None
        return Interface(name=name,
                         addresses=addresses,
                         hwaddress=hwaddress)

    def _get_interface(self, name):
        """Returns an Interface object identified by name

        :param name: name of interface
        :return Interface: interface
        :raise UnknownInterface: if interface was not found
        """
        if name not in netifaces.interfaces():
            raise InterfaceNotFoundException("%s was not found" % name)
        else:
            ifaddresses = netifaces.ifaddresses(name)
            addresses = self._get_addresses_from_ifaddresses(ifaddresses)
            try:
                hwaddress = ifaddresses[netifaces.AF_LINK][0]["addr"]
            except (IndexError, KeyError):
                self.logger.debug("No hardware address found for %s!", name)
                hwaddress = None
            return Interface(name=name,
                             addresses=addresses,
                             hwaddress=hwaddress)

    def _check_ifaddresses_diff(self, lifaddr, cifaddr, iface):
        """parses last and current interface addresses of a given interface and
        fires events for discovered differences

        :param lifaddr: dict of family:addresses (last addresses)
        :param cifaddr: dict of family:addresses (curr addresses)
        :param iface: str name of interface (needed only to create interface for
        event firing)
        """
        self.logger.debug("checking difference of \r\n%s vs \r\n%s", lifaddr,
                          cifaddr)
        # Families present in only one snapshot => whole family added/removed.
        intersection = set(lifaddr.keys()) ^ set(cifaddr.keys())
        if len(intersection) > 0:
            self.logger.debug(
                "Sensing a change in address families of interface %s", iface)
            # first check if new address family
            self.logger.debug("Iterating through %s", intersection)
            for isectkey in intersection:
                if isectkey in cifaddr.keys():
                    for addr in cifaddr.get(isectkey, []):
                        self.logger.debug("Firing %s event for %s of %s",
                                          "address_created", addr, iface)
                        a = Address(address=addr["addr"], family=isectkey)
                        self._api.events.address_created.fire(iface, a)
                elif isectkey in lifaddr.keys():
                    for addr in lifaddr.get(isectkey, []):
                        self.logger.debug("Firing %s event for %s of %s",
                                          "address_removed", addr, iface)
                        a = Address(address=addr["addr"], family=isectkey)
                        self._api.events.address_removed.fire(iface, a)
        else:
            # Same families on both sides: diff address dicts pairwise.
            for key in lifaddr.keys():
                # check for removed addresses (contained only in lifaddr)
                removed_addr = []
                for laddr in lifaddr.get(key):
                    for caddr in cifaddr.get(key):
                        d = DictDiffer(caddr, laddr)
                        if len(d.changed()) == 0:
                            # this means both addresses are the same -> remove
                            # from removed_addr list
                            if laddr in removed_addr:
                                removed_addr.remove(laddr)
                            break
                        else:
                            # else add address to unknown/removed addresses
                            if laddr not in removed_addr:
                                removed_addr.append(laddr)
                if len(removed_addr) > 0:
                    self.logger.debug("removed addresses found: %s",
                                      removed_addr)
                    for raddr in removed_addr:
                        self.logger.debug("Firing %s event for %s of %s",
                                          "address_removed", raddr, iface)
                        a = Address(address=raddr["addr"], family=key)
                        self._api.events.address_removed.fire(iface, a)
                # now check for added addresses (contained only in cifaddr)
                added_addr = []
                for caddr in cifaddr.get(key):
                    for laddr in lifaddr.get(key):
                        d = DictDiffer(caddr, laddr)
                        if len(d.changed()) == 0:
                            # this means both addresses are the same -> remove
                            # from added_addr list
                            if caddr in added_addr:
                                added_addr.remove(caddr)
                            break
                        else:
                            # else add address to unknown/added addresses
                            if caddr not in added_addr:
                                added_addr.append(caddr)
                if len(added_addr) > 0:
                    self.logger.debug("added addresses found: %s", added_addr)
                    for aaddr in added_addr:
                        self.logger.debug("Firing %s event for %s of %s",
                                          "address_created", aaddr, iface)
                        a = Address(address=aaddr["addr"], family=key)
                        self._api.events.address_created.fire(iface, a)

    @staticmethod
    def _remove_ipv6_special_stuff(address):
        # Drop the "%<scope>" zone suffix from link-local IPv6 addresses.
        return address.split("%")[0]
class DictDiffer(object):
    """
    Calculate the difference between two dictionaries as:
    (1) items added
    (2) items removed
    (3) keys same in both but changed values
    (4) keys same in both and unchanged values
    """

    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.set_current = set(current_dict)
        self.set_past = set(past_dict)
        self.intersect = self.set_current & self.set_past

    def added(self):
        """Keys present only in the current dict."""
        return self.set_current - self.intersect

    def removed(self):
        """Keys present only in the past dict."""
        return self.set_past - self.intersect

    def changed(self):
        """Shared keys whose values differ."""
        return {key for key in self.intersect
                if self.past_dict[key] != self.current_dict[key]}

    def unchanged(self):
        """Shared keys whose values are equal."""
        return {key for key in self.intersect
                if self.past_dict[key] == self.current_dict[key]}
|
nilq/baby-python
|
python
|
import elasticsearch
import json
import click
from toolz import iterate, curry, take
from csv import DictReader
from elasticsearch.helpers import streaming_bulk
# Name of the target Elasticsearch index.
nuforc_report_index_name = 'nuforc'
# Index mapping: free-text report fields, dates that ignore malformed values,
# keyword fields for exact-match filtering/aggregation, and a geo_point
# assembled from the city coordinates at load time.
nuforc_report_index_body = {
    "mappings": {
        "properties": {
            "text": {
                "type": "text"
            },
            "stats": {
                "type": "text"
            },
            "date_time": {
                "type": "date",
                "format": "date_hour_minute_second",
                "ignore_malformed": True
            },
            "report_link": {
                "type": "text"
            },
            "city": {
                "type": "keyword"
            },
            "state": {
                "type": "keyword"
            },
            "shape": {
                "type": "keyword"
            },
            "duration": {
                "type": "text"
            },
            "summary": {
                "type": "text"
            },
            "posted": {
                "type": "date",
                "format": "date_hour_minute_second",
                "ignore_malformed": True
            },
            "city_latitude": {
                "type": "float"
            },
            "city_longitude": {
                "type": "float"
            },
            "location": {
                "type": "geo_point"
            }
        }
    }
}
def nuforc_bulk_action(doc, doc_id):
    """ Binds a document / id to an action for use with the _bulk endpoint.
    """
    lat = doc["city_latitude"]
    lon = doc["city_longitude"]
    # Only build a geo_point when both coordinates are present and non-empty.
    location = {"lat": float(lat), "lon": float(lon)} if lat and lon else None
    return {
        "_op_type": "index",
        "_index": nuforc_report_index_name,
        "_id": doc_id,
        "_source": {"location": location, **doc},
    }
@click.command()
@click.argument("report_file", type=click.File('r'))
def main(report_file):
    """ Creates an Elasticsearch index for the NUFORC reports and loads the
    processed CSV file into it.
    """
    # Default client connects to localhost:9200.
    client = elasticsearch.Elasticsearch()
    index_client = elasticsearch.client.IndicesClient(client)
    # Drop the index if it exists; it will be replaced. This is the most efficient
    # way to delete the data from an index according to ES documentation.
    if index_client.exists(nuforc_report_index_name):
        index_client.delete(nuforc_report_index_name)
    # Create the index with the appropriate mapping.
    index_client.create(nuforc_report_index_name, nuforc_report_index_body)
    reports = DictReader(report_file)
    # Zip the reports with an id generator, embedding them in the actions.
    report_actions = map(nuforc_bulk_action, reports, iterate(lambda x: x+1, 0))
    # Stream the reports into the ES database.
    for ok,resp in elasticsearch.helpers.streaming_bulk(client, report_actions):
        if not ok:
            # Log documents that failed to index.
            print(resp)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import numpy as np
import sys
class RobotData:
    """
    Stores sensor data at a particular frame

    Attributes
    ----------
    position : numpy.ndarray
        (x, y) position of the robot, parsed from the frame JSON
    object_sensor : list[GameObject]
        objects reported by the object sensor (position + name each)
    """

    def __init__(self, json_object):
        # "player_position" arrives as {"x": ..., "y": ...}; keep as (x, y).
        self.position = np.fromiter(json_object["player_position"].values(), dtype=float)
        self.object_sensor = self.formatObjectSensorData(json_object)
        # NOTE(review): a `position()` method used to exist here, but the
        # attribute assigned above shadowed it, making it uncallable; the dead
        # method has been removed.

    def sensor(self, heading):
        """
        Return the object-sensor entry closest to the specified heading.

        Parameters
        ----------
        heading : int
            Angle representing line of sight measured clockwise from the
            positive vertical axis, for any rotation of the robot.

        Notes
        -----
        Headings are quantized to 5-degree bins, so ``object_sensor`` is
        indexed in steps of 5 degrees (72 bins for a full circle).
        """
        # Round heading to nearest multiple of 5, wrap into [0, 360),
        # then convert to a bin index.
        canonical_angle = 5 * round(heading / 5)
        canonical_angle = int(canonical_angle % 360 / 5)
        return self.object_sensor[canonical_angle]

    def __repr__(self):
        return (f"Position: {self.position}\n")

    def formatObjectSensorData(self, json_object):
        """Convert the raw detected-object dicts into GameObject instances."""
        detected = np.array(json_object["object_sensor_data"]["detected_objects"])
        res = []
        for object_info in detected:
            pos = np.fromiter(object_info['position'].values(), dtype=float)
            name = object_info['name']
            res += [GameObject(pos, name)]
        return res
class GameObject():
    """A single object detected by the robot's sensor."""

    def __init__(self, position, name):
        """Store the (x, y) position array and the object's type name.

        The caller has already converted the JSON form
        {"position": {"x": 0, "y": 0}, "name": "example"} into these values.
        """
        self.type = name
        self.position = position

    def __repr__(self):
        return f"(Position: {self.position}, Name: {self.type})\n"
|
nilq/baby-python
|
python
|
from makeit.utilities.fastfilter_utilities import Highway_self, pos_ct, true_pos, real_pos, set_keras_backend
from makeit.utilities.fingerprinting import create_rxn_Morgan2FP_separately
from rdkit import Chem
from rdkit.Chem import AllChem, DataStructs
from makeit.interfaces.scorer import Scorer
import numpy as np
import csv
from pymongo import MongoClient
from tqdm import tqdm
from keras.models import load_model
from keras import backend as K
import makeit.global_config as gc
from makeit.utilities.io.logger import MyLogger
import os
fast_filter_loc = 'fast_filter'
class FastFilterScorer(Scorer):
    """Keras-based binary filter scoring the plausibility of a reaction outcome."""

    def __init__(self):
        self.model = None  # set by load()

    def set_keras_backend(self, backend):
        """Switch the active Keras backend; no-op when already active."""
        if K.backend() != backend:
            # Fix: bare ``reload`` is a NameError on Python 3.
            from importlib import reload
            os.environ['KERAS_BACKEND'] = backend
            reload(K)
            assert K.backend() == backend

    def load(self, model_path):
        """Load the trained fast-filter model from *model_path*."""
        MyLogger.print_and_log('Starting to load fast filter', fast_filter_loc)
        self.model = load_model(model_path, custom_objects={
            'Highway_self': Highway_self, 'pos_ct': pos_ct, 'true_pos': true_pos, 'real_pos': real_pos})
        # Build the predict function eagerly (Keras private API).
        self.model._make_predict_function()
        MyLogger.print_and_log('Done loading fast filter', fast_filter_loc)

    def evaluate(self, reactant_smiles, target, **kwargs):
        """Score the transformation reactants -> target.

        Returns the score wrapped in the standard nested outcome structure:
        [[{'rank', 'outcome', 'score', 'prob'}]].
        """
        [pfp, rfp] = create_rxn_Morgan2FP_separately(
            reactant_smiles, target, rxnfpsize=2048, pfpsize=2048, useFeatures=False)
        pfp = np.asarray(pfp, dtype='float32')
        rfp = np.asarray(rfp, dtype='float32')
        # Reaction fingerprint = product fp - reactant fp.
        rxnfp = pfp - rfp
        score = self.model.predict(
            [pfp.reshape(1, 2048), rxnfp.reshape(1, 2048)])
        outcome = {'smiles': target,
                   'template_ids': [],
                   'num_examples': 0
                   }
        all_outcomes = []
        all_outcomes.append([{'rank': 1.0,
                              'outcome': outcome,
                              'score': float(score[0][0]),
                              'prob': float(score[0][0]),
                              }])
        return all_outcomes

    def filter_with_threshold(self, reactant_smiles, target, threshold):
        """Return (score > threshold, score) for the given transformation."""
        [pfp, rfp] = create_rxn_Morgan2FP_separately(
            reactant_smiles, target, rxnfpsize=2048, pfpsize=2048, useFeatures=False)
        pfp = np.asarray(pfp, dtype='float32')
        rfp = np.asarray(rfp, dtype='float32')
        rxnfp = pfp - rfp
        score = self.model.predict([pfp.reshape(1, 2048), rxnfp.reshape(1, 2048)])
        filter_flag = (score > threshold)
        return filter_flag, float(score)
if __name__ == "__main__":
    ff = FastFilterScorer()
    ff.load(model_path=gc.FAST_FILTER_MODEL['trained_model_path'])
    # Score one fixed reactant against a list of candidate products.
    reactant = 'OCC(C(C(C=O)O)O)O'
    targets = [
        'O=C[C@H](/C=C(/CO)\O)O.O',
        'O[C@@H]1C(=O)[C@H]([C@@H]([C@H]1O)O)O.[H][H]',
        'OC[C@@H](CC(C=O)(O)O)O',
        'OCO[C@@H]([C@H](CO)O)C=O',
        'OC[C@@H]1OC(=O)[C@H]([C@H]1O)O.[H][H]',
        'O=C[C@H](C[C@H](C(O)O)O)O',
        'OCC[C@@H](C(C=O)(O)O)O',
        'O[C@H]1CC(=O)[C@H]([C@@H]1O)O.O',
        'O[C@@H]([C@H](CO)O)OCC=O',
        'O=CO[C@H]([C@@H](CO)O)CO',
    ]
    for target in targets:
        score = ff.evaluate(reactant, target)
        print(score)
    """
    flag, sco = ff.filter_with_threshold('CCO.CC(=O)O', 'CCOC(=O)C', 0.75)
    print(flag)
    print(sco)
    """
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from distutils.core import setup
import glob
from setuptools import setup
# Read the long description, closing the file deterministically instead of
# relying on garbage collection, and decoding explicitly as UTF-8 (the
# author fields contain non-ASCII characters).
with open('README.md', encoding='utf-8') as readme_file:
    readme = readme_file.read()

setup(
    name='ReMoTE',
    version='0.1',
    description='Registration of Mobyle Tools in Elixir',
    long_description=readme,
    author='Hervé Ménager',
    author_email='hmenager@pasteur.fr',
    url='https://github.com/bioinfo-center-pasteur-fr/ReMoTE.git',
    packages=['remote'],
    install_requires=[
        'lxml', 'requests'
    ],
    license="BSD",
    entry_points={
        'console_scripts': ['remote=remote:main'],
    },
    include_package_data=True,
    zip_safe=False
)
|
nilq/baby-python
|
python
|
class StateMachine:
    """A simple table-driven state machine.

    States are referred to by name. Each state owns a list of outgoing
    transitions ``(condition, event, next_state)``; ``update()`` fires every
    transition whose condition holds, then runs the action registered for
    the (possibly new) current state.
    """

    def __init__(self):
        self.__states = {}  #: dict[str] -> [(condition, event, next_state)]
        self.actions = {}   #: dict[str] -> action callable, run each update
        self.currentState = "start"  #: str
        self.addState("start")

    def addState(self, name):
        """Register a state name (idempotent)."""
        if name not in self.__states:
            self.__states[name] = []

    def addTransition(self, fromState, toState, condition, event):
        """Add a transition; both states are registered so that reaching
        ``toState`` cannot make a later update() raise KeyError."""
        self.addState(fromState)
        self.addState(toState)
        self.__states[fromState].append((condition, event, toState))

    def update(self):
        """Update the state machine."""
        # Iterate the transition list of the state current at entry; a firing
        # transition does not stop later transitions from being checked.
        transitions = self.__states[self.currentState]
        for (check, event, nextState) in transitions:
            if check():
                self.currentState = nextState
                # Fix: was a Python-2 print statement (SyntaxError on py3).
                print("sm new state: ", nextState)
                event()
        action = self.actions.get(self.currentState)
        if action is not None:
            action()
|
nilq/baby-python
|
python
|
import numpy as np
def meshTensor(value):
    """**meshTensor** takes a list of numbers and tuples
    that have the form::

        mT = [ float, (cellSize, numCell), (cellSize, numCell, factor) ]

    For example, a time domain mesh code needs
    many time steps at one time::

        [(1e-5, 30), (1e-4, 30), 1e-3]

    Means take 30 steps at 1e-5 and then 30 more at 1e-4,
    and then one step of 1e-3.

    Tensor meshes can also be created by increase factors::

        [(10.0, 5, -1.3), (10.0, 50), (10.0, 5, 1.3)]

    When there is a third number in the tuple, it
    refers to the increase factor; if this number
    is negative this section of the tensor is flipped right-to-left.

    :raises TypeError: if *value* is not a list or contains an element that
        is neither a scalar nor a 2- or 3-tuple.  (TypeError subclasses
        Exception, so existing ``except Exception`` callers still work.)
    """
    if not isinstance(value, list):
        raise TypeError('meshTensor must be a list of scalars and tuples.')
    proposed = []
    for v in value:
        if np.isscalar(v):
            proposed.append(float(v))
        elif isinstance(v, tuple) and len(v) == 2:
            # (cellSize, numCell): numCell uniform cells of width cellSize.
            proposed += [float(v[0])] * int(v[1])
        elif isinstance(v, tuple) and len(v) == 3:
            # (cellSize, numCell, factor): geometrically growing cells.
            start = float(v[0])
            num = int(v[1])
            factor = float(v[2])
            pad = ((np.ones(num) * np.abs(factor)) ** (np.arange(num) + 1)) * start
            if factor < 0:
                pad = pad[::-1]  # negative factor: flip the section
            proposed += pad.tolist()
        else:
            raise TypeError('meshTensor must contain only scalars and len(2) or len(3) tuples.')
    return np.array(proposed)
|
nilq/baby-python
|
python
|
import numpy

# Coefficients for the third-order (r=2) reconstruction used by weno3_upwind:
# C_3 = optimal linear weights, a_3 = sub-stencil reconstruction coefficients,
# sigma_3 = smoothness-indicator coefficients.
C_3 = numpy.array([1, 2]) / 3
a_3 = numpy.array([[3, -1], [1, 1]]) / 2
sigma_3 = numpy.array([[[1, 0], [-2, 1]], [[1, 0], [-2, 1]]])

# Coefficients for the fifth-order (r=3) reconstruction used by weno5_upwind.
C_5 = numpy.array([1, 6, 3]) / 10
a_5 = numpy.array([[11, -7, 2], [2, 5, -1], [-1, 5, 2]]) / 6
sigma_5 = numpy.array([[[40, 0, 0],
                     [-124, 100, 0],
                     [44, -76, 16] ],
                    [[16, 0, 0],
                     [-52, 52, 0],
                     [20, -52, 16] ],
                    [[16, 0, 0],
                     [-76, 44, 0],
                     [100, -124, 40] ] ]) / 12

# Lookup tables keyed by reconstruction order r (2 -> third-order tables,
# 3 -> fifth-order tables), used by the generic weno_upwind() below.
C_all = { 2 : C_3,
          3 : C_5 }
a_all = { 2 : a_3,
          3 : a_5 }
sigma_all = { 2 : sigma_3,
              3 : sigma_5 }
def weno3_upwind(q):
    """Third-order WENO reconstruction from a 3-point upwind stencil.

    Parameters
    ----------
    q : array_like, length 3
        Cell values on the stencil.

    Returns
    -------
    float
        Weighted combination of the two sub-stencil reconstructions.
    """
    order = 2
    epsilon = 1e-16  # guards the division for perfectly flat stencils
    alpha = numpy.zeros(order)
    beta = numpy.zeros(order)
    q_stencils = numpy.zeros(order)
    for k in range(order):
        # Smoothness indicator beta[k] of sub-stencil k.
        for l in range(order):
            for m in range(l):
                beta[k] += sigma_3[k, l, m] * q[1 + k - l] * q[1 + k - m]
        # NOTE(review): classical WENO uses C / (eps + beta)**2; here it is
        # C / (eps + beta**2) — confirm this is intentional.
        alpha[k] = C_3[k] / (epsilon + beta[k]**2)
        # Sub-stencil reconstruction.
        for l in range(order):
            q_stencils[k] += a_3[k, l] * q[1 + k - l]
    w = alpha / numpy.sum(alpha)  # normalised nonlinear weights
    return numpy.dot(w, q_stencils)
def weno3(q, simulation):
    """Third-order WENO reconstruction of minus/plus interface states.

    Interior points only (two ghost cells on each side are left at zero);
    ``simulation`` is accepted for interface compatibility and unused.
    """
    nvars, npoints = q.shape
    q_minus = numpy.zeros_like(q)
    q_plus = numpy.zeros_like(q)
    for var in range(nvars):
        for idx in range(2, npoints - 2):
            q_plus[var, idx] = weno3_upwind(q[var, idx - 1:idx + 2])
            q_minus[var, idx] = weno3_upwind(q[var, idx + 1:idx - 2:-1])
    return q_minus, q_plus
def weno5_upwind(q):
    """Fifth-order WENO reconstruction from a 5-point upwind stencil.

    Parameters
    ----------
    q : array_like, length 5
        Cell values on the stencil.

    Returns
    -------
    float
        Weighted combination of the three sub-stencil reconstructions.
    """
    order = 3
    epsilon = 1e-16  # guards the division for perfectly flat stencils
    alpha = numpy.zeros(order)
    beta = numpy.zeros(order)
    q_stencils = numpy.zeros(order)
    for k in range(order):
        # Smoothness indicator beta[k] of sub-stencil k.
        for l in range(order):
            for m in range(l):
                beta[k] += sigma_5[k, l, m] * q[2 + k - l] * q[2 + k - m]
        # NOTE(review): same (eps + beta**2) form as weno3_upwind — classical
        # WENO uses (eps + beta)**2; confirm intentional.
        alpha[k] = C_5[k] / (epsilon + beta[k]**2)
        # Sub-stencil reconstruction.
        for l in range(order):
            q_stencils[k] += a_5[k, l] * q[2 + k - l]
    w = alpha / numpy.sum(alpha)  # normalised nonlinear weights
    return numpy.dot(w, q_stencils)
def weno5(q, simulation):
    """Fifth-order WENO reconstruction of minus/plus interface states.

    Interior points only (three ghost cells on each side are left at zero);
    ``simulation`` is accepted for interface compatibility and unused.
    """
    nvars, npoints = q.shape
    q_minus = numpy.zeros_like(q)
    q_plus = numpy.zeros_like(q)
    for var in range(nvars):
        for idx in range(3, npoints - 3):
            q_plus[var, idx] = weno5_upwind(q[var, idx - 2:idx + 3])
            q_minus[var, idx] = weno5_upwind(q[var, idx + 2:idx - 3:-1])
    return q_minus, q_plus
def weno_upwind(q, order):
    """Generic WENO reconstruction from a (2*order - 1)-point upwind stencil.

    Parameters
    ----------
    q : array_like, length 2*order - 1
        Cell values on the stencil.
    order : int
        Reconstruction order key (2 or 3), used to select the coefficient
        tables C_all / a_all / sigma_all.

    Returns
    -------
    float
        Weighted combination of the sub-stencil reconstructions.
    """
    a = a_all[order]
    C = C_all[order]
    sigma = sigma_all[order]
    epsilon = 1e-16  # guards the division for perfectly flat stencils
    alpha = numpy.zeros(order)
    beta = numpy.zeros(order)
    q_stencils = numpy.zeros(order)
    for k in range(order):
        # Smoothness indicator beta[k] of sub-stencil k.
        for l in range(order):
            for m in range(l):
                beta[k] += sigma[k, l, m] * q[order-1+k-l] * q[order-1+k-m]
        # NOTE(review): (eps + beta**2) form, matching the fixed-order
        # versions above — classical WENO uses (eps + beta)**2.
        alpha[k] = C[k] / (epsilon + beta[k]**2)
        # Sub-stencil reconstruction.
        for l in range(order):
            q_stencils[k] += a[k, l] * q[order-1+k-l]
    w = alpha / numpy.sum(alpha)  # normalised nonlinear weights
    return numpy.dot(w, q_stencils)
def weno(q, simulation, order):
    """Generic WENO reconstruction of minus/plus interface states.

    Uses the coefficient tables selected by ``order`` via weno_upwind;
    ``simulation`` is accepted for interface compatibility and unused.
    """
    nvars, npoints = q.shape
    q_minus = numpy.zeros_like(q)
    q_plus = numpy.zeros_like(q)
    for var in range(nvars):
        for idx in range(order, npoints - order):
            q_plus[var, idx] = weno_upwind(q[var, idx + 1 - order:idx + order], order)
            q_minus[var, idx] = weno_upwind(q[var, idx + order - 1:idx - order:-1], order)
    return q_minus, q_plus
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup
import urllib, requests, re
import json
import datetime
from datetime import date
def cnt1():
    """Fetch today's per-city COVID-19 figures from the Korean open-data API.

    Returns
    -------
    list
        [nationwide daily increase, Busan daily increase, nationwide deaths];
        entries remain 0 when the corresponding XML field is missing.
    """
    # strftime zero-pads both month and day; the previous manual string
    # assembly padded only the month, producing invalid dates for days 1-9.
    fanta = datetime.datetime.now().strftime("%Y%m%d")
    url="http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson?serviceKey=JGMlPMEcTuNV8sbu5JRfjhwjPXMdCv1OJ1qQefm0vVuKWGKtGHAcJEWtm63GOVyMQYAcI%2BoXUBe0nsJ4w3RiZw%3D%3D&pageNo=1&numOfRows=10&startCreateDt="+fanta+"&endCreateDt="+fanta #call back url
    print(url)
    cola = requests.get(url).text
    sida = BeautifulSoup(cola, "html.parser")
    items = sida.find("items")
    result = [0, 0, 0]
    for item in items:
        # Child nodes include bare text nodes; those raise AttributeError
        # below (no <gubun> tag) and are skipped.
        try:
            hapgae = item.find("gubun").string
        except AttributeError:
            continue
        if hapgae == "합계":  # nationwide totals row
            try:
                result[0] = item.find("incdec").string
            except AttributeError:
                pass
            try:
                result[2] = item.find("deathcnt").string
            except AttributeError:
                pass
        if hapgae == "부산":  # Busan row
            try:
                result[1] = item.find("incdec").string
            except AttributeError:
                pass
    return result
def cnt2():
    """Fetch nationwide cumulative COVID-19 figures for yesterday..today.

    Returns
    -------
    list
        [confirmed, released, deaths]; entries remain 0 when the
        corresponding XML field is missing.
    """
    # Real date arithmetic replaces the old logic, which hard-coded day 28
    # on the 1st of the month and did not zero-pad the day of month.
    now = datetime.datetime.now()
    fanta = now.strftime("%Y%m%d")
    fanta1 = (now - datetime.timedelta(days=1)).strftime("%Y%m%d")
    url="http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19InfStateJson?serviceKey=0XeO7nbthbiRoMUkYGGah20%2BfXizwc0A6BfjrkL6qhh2%2Fsl8j9PzfSLGKnqR%2F1v%2F%2B6AunxntpLfoB3Ryd3OInQ%3D%3D&pageNo=1&numOfRows=10&startCreateDt="+fanta1+"&endCreateDt="+fanta #call back url
    cola = requests.get(url).text
    sida = BeautifulSoup(cola, "html.parser")
    items = sida.find("items")
    result = [0, 0, 0]
    for item in items:
        # Missing tags (or bare text nodes) raise AttributeError; the
        # corresponding slot keeps its default of 0.
        for idx, field in enumerate(("decidecnt", "clearcnt", "deathcnt")):
            try:
                result[idx] = item.find(field).string
            except AttributeError:
                pass
    return result
if __name__ == "__main__":
    # Command-line smoke test of both API helpers.
    for stats in (cnt1(), cnt2()):
        print(stats)
|
nilq/baby-python
|
python
|
"""Time
Calculate the time of a code to run.
Code example: product of the first 100.000 numbers.
"""
import time
def product(limit=100000):
    """Return the product 1 * 2 * ... * (limit - 1).

    Parameters
    ----------
    limit : int, optional
        Exclusive upper bound of the range.  The default (100000)
        reproduces the original behaviour: the product of the first
        99,999 positive integers.
    """
    p = 1
    for i in range(1, limit):
        p = p * i
    return p
# Time the computation and report the size of the result.
start = time.time()
prod = product()
end = time.time()

digits = len(str(prod))
elapsed = end - start
print('The result is %s digits long.' % digits)
print('Took %s seconds to calculate.' % elapsed)
# Example output:
# The result is 456569 digits long.
# Took 3.54418683052063 seconds to calculate.
|
nilq/baby-python
|
python
|
from roboclaw import Roboclaw
from time import sleep

# Open the motor controller on its USB serial port at 115200 baud.
rc = Roboclaw("/dev/ttyACM0",115200)
rc.Open()
# Packet-serial address of the controller.
address=0x80

#rc.ForwardM1(address, 50)
# sleep (5)
# Speed 0: stop motor channel M1.
rc.ForwardM1(address, 0)
|
nilq/baby-python
|
python
|
from pathlib import Path
from typing import Optional
import config # type: ignore
import hvac
from aiohttp_micro import AppConfig as BaseConfig # type: ignore
from config.abc import Field
from passport.client import PassportConfig
class StorageConfig(config.PostgresConfig):
    """PostgreSQL connection settings, resolvable from env vars and Vault."""

    host = config.StrField(default="localhost", env="POSTGRES_HOST")
    port = config.IntField(default=5432, env="POSTGRES_PORT")
    user = config.StrField(default="postgres", vault_path="micro/wallet/postgres:user", env="POSTGRES_USER")
    password = config.StrField(default="postgres", vault_path="micro/wallet/postgres:password", env="POSTGRES_PASSWORD")
    database = config.StrField(default="postgres", env="POSTGRES_DATABASE")
    min_pool_size = config.IntField(default=1, env="POSTGRES_MIN_POOL_SIZE")
    max_pool_size = config.IntField(default=2, env="POSTGRES_MAX_POOL_SIZE")

    @property
    def uri(self) -> str:
        """Connection DSN assembled from the fields above."""
        return "postgresql://{user}:{password}@{host}:{port}/{database}".format(
            user=self.user, password=self.password, host=self.host, port=self.port, database=self.database,
        )
class AppConfig(BaseConfig):
    """Top-level application config: database, passport service, Sentry DSN."""

    db = config.NestedField[StorageConfig](StorageConfig)
    passport = config.NestedField[PassportConfig](PassportConfig)
    sentry_dsn = config.StrField(vault_path="micro/wallet/sentry:dsn", env="SENTRY_DSN")
class VaultConfig(config.Config):
    """HashiCorp Vault connection/auth settings (approle or kubernetes)."""

    enabled = config.BoolField(default=False, env="VAULT_ENABLED")
    host = config.StrField(env="VAULT_HOST")
    auth_method = config.StrField(default="approle", env="VAULT_AUTH_METHOD")
    service_name = config.StrField(default=None, env="VAULT_SERVICE_NAME")  # kubernetes role
    role_id = config.StrField(default=None, env="VAULT_ROLE_ID")            # approle credentials
    secret_id = config.StrField(default=None, env="VAULT_SECRET_ID")
class VaultProvider(config.ValueProvider):
    """Config value provider resolving ``vault_path`` fields from Vault (KV v2)."""

    def __init__(self, config: VaultConfig, mount_point: str) -> None:
        self.client = hvac.Client(url=config.host)
        self.mount_point = mount_point
        if config.auth_method == "approle":
            self.client.auth.approle.login(role_id=config.role_id, secret_id=config.secret_id)
        elif config.auth_method == "kubernetes":
            # In-cluster service-account token mounted by Kubernetes.
            path = Path("/var/run/secrets/kubernetes.io/serviceaccount/token")
            with path.open("r") as fp:
                token = fp.read()
            self.client.auth.kubernetes.login(role=config.service_name, jwt=token)

    def load(self, field: Field) -> Optional[str]:
        """Resolve a field's ``vault_path`` of the form ``"secret/path:key"``.

        Returns None when the field has no vault_path or no ``:key`` suffix.
        NOTE(review): when there is no ``:key`` suffix the secret is still
        fetched but the response is discarded — confirm whether that request
        should be skipped (or the whole secret returned) instead.
        """
        value = None
        if field.vault_path:
            path, key = field.vault_path, None
            if ":" in field.vault_path:
                path, key = field.vault_path.split(":")
            secret_response = self.client.secrets.kv.v2.read_secret_version(path=path, mount_point=self.mount_point)
            if key:
                value = secret_response["data"]["data"][key]
        return value
|
nilq/baby-python
|
python
|
import uuid
from operator import attrgetter
from typing import List
from confluent_kafka import Consumer, TopicPartition
from confluent_kafka.admin import AdminClient, TopicMetadata
from kaskade.config import Config
from kaskade.kafka import TIMEOUT
from kaskade.kafka.group_service import GroupService
from kaskade.kafka.mappers import metadata_to_partition, metadata_to_topic
from kaskade.kafka.models import Topic
class TopicService:
    """Reads topic and partition metadata (incl. watermark offsets) from Kafka."""

    def __init__(self, config: Config) -> None:
        """Store the config; raise if its kafka section is missing."""
        if config is None or config.kafka is None:
            raise Exception("Config not found")
        self.config = config

    def list(self) -> List[Topic]:
        """Return all topics sorted by name, each populated with its consumer
        groups, partitions and low/high watermark offsets."""
        config = self.config.kafka.copy()
        # Throwaway group id: the consumer is only used to query offsets.
        config["group.id"] = str(uuid.uuid4())
        consumer = Consumer(config)
        try:
            admin_client = AdminClient(self.config.kafka)
            groups_service = GroupService(self.config)
            raw_topics: List[TopicMetadata] = list(
                admin_client.list_topics(timeout=TIMEOUT).topics.values()
            )
            topics = []
            for raw_topic in raw_topics:
                topic = metadata_to_topic(raw_topic)
                topics.append(topic)
                topic.groups = groups_service.find_by_topic_name(topic.name)
                topic.partitions = []
                for raw_partition in raw_topic.partitions.values():
                    partition = metadata_to_partition(raw_partition)
                    topic.partitions.append(partition)
                    low, high = consumer.get_watermark_offsets(
                        TopicPartition(topic.name, raw_partition.id),
                        timeout=TIMEOUT,
                        cached=False,
                    )
                    partition.low = low
                    partition.high = high
            return sorted(topics, key=attrgetter("name"))
        finally:
            # The consumer holds a broker connection; close it explicitly
            # instead of leaking it until garbage collection.
            consumer.close()
if __name__ == "__main__":
    # Manual smoke test against the cluster described by the local config.
    service = TopicService(Config("../../kaskade.yml"))
    print(service.list())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function, division
import os
import sys
sys.path.append(os.path.dirname(sys.path[0]))
try:
import cPickle as pickle # python 2
except ImportError:
import pickle # python 3
import socket
import argparse
from random import randint
from numpy import unravel_index, log, maximum
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.widgets import Slider
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from cryoio import mrc
from geometry import gen_dense_beamstop_mask
from notimplemented import correlation
def plot_projs(mrcs_files, log_scale=True, plot_randomly=True):
    """Show a 3x3 grid of masked projections from each MRCS stack.

    Parameters
    ----------
    mrcs_files : list of str
        Paths to .mrcs image stacks.
    log_scale : bool
        Display log-intensities (values floored at 1e-6 before log).
    plot_randomly : bool
        Pick the nine images at random instead of the first nine.
    """
    for mrcs in mrcs_files:
        image_stack = mrc.readMRCimgs(mrcs, 0)
        size = image_stack.shape
        N = size[0]
        mask = gen_dense_beamstop_mask(N, 2, 0.003, psize=9)
        print('image size: {0}x{1}, number of images: {2}'.format(*size))
        print('Select indices randomly:', plot_randomly)
        fig, axes = plt.subplots(3, 3, figsize=(12.9, 9.6))
        for i, ax in enumerate(axes.flat):
            row, col = unravel_index(i, (3, 3))
            if plot_randomly:
                # Fix: randint is inclusive on both ends, so the upper bound
                # must be size[2] - 1 or an out-of-range index can be drawn.
                num = randint(0, size[2] - 1)
            else:
                num = i
            print('index:', num)
            if log_scale:
                # Floor at 1e-6 so log() never sees non-positive values.
                img = log(maximum(image_stack[:, :, num], 1e-6)) * mask
            else:
                img = image_stack[:, :, num] * mask
            im = ax.imshow(img, origin='lower')  # cmap='Greys'
            ticks = [0, int(N/4.0), int(N/2.0), int(N*3.0/4.0), int(N-1)]
            # NOTE(review): these conditions hide ticks on the bottom row and
            # the left column and show them elsewhere — the usual convention
            # is the opposite; confirm this is intended.
            if row == 2:
                ax.set_xticks([])
            else:
                ax.set_xticks(ticks)
            if col == 0:
                ax.set_yticks([])
            else:
                ax.set_yticks(ticks)
        fig.subplots_adjust(right=0.8)
        cbarar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
        fig.colorbar(im, cax=cbarar_ax)
        fig.suptitle('{} before normalization'.format(mrcs))
        # fig.tight_layout()
        plt.show()
def plot_projs_with_slider(mrcs_files, log_scale=True, show_ac_image=False):
    """Interactive slice viewer: one masked projection with an index slider.

    Parameters
    ----------
    mrcs_files : list of str
        Paths to .mrcs image stacks; one viewer window is shown per file.
    log_scale : bool
        Display log-intensities.
    show_ac_image : bool
        Show the angular-correlation image of each slice instead of the
        raw slice.
    """
    for mrcs in mrcs_files:
        image_stack = mrc.readMRCimgs(mrcs, 0)
        size = image_stack.shape
        N = size[0]
        mask = gen_dense_beamstop_mask(N, 2, 0.003, psize=9)
        print('image size: {0}x{1}, number of images: {2}'.format(*size))
        # plot projections
        fig = plt.figure(figsize=(8, 8))
        # Main image on top, slider row underneath.
        gs = GridSpec(2, 2, width_ratios=[1, 0.075], height_ratios=[1, 0.075], )
        # original
        ax = fig.add_subplot(gs[0, 0])
        curr_img = image_stack[:, :, 0] * mask
        if show_ac_image:
            curr_ac_img = correlation.calc_full_ac(curr_img, 0.95) * mask
            curr_img = curr_ac_img
        if log_scale:
            # NOTE(review): unlike plot_projs, values are not floored before
            # log(), so zero/negative pixels produce -inf/NaN — confirm.
            curr_img = log(curr_img)
        im = ax.imshow(curr_img, origin='lower')
        ticks = [0, int(N/4.0), int(N/2.0), int(N/4.0*3), int(N-1)]
        ax.set_xticks(ticks)
        ax.set_yticks(ticks)
        ax.set_title('Slice Viewer (log scale: {}) for {}'.format(log_scale, os.path.basename(mrcs)))
        ax_divider = make_axes_locatable(ax)
        cax = ax_divider.append_axes("right", size="7%", pad="2%")
        cbar = fig.colorbar(im, cax=cax)  # colorbar
        # slider
        ax_slider = fig.add_subplot(gs[1, 0])
        idx_slider = Slider(ax_slider, 'index:', 0, size[2]-1, valinit=0, valfmt='%d')

        def update(val):
            # Slider callback: swap in the selected slice and rescale colors.
            idx = int(idx_slider.val)
            curr_img = image_stack[:, :, idx] * mask
            if show_ac_image:
                curr_ac_img = correlation.calc_full_ac(curr_img, 0.95) * mask
                curr_img = curr_ac_img
            if log_scale:
                curr_img = log(curr_img)
            im.set_data(curr_img)
            cbar.set_clim(vmin=curr_img.min(), vmax=curr_img.max())
            cbar.draw_all()
            fig.canvas.draw_idle()
        idx_slider.on_changed(update)
        plt.show()
if __name__ == '__main__':
    # Parse CLI flags selecting scale and viewer mode.
    parser = argparse.ArgumentParser()
    parser.add_argument("mrcs_files", help="list of mrcs files.", nargs='+')
    parser.add_argument("-l", "--log_scale", help="show image in log scale.",
                        action="store_true")
    parser.add_argument("-r", "--plot_randomly", help="plot image with random index.",
                        action="store_true")
    parser.add_argument("-a", "--show_ac_image", help="plot image with angular correlation.",
                        action="store_true")
    args = parser.parse_args()

    print('mrcs_files:', args.mrcs_files)
    print('log_scale:', args.log_scale)
    print('plot_randomly:', args.plot_randomly)
    print('show_ac_image', args.show_ac_image)

    # Random mode uses the static 3x3 grid; otherwise the slider viewer.
    if args.plot_randomly:
        plot_projs(args.mrcs_files, log_scale=args.log_scale)
    else:
        plot_projs_with_slider(
            args.mrcs_files, log_scale=args.log_scale, show_ac_image=args.show_ac_image)
|
nilq/baby-python
|
python
|
from highton.models import Party
from highton.highton_constants import HightonConstants
class AssociatedParty(
    Party,
):
    """A party (person or company) associated with another Highrise resource.

    Inherits all fields from Party; only the XML tag name differs.

    :ivar id: fields.IntegerField(name=HightonConstants.ID)
    :ivar author_id: fields.IntegerField(name=HightonConstants.AUTHOR_ID)
    :ivar background: fields.StringField(name=HightonConstants.BACKGROUND)
    :ivar company_id: fields.IntegerField(name=HightonConstants.COMPANY_ID)
    :ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
    :ivar first_name: fields.StringField(name=HightonConstants.FIRST_NAME)
    :ivar name: fields.StringField(name=HightonConstants.NAME)
    :ivar group_id: fields.IntegerField(name=HightonConstants.GROUP_ID)
    :ivar last_name: fields.StringField(name=HightonConstants.LAST_NAME)
    :ivar owner_id: fields.IntegerField(name=HightonConstants.OWNER_ID)
    :ivar title: fields.StringField(name=HightonConstants.TITLE)
    :ivar updated_at: fields.DatetimeField(name=HightonConstants.UPDATED_AT)
    :ivar visible_to: fields.StringField(name=HightonConstants.VISIBLE_TO)
    :ivar company_name: fields.StringField(name=HightonConstants.COMPANY_NAME)
    :ivar linkedin_url: fields.StringField(name=HightonConstants.LINKEDIN_URL)
    :ivar avatar_url: fields.StringField(name=HightonConstants.AVATAR_URL)
    :ivar type: fields.StringField(name=HightonConstants.TYPE)
    :ivar tags: fields.ListField(name=HightonConstants.TAGS, init_class=Tag)
    :ivar contact_data: fields.ObjectField(name=HightonConstants.CONTACT_DATA, init_class=ContactData)
    :ivar subject_datas: fields.ListField(name=HightonConstants.SUBJECT_DATAS, init_class=SubjectData)
    """

    TAG_NAME = HightonConstants.ASSOCIATED_PARTY
|
nilq/baby-python
|
python
|
import unittest
from column import *
class ColumnTest(unittest.TestCase):
    """Regression tests for the column module's geometry/pose helpers."""

    def test_polar2coord(self):
        # polar2coord maps a (angle_deg, distance) observation to (x, y).
        # test55
        self.assertEqual(polar2coord( (46, 2.808) ), (1.9506007042488642, -2.019906159350932) )
        self.assertEqual(polar2coord( (196, 1.194) ), (-1.1477464649503528, 0.32911100284549705) )
        self.assertEqual(polar2coord( (89, 1.762) ), (0.030751140142492875, -1.7617316388655615) )
        self.assertEqual(polar2coord( (269, 1.21) ), (-0.021117411789113007, 1.2098157111392334) )

    def test_col_dist(self):
        # col_dist returns the distance between two polar observations.
        self.assertAlmostEqual(col_dist( (89, 1.762), (269, 1.21) ), 2.972)
        # Looser precision: the expected value itself is suspect (see note).
        self.assertAlmostEqual(col_dist( (46, 2.808), (196, 1.194) ), 3.888, 3) # hmmm, probably bug??

    def test_analyse_pose(self):
        # analyse_pose estimates (x, y, heading) from column observations.
        p = analyse_pose(prev_pose=None, new_data=[(89, 1.762), (269, 1.21)])
        self.assertEqual(p[0], (2.6194324487249787e-16, 0.2760000000000001, -0.017453292519943098))
        # select best matching pair
        p = analyse_pose(None, [(46, 2.8080000000000003), (169, 5.9500000000000002), (196, 1.194)])
        self.assertEqual(p[0], (0.43115108250453843, 0.8306320134455586, -0.922098513045361))
if __name__ == "__main__":
    # Run the tests when executed directly.
    unittest.main()
# vim: expandtab sw=4 ts=4
|
nilq/baby-python
|
python
|
from helpers import *
import shutil
# --- Configuration ---------------------------------------------------------
league_info_file = 'league_info.json'
# Rest-of-season (ROS) rankings endpoint.
ros_URL = 'https://5ahmbwl5qg.execute-api.us-east-1.amazonaws.com/dev/rankings'
# Reddit experts used for streaming defense / kicker advice.
def_expert = 'subvertadown'
kick_expert = 'subvertadown'
weekly_method = 'borischen'
yaml_config_temp = '_config_template.yml'
output_file = 'summary.txt'
yaml_config = '_config.yml'

# --- Fetch shared data once ------------------------------------------------
league_info = parse_league_info(league_info_file)
# ros_ranks,ros_ranked_dudes = parse_ros_ranks(ros_file)
ros_ranked_dudes,ros_ranks = get_ros_stuff(ros_URL)
# def_rank = parse_simple_file(def_file)
# kick_rank = parse_simple_file(kick_file)
def_rank = get_reddit_expert_rank(def_expert,'DEF')
kick_rank = get_reddit_expert_rank(kick_expert,'K')
summary = []
# Start from a fresh copy of the site config template; league pages are
# appended to it below.
shutil.copy(yaml_config_temp,yaml_config)
for l in league_info:
    # Gather rosters, rankings and streaming advice for this league.
    my_dudes,my_pos,rostered_dudes,my_def,rostered_def,my_kick,rostered_kick,starters = get_dudes(l)
    my_ros_dudes,my_ros_ranks,unowned_ros_dudes,unowned_ros_ranks = get_ranks(my_dudes, rostered_dudes, ros_ranked_dudes, ros_ranks)
    stream_def_advice = get_stream(my_def,rostered_def,def_rank)
    stream_kick_advice = get_stream(my_kick,rostered_kick,kick_rank)
    weekly_team,weekly_tiers,potential_stream_names,potential_stream_pos,potential_stream_tiers = get_weekly(my_dudes,rostered_dudes,weekly_method)
    txt = get_summary_text(l,my_ros_dudes,my_ros_ranks,unowned_ros_dudes,unowned_ros_ranks,stream_def_advice,stream_kick_advice,weekly_team,weekly_tiers,potential_stream_names,potential_stream_pos,potential_stream_tiers,starters)
    summary.extend(txt)
    # Write this league's markdown page and register it in the site config.
    md_file = write_league_md(l,my_ros_dudes,my_ros_ranks,unowned_ros_dudes,unowned_ros_ranks,stream_def_advice,stream_kick_advice,weekly_team,weekly_tiers,potential_stream_names,potential_stream_pos,potential_stream_tiers,starters)
    with open(yaml_config, 'a') as f:
        f.writelines([' - title: ' + l['nickname'] + '\n',' url: ' + md_file + '\n'])
with open(output_file,'w') as f:
    f.writelines(summary)
print('Done')
|
nilq/baby-python
|
python
|
import re
from typing import List
import pytest
from ja_timex.pattern.place import Pattern
from ja_timex.tag import TIMEX
from ja_timex.tagger import BaseTagger
from ja_timex.timex import TimexParser
@pytest.fixture(scope="module")
def p():
    """TimexParser wired with a custom tagger for 皇紀 (kouki/imperial-era) years."""
    # Components required by the custom tagger, and the TimexParser wiring.

    def parse_kouki(re_match: re.Match, pattern: Pattern) -> TIMEX:
        # Convert a kouki year to the Gregorian calendar; kouki counts from
        # 660 BC, hence the fixed offset of 660.
        args = re_match.groupdict()
        span = re_match.span()
        year = int(args["calendar_year"]) - 660
        return TIMEX(
            type="DATE",
            value=f"{year}-XX-XX",
            text=re_match.group(),
            mod=pattern.option.get("mod"),
            parsed=args,
            span=span,
            pattern=pattern,
        )

    custom_pattern = [
        Pattern(
            re_pattern="皇紀(?P<calendar_year>[0-9]{1,4})年",
            parse_func=parse_kouki,
            option={},
        )
    ]

    class CustomTagger(BaseTagger):
        # Minimal tagger: only needs to expose the pattern list.
        # NOTE(review): the mutable default binds the closure's list; fine
        # here since it is never mutated.
        def __init__(self, patterns: List[Pattern] = custom_pattern) -> None:
            self.patterns = patterns

    return TimexParser(custom_tagger=CustomTagger())
def test_custom_tagger_kouki(p):
    """With the custom tagger, kouki years resolve to Gregorian years."""
    timexes = p.parse("西暦2021年は皇紀2681年です")
    assert len(timexes) == 2
    # Western-calendar year handled by the default taggers.
    assert timexes[0].value == "2021-XX-XX"
    assert timexes[0].text == "西暦2021年"
    # Kouki 2681 corresponds to 2021 AD (offset 660).
    assert timexes[1].value == "2021-XX-XX"
    assert timexes[1].text == "皇紀2681年"
    assert timexes[1].parsed == {"calendar_year": "2681"}
def test_without_custom_tagger():
    """Without the custom tagger, the kouki year is read as a literal year."""
    p = TimexParser()
    timexes = p.parse("西暦2021年は皇紀2681年です")
    assert len(timexes) == 2
    assert timexes[0].value == "2021-XX-XX"
    assert timexes[0].text == "西暦2021年"
    # Interpreted literally as the year 2681.
    assert timexes[1].value == "2681-XX-XX"
    assert timexes[1].text == "2681年"
    assert timexes[1].parsed == {"calendar_day": "XX", "calendar_month": "XX", "calendar_year": "2681"}
|
nilq/baby-python
|
python
|
from patternpieces import PatternPieces
from piece import Piece
from piecesbank import PiecesBank
from ui import UI
from board import Board
from arbiter import Arbiter
from ai import Ai
import json
import random
def main():
    """Wire up the pieces bank, UI, AI and arbiter, then start the UI loop."""
    pb = PiecesBank()
    app = UI()
    # NOTE: the original author warns this preload must happen before the
    # main loop starts — do not remove or reorder it.
    app.preloadPieces(pb.pieceslist)
    ai = Ai()
    arbiter = Arbiter()
    board = Board()
    # Hook the UI's batch-run action up to the AI's main function.
    app.setBatchMethod(lambda loop, fitness, mutation: ai.main_function(pb, app, arbiter, board, loop, fitness, mutation))
    # app.drawTable(board)
    app.drawTable(generatedSolvedPuzzle(pb))
    # NOTE: mainloop() blocks; it must remain the last call (see warning above).
    app.mainloop()
def generatedSolvedPuzzle(pb):
    """Build a 16x16 Board holding a solved layout of the puzzle.

    Pieces are taken row-major from the bank (index = x + y * 16) and each
    edge pattern is assigned from the cell's (x, y) parity, with EDGE on the
    outer border so adjacent cells always share a matching pattern.
    """
    ret = Board()
    for y in range(16):
        for x in range(16):
            ret[x, y] = pb.pieceslist[x + y * 16]
            # Top edge: border pattern on row 0, otherwise alternate by row parity.
            if (y != 0):
                if (y % 2 == 0):
                    ret[x, y].upEdge = PatternPieces.YELLOWFLOWERINBLUE
                else:
                    ret[x, y].upEdge = PatternPieces.BLUESTARINYELLOW
            else:
                ret[x, y].upEdge = PatternPieces.EDGE
            # Right edge: border on the last column, else alternate by column parity.
            if (x != 15):
                if (x % 2 == 0):
                    ret[x, y].rightEdge = PatternPieces.BLUEGEARINPINK
                else:
                    ret[x, y].rightEdge = PatternPieces.YELLOWSTARINPURPLE
            else:
                ret[x, y].rightEdge = PatternPieces.EDGE
            # Bottom edge: inverted parity of the top edge so row y's bottom
            # matches row y+1's top.
            if (y != 15):
                if (y % 2 == 1):
                    ret[x, y].downEdge = PatternPieces.YELLOWFLOWERINBLUE
                else:
                    ret[x, y].downEdge = PatternPieces.BLUESTARINYELLOW
            else:
                ret[x, y].downEdge = PatternPieces.EDGE
            # Left edge: inverted parity of the right edge, matching column x-1.
            if (x != 0):
                if (x % 2 == 1):
                    ret[x, y].leftEdge = PatternPieces.BLUEGEARINPINK
                else:
                    ret[x, y].leftEdge = PatternPieces.YELLOWSTARINPURPLE
            else:
                ret[x, y].leftEdge = PatternPieces.EDGE
    return ret
if __name__ == '__main__':
    # Launch the puzzle UI.
    main()
|
nilq/baby-python
|
python
|
import re
import wrapt
from .bash import CommandBlock
class ConfigurationError(Exception):
    """Raised when an entity is missing required configuration (e.g. its name)."""
    pass
def add_comment(action):
    """Decorator factory: prefix a lifecycle method's CommandBlock with a
    blank line and an ``echo`` naming *action* and the entity.

    Used as ``@add_comment('creating')`` on Entity.create and friends.
    """
    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        def _execute(*args, **kwargs):
            return CommandBlock() + '' + 'echo "{} {}"'.format(action, instance.description) + wrapped(*args, **kwargs)
        return _execute(*args, **kwargs)
    return wrapper
class Entity:
    """Base class for network entities (namespaces, switches, ...).

    Tracks the links attached to the entity and provides the
    create/configure/destroy lifecycle; each step is prefixed with an echo
    comment via ``add_comment``.
    """

    __shortname = 'x'

    def __init__(self):
        self.links = []

    def add_to(self, master):
        """Register this entity on *master* and return self for chaining."""
        master.entities.append(self)
        return self  # chaining ;)

    @property
    def endpoints(self):
        """Endpoints of attached links that belong to this entity."""
        endpoints = []
        for l in self.links:
            if l.e1.entity == self:
                endpoints.append(l.e1)
            if l.e2.entity == self:
                endpoints.append(l.e2)
        return endpoints

    @add_comment('creating')
    def create(self):
        self.check_configuration()
        return CommandBlock()

    @add_comment('configuring')
    def configure(self):
        return CommandBlock()

    @add_comment('destroying')
    def destroy(self):
        return CommandBlock()

    def check_configuration(self):
        """Raise ConfigurationError when mandatory settings are missing."""
        if self.name is None:
            raise ConfigurationError("name is missing")

    @property
    def entity_type_name(self):
        """Dash-joined chain of ``__shortname`` values along the class hierarchy."""
        hierarchy = []
        ccls = self.__class__
        while ccls is not object:
            try:
                hierarchy.append(getattr(ccls, '_' + ccls.__name__ + '__shortname'))
            except AttributeError:
                # Class level without its own __shortname: skip it.
                pass
            ccls = ccls.__bases__[0]
        return '-'.join(reversed(hierarchy))

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, value):
        self.__name = value
        try:
            # Extract trailing digits of the name, e.g. "ns12" -> "12".
            self.name_id = re.search(r'(\d+)$', self.__name).group()
        except (AttributeError, TypeError):
            # Fix: was a bare ``except``.  AttributeError: no trailing digits
            # (search returned None); TypeError: name is None.
            self.name_id = None

    @property
    def description(self):
        return self.name

    def __str__(self):
        return self.description

    def __repr__(self):
        return self.__str__()
class Netns(Entity):
    """A Linux network namespace entity."""

    __shortname = 'ns'

    def __init__(self, name=None):
        super().__init__()
        self.name = name
        self.routes = []              # [(destination_cidr, via_endpoint)]
        self.configure_commands = []  # extra shell commands emitted by configure()

    def add_route(self, destination, endpoint):
        """Route *destination* via the IP address of *endpoint*."""
        self.routes.append((destination, endpoint))

    def add_configure_command(self, command, inside_ns=True):
        """Queue a shell command for configure(); when *inside_ns* it is
        wrapped in ``ip netns exec``.  ``{self.name}`` placeholders are
        expanded later, when configure() formats the accumulated commands."""
        if inside_ns:
            self.configure_commands.append("ip netns exec {self.name} " + command)
        else:
            self.configure_commands.append(command)

    def create(self):
        return super().create() + "ip netns add {self.name}".format(self=self)

    def configure(self):
        cmds = CommandBlock()
        for r in self.routes:
            cmds += "ip netns exec {self.name} ip route add " + r[0] + " via " + r[1].ip_address + " proto static"
        for c in self.configure_commands:
            cmds += c
        # Placeholders in routes/commands are substituted here, in one pass.
        return super().configure() + cmds.format(self=self)

    def destroy(self):
        return super().destroy() + "ip netns delete {self.name}".format(self=self)
class DockerContainer(Entity):
    """Placeholder for Docker-backed entities; no behaviour beyond Entity yet."""
class OVS(Entity):
    """An Open vSwitch bridge managed via 'ovs-vsctl'."""
    __shortname = 'ovs'
    def __init__(self, name=None):
        super().__init__()
        self.name = name
    def create(self):
        return super().create() + "ovs-vsctl add-br {self.name}".format(self=self)
    def configure(self):
        # NOTE(review): returns None (no configuration step) while other
        # entities return a CommandBlock — presumably CommandBlock += None
        # is a no-op; confirm against CommandBlock's implementation.
        return None
    def destroy(self):
        return super().destroy() + "ovs-vsctl del-br {self.name}".format(self=self)
class Endpoint:
    """One side of a Link: an entity plus an optional interface name and
    IP address / prefix size."""

    @classmethod
    def get(cls, arg):
        """Coerce *arg* (an Endpoint, an Entity, or a tuple of constructor
        arguments) into an Endpoint instance."""
        if isinstance(arg, cls):
            return arg
        if isinstance(arg, Entity):
            return cls(arg)
        if isinstance(arg, tuple):
            return cls(*arg)

    def __init__(self, entity, ip_address=None, name=None):
        self.entity = entity
        self.name = name
        self.ip_address = None
        self.ip_size = None
        if ip_address is not None:
            # Accept 'a.b.c.d/nn' or a bare address (prefix defaults to /24).
            pieces = ip_address.split('/')
            if len(pieces) > 1:
                self.ip_address = pieces[0]
                self.ip_size = int(pieces[1])
            else:
                self.ip_address = ip_address
                self.ip_size = 24

    def __str__(self):
        return '{self.name} ({self.ip_address}/{self.ip_size})'.format(self=self)

    def __repr__(self):
        return self.__str__()

    def disable_offloading(self):
        """Shell command disabling NIC offloading on this interface."""
        return 'ethtool -K {self.name} tx off gso off sg off gro off'.format(self=self)
class Link:
    """A connection between two Endpoints; Link.declare picks the concrete
    subclass based on the entity types at each end."""
    @staticmethod
    def declare(e1, e2, link_type=None, **kwargs):
        # Coerce plain entities / tuples into Endpoint instances first.
        e1 = Endpoint.get(e1)
        e2 = Endpoint.get(e2)
        if type(e1.entity) is OVS and type(e2.entity) is OVS:
            if link_type is None:
                link_type = 'patch'
            if link_type == 'veth':
                return Link_OVS_OVS_veth(e1, e2, **kwargs)
            elif link_type == 'patch':
                return Link_OVS_OVS_patch(e1, e2, **kwargs)
            else:
                raise ConfigurationError('unrecognized type: {}'.format(link_type))
        if (type(e1.entity) is OVS and type(e2.entity) is Netns) or (type(e1.entity) is Netns and type(e2.entity) is OVS):
            # make sure e1 is the OVS
            if type(e1.entity) is Netns and type(e2.entity) is OVS:
                e2, e1 = e1, e2
            if link_type is None:
                link_type = 'port'
            if link_type == 'veth':
                return Link_OVS_Netns_veth(e1, e2, **kwargs)
            elif link_type == 'port':
                return Link_OVS_Netns_port(e1, e2, **kwargs)
            else:
                raise ConfigurationError('unrecognized type: {}'.format(link_type))
        if type(e1.entity) is Netns and type(e2.entity) is Netns:
            if link_type is not None and link_type != 'veth':
                raise ConfigurationError('unrecognized type: {}'.format(link_type))
            return Link_Netns_Netns_veth(e1, e2, **kwargs)
        # NOTE(review): unsupported entity combinations fall through here and
        # implicitly return None instead of raising — confirm this is intended.
    def __init__(self, e1, e2, disable_offloading=False, **kwargs):
        self.e1 = e1
        self.e2 = e2
        self.disable_offloading = disable_offloading
        # Register this link on both endpoint entities.
        e1.entity.links.append(self)
        e2.entity.links.append(self)
    @add_comment('creating')
    def create(self):
        return CommandBlock()
    @add_comment('destroying')
    def destroy(self):
        return CommandBlock()
    @property
    def description(self):
        return "link between {self.e1.entity.name} and {self.e2.entity.name} of type {self.__class__.__name__} ({self.e1.name} to {self.e2.name})".format(self=self)
    def __str__(self):
        return self.description
    def __repr__(self):
        return self.__str__()
    # ensure no double links are configured (they'll be skipped by Master)
    # links will be skipped EVEN IF they're of DIFFERENT TYPES
    # but they are NOT skipped if they have different ip addresses
    def __key(self):
        # Order-independent identity: the sorted pair of endpoint hashes.
        return tuple(sorted([hash(self.e1), hash(self.e2)]))
    def __hash__(self):
        return hash(self.__key())
    def __eq__(self, other):
        return self.__key() == other.__key()
    def __ne__(self, other):
        return not self.__eq__(other)
class Link_OVS_OVS_veth(Link):
    """Connects two OVS bridges with a veth pair, one end on each bridge."""
    def __init__(self, e1, e2, **kwargs):
        super().__init__(e1, e2, **kwargs)
    def assign_attributes(self):
        # veth names are limited to 15 chars(!)
        if self.e1.name is None:
            self.e1.name = 'veth-ovs-{e1.entity.name_id}-{e2.entity.name_id}'.format(**self.__dict__)
        if self.e2.name is None:
            self.e2.name = 'veth-ovs-{e2.entity.name_id}-{e1.entity.name_id}'.format(**self.__dict__)
    def create(self):
        self.assign_attributes()
        cmds = CommandBlock()
        # create the links
        cmds += "ip link add {e1.name} type veth peer name {e2.name}"
        # configure one side
        cmds += "ovs-vsctl add-port {e1.entity.name} {e1.name}"
        cmds += "ip link set {e1.name} up"
        if self.disable_offloading:
            cmds += self.e1.disable_offloading()
        # configure the other side
        cmds += "ovs-vsctl add-port {e2.entity.name} {e2.name}"
        cmds += "ip link set {e2.name} up"
        if self.disable_offloading:
            cmds += self.e2.disable_offloading()
        # Placeholders refer to e1/e2 stored in the instance dict.
        return super().create() + cmds.format(**self.__dict__)
    def destroy(self):
        self.assign_attributes()
        # Deleting one end of a veth pair removes both ends.
        return super().destroy() + "ip link delete {e1.name}".format(**self.__dict__)
class Link_OVS_OVS_patch(Link):
    """Connects two OVS bridges with a pair of OVS patch ports."""
    def __init__(self, e1, e2, **kwargs):
        super().__init__(e1, e2, **kwargs)
    def assign_attributes(self):
        if self.e1.name is None:
            self.e1.name = 'patch-{e2.entity.name}-{e1.entity.name_id}'.format(**self.__dict__)
        if self.e2.name is None:
            self.e2.name = 'patch-{e1.entity.name}-{e2.entity.name_id}'.format(**self.__dict__)
    def create(self):
        self.assign_attributes()
        cmds = CommandBlock()
        # Each patch port names the other as its peer.
        cmds += "ovs-vsctl add-port {e1.entity.name} {e1.name} -- set Interface {e1.name} type=patch options:peer={e2.name}"
        cmds += "ovs-vsctl add-port {e2.entity.name} {e2.name} -- set Interface {e2.name} type=patch options:peer={e1.name}"
        return super().create() + cmds.format(**self.__dict__)
    def destroy(self):
        return None # destroyed by the bridge
class Link_OVS_Netns_veth(Link):
    """Connects an OVS bridge to a network namespace with a veth pair."""
    # e1 is the ovs, e2 is the netns
    def __init__(self, e1, e2, **kwargs):
        super().__init__(e1, e2, **kwargs)
    def assign_attributes(self):
        if self.e1.name is None:
            self.e1.name = 'v-ovs{e1.entity.name_id}-ns{e2.entity.name_id}'.format(**self.__dict__)
        if self.e2.name is None:
            self.e2.name = 'v-ns{e2.entity.name_id}-ovs{e1.entity.name_id}'.format(**self.__dict__)
    def create(self):
        self.assign_attributes()
        cmds = CommandBlock()
        # create the links
        cmds += "ip link add {e1.name} type veth peer name {e2.name}"
        # configure ovs side
        cmds += "ovs-vsctl add-port {e1.entity.name} {e1.name}"
        cmds += "ip link set {e1.name} up"
        if self.disable_offloading:
            cmds += self.e1.disable_offloading()
        # configure namespace side
        cmds += "ip link set {e2.name} netns {e2.entity.name}"
        cmds += "ip netns exec {e2.entity.name} ip link set dev {e2.name} up"
        if self.e2.ip_address is not None:
            cmds += "ip netns exec {e2.entity.name} ip address add {e2.ip_address}/{e2.ip_size} dev {e2.name}"
        if self.disable_offloading:
            cmds += ("ip netns exec {e2.entity.name} " + self.e2.disable_offloading())
        return super().create() + cmds.format(**self.__dict__)
    def destroy(self):
        self.assign_attributes()
        # Deleting the OVS-side end removes the whole veth pair.
        return super().destroy() + "ip link delete {e1.name}".format(**self.__dict__)
class Link_OVS_Netns_port(Link):
    """Connects an OVS bridge (e1) to a network namespace (e2) through an
    OVS internal port that is moved into the namespace."""

    # e1 is the ovs, e2 is the netns
    def __init__(self, e1, e2, **kwargs):
        super().__init__(e1, e2, **kwargs)

    def assign_attributes(self):
        # Only the namespace side needs a name; derive one if not given.
        if self.e2.name is None:
            self.e2.name = 'p-{e1.entity.name}-{e2.entity.name_id}'.format(**self.__dict__)

    def create(self):
        self.assign_attributes()
        command_templates = [
            "ovs-vsctl add-port {e1.entity.name} {e2.name} -- set Interface {e2.name} type=internal",
            "ip link set {e2.name} netns {e2.entity.name}",
            "ip netns exec {e2.entity.name} ip link set dev {e2.name} up",
        ]
        if self.e2.ip_address is not None:
            command_templates.append("ip netns exec {e2.entity.name} ip address add {e2.ip_address}/{e2.ip_size} dev {e2.name}")
        if self.disable_offloading:
            command_templates.append("ip netns exec {e2.entity.name} " + self.e2.disable_offloading())
        cmds = CommandBlock()
        for template in command_templates:
            cmds += template
        return super().create() + cmds.format(**self.__dict__)

    def destroy(self):
        return None  # destroyed by the bridge
class Link_Netns_Netns_veth(Link):
    """Connects two network namespaces with a veth pair, one end in each."""
    def __init__(self, e1, e2, **kwargs):
        super().__init__(e1, e2, **kwargs)
    def assign_attributes(self):
        # veth names are limited to 15 chars(!)
        if self.e1.name is None:
            self.e1.name = 'veth-ns-{e1.entity.name_id}-{e2.entity.name_id}'.format(**self.__dict__)
        if self.e2.name is None:
            self.e2.name = 'veth-ns-{e2.entity.name_id}-{e1.entity.name_id}'.format(**self.__dict__)
    def create(self):
        self.assign_attributes()
        cmds = CommandBlock()
        # create the links
        cmds += "ip link add {e1.name} type veth peer name {e2.name}"
        # configure one side
        cmds += "ip link set {e1.name} netns {e1.entity.name}"
        cmds += "ip netns exec {e1.entity.name} ip link set dev {e1.name} up"
        if self.e1.ip_address is not None:
            cmds += "ip netns exec {e1.entity.name} ip address add {e1.ip_address}/{e1.ip_size} dev {e1.name}"
        if self.disable_offloading:
            cmds += ("ip netns exec {e1.entity.name} " + self.e1.disable_offloading())
        # configure the other side
        cmds += "ip link set {e2.name} netns {e2.entity.name}"
        cmds += "ip netns exec {e2.entity.name} ip link set dev {e2.name} up"
        if self.e2.ip_address is not None:
            cmds += "ip netns exec {e2.entity.name} ip address add {e2.ip_address}/{e2.ip_size} dev {e2.name}"
        if self.disable_offloading:
            cmds += ("ip netns exec {e2.entity.name} " + self.e2.disable_offloading())
        return super().create() + cmds.format(**self.__dict__)
    def destroy(self):
        self.assign_attributes()
        # The delete must run inside e1's namespace; it removes both ends.
        return super().destroy() + "ip netns exec {e1.entity.name} ip link delete {e1.name}".format(**self.__dict__)
class Master:
    """Collects entities, assigns unique names, and renders the full bash
    setup/cleanup script."""
    def __init__(self):
        self.entities = []
    def add(self, entity):
        self.entities.append(entity)
    def find_unique_attribute(self, entity, attribute_name, fmt, n_limit=None):
        """Assign `fmt.format(entity=entity, n=n)` with the first n that no
        other registered entity already uses; leave explicit values alone."""
        if getattr(entity, attribute_name) is not None:
            return
        n = 1
        good_attr = False
        while not good_attr:
            proposed_attr = fmt.format(entity=entity, n=n)
            good_attr = all([getattr(e, attribute_name) != proposed_attr for e in self.entities])
            n += 1
            if n_limit is not None and n > n_limit:
                raise ConfigurationError('unable to find a good value')
        setattr(entity, attribute_name, proposed_attr)
    def assign_attributes(self):
        # Give every unnamed entity a default name like 'ns1', 'ovs2', ...
        for entity in self.entities:
            self.find_unique_attribute(entity, 'name', '{entity.entity_type_name}{n}')
            # self.find_unique_attribute(entity, 'ip_address', '10.112.{n}.1', 255)
    @property
    def links(self):
        # De-duplicated list of all links, preserving first-seen order
        # (Link defines __hash__/__eq__, so duplicates collapse via the set).
        links = []
        links_set = set()
        links_set_add = links_set.add
        for e in self.entities:
            links += [l for l in e.links if not (l in links_set or links_set_add(l))]
        return links
    def __get_commands(self, collection, fn):
        # Concatenate the CommandBlocks produced by calling `fn` on each object.
        commands = CommandBlock()
        for obj in collection:
            commands += getattr(obj, fn)()
        return commands
    def setup(self):
        """Commands creating all entities and links, then configuring entities."""
        self.assign_attributes()
        return self.__get_commands(self.entities, 'create') + self.__get_commands(self.links, 'create') + self.__get_commands(self.entities, 'configure')
    def cleanup(self):
        """Commands destroying links first, then entities."""
        return self.__get_commands(self.links, 'destroy') + self.__get_commands(self.entities, 'destroy')
    def get_script(self, enable_routing=True, include_calls=True):
        """Render a bash script defining opg_setup/opg_cleanup and, when
        include_calls is True, installing a cleanup trap and calling setup."""
        res = CommandBlock.root_check()
        res += 'function opg_setup {'
        res += 'set -e'
        if enable_routing:
            res += 'sysctl -w net.ipv4.ip_forward=1'
        res += self.setup()
        res += ''
        res += 'set +e'
        res += 'sleep 1'
        res += '}'
        res += ''
        res += 'function opg_cleanup {'
        res += 'set +e'
        res += self.cleanup()
        res += ''
        if enable_routing:
            res += 'sysctl -w net.ipv4.ip_forward=0'
        res += 'sleep 1'
        res += '}'
        if include_calls:
            res += ''
            res += 'trap opg_cleanup EXIT'
            res += 'opg_setup'
        return res
|
nilq/baby-python
|
python
|
import logging
from .registry import Registry
from .parsers import RegistryPDFParser
from .securityhandler import security_handler_factory
from .types import IndirectObject, Stream, Array, Dictionary, IndirectReference, obj_factory
from .utils import cached_property, from_pdf_datetime
class PDFDocument(object):
    """
    Represents PDF document structure
    :param fobj: file-like object: binary file descriptor, BytesIO stream etc.
    :param password: Optional. Password to access PDF content. Defaults to the empty string.
    """
    #: contains PDF file header data
    header = None
    #: contains PDF file trailer data
    trailer = None
    #: references to document's Catalog instance
    root = None

    def __init__(self, fobj, password=''):
        """ Constructor method
        """
        self.registry = Registry()
        self.parser = RegistryPDFParser(fobj, self.registry)
        self.header = self.parser.header
        self.trailer = self.parser.trailer
        # Only the "Standard" security handler is supported.
        if self.encrypt and self.encrypt.Filter != "Standard":
            raise ValueError("Unsupported encryption handler {}".format(self.encrypt.Filter))
        self.root = self.obj_by_ref(self.trailer.root)
        if self.encrypt:
            sec_handler = security_handler_factory(self.trailer.id, self.encrypt, password)
            self.parser.set_security_handler(sec_handler)

    @cached_property
    def encrypt(self):
        """
        Document's Encrypt dictionary (if present)
        :return: dict or None
        """
        res = None
        obj = self.trailer.encrypt
        if obj:
            # The trailer may hold the dict inline or as an indirect reference.
            res = self.obj_by_ref(obj) if isinstance(obj, IndirectReference) else obj
        return res

    def build(self, obj, visited=None, lazy=True):
        """
        Resolves all indirect references for the object.
        :param obj: an object from the document
        :type obj: one of supported PDF types
        :param lazy: don't resolve subsequent indirect references if True (default).
        :type lazy: bool
        :param visited: Shouldn't be used. Internal param containing already resolved objects
                        to not fall into infinite loops
        """
        # Bug fix: log message typo "Buliding" corrected.
        logging.debug("Building {}".format(obj))
        if visited is None:
            visited = []
        on_return = None
        if isinstance(obj, IndirectReference):
            if obj not in visited:
                visited.append(obj)
                # Pop the reference from `visited` once this call finishes.
                on_return = visited.pop
                obj = self.obj_by_ref(obj)
        # resolve subsequent references for Arrays, Dictionaries and Streams
        if isinstance(obj, Array):
            obj = [self.build(o, visited, lazy) for o in obj]
        elif isinstance(obj, Dictionary):
            if not lazy:
                obj = {k: self.build(o, visited, lazy) for k, o in obj.items()}
            obj = obj_factory(self, obj)
        elif isinstance(obj, Stream):
            if not lazy:
                obj.dictionary = {k: (self.build(o, visited, lazy)) for k, o in obj.dictionary.items()}
            obj = obj_factory(self, obj)
        elif isinstance(obj, IndirectObject):
            # normally this shouldn't happen, but potentially we can build it
            logging.warning("Attempt to build an indirect object. Possibly a bug.")
            obj = self.build(obj.val, visited, lazy)
        if on_return:
            on_return()
        return obj

    def locate_object(self, num, gen):
        """Locate the raw object with the given number/generation."""
        return self.parser.locate_object(num, gen)

    def obj_by_ref(self, objref):
        """Resolve an IndirectReference into a typed document object."""
        obj = self.parser.locate_object(objref.num, objref.gen)
        return obj_factory(self, obj)

    def deep_obj_by_ref(self, obj, maxdepth=100):
        """Follow chained indirect objects up to `maxdepth` levels."""
        counter = maxdepth
        while isinstance(obj, IndirectObject) and counter:
            obj = self.obj_by_ref(obj)
            counter -= 1
        if isinstance(obj, IndirectObject):
            raise ValueError("Max reference depth exceeded")
        return obj

    def pages(self):
        """
        Yields document pages one by one.
        :return:  :class:`~pdfreader.types.objects.Page` generator.
        """
        return self.root.Pages.pages()

    @property
    def metadata(self):
        """
        Returns document metadata from file's trailer info dict
        :return: dict, if metadata exists `None` otherwise.
        """
        res = None
        info = self.trailer.info
        if info:
            res = self.locate_object(info.num, info.gen)
            for k, v in res.items():
                if isinstance(v, bytes):
                    try:
                        res[k] = v.decode()
                        # Normalize PDF date strings into datetime objects.
                        if k in ('CreationDate', 'ModDate'):
                            res[k] = from_pdf_datetime(res[k])
                    except (UnicodeDecodeError, ValueError, TypeError):
                        # Keep the raw value if decoding/parsing fails.
                        pass
        return res
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import logging
import re
from datetime import datetime
from collections import Counter
from itertools import chain
import json
import boto3
import spacy
import textacy
from lxml import etree
from fuzzywuzzy import process
from django.utils.functional import cached_property
from django.conf import settings
from botocore.exceptions import ClientError
from cwapi.models import SpeakerWordCounts
from cwapi.es_docs import CRECDoc
# from cwapi.es_docs import AttributedSegmentDoc
import parser.text_utils as text_utils
from scraper.crec_scraper import crec_s3_key
logger = logging.getLogger(__name__)
DEFAULT_XML_NS = {'ns': 'http://www.loc.gov/mods/v3'}
SPACY_NLP = spacy.load('en')
APPROX_MATCH_THRESHOLD = 90
# Generic (role-based) speaker strings that can open an attributed segment.
# Bug fix: a missing comma after 'The SPEAKER' caused implicit string
# concatenation with the next entry, producing one bogus element
# ('The SPEAKERThe Acting SPEAKER pro tempore') so that neither of the two
# intended speakers ever matched.
HOUSE_GENERIC_SPEAKERS = [
    'The CLERK', 'The Acting CLERK', 'The ACTING CLERK',
    'The SPEAKER pro tempore', 'The SPEAKER',
    'The Acting SPEAKER pro tempore', 'The ACTING SPEAKER pro tempore',
    'The Acting CHAIR', 'The ACTING CHAIR', 'The Acting CHAIRMAN',
    'The ACTING CHAIRMAN', 'The CHAIRMAN', 'The CHAIRWOMAN',
]
SENATE_GENERIC_SPEAKERS = [
    'The PRESIDING OFFICER', 'The PRESIDENT pro tempore',
    'The Acting PRESIDENT pro tempore', 'The ACTING PRESIDENT pro tempore',
    'The VICE PRESIDENT', 'The CHIEF JUSTICE', 'Mr. Counsel', 'Mrs. Counsel',
    'Ms. Counsel'
]
GENERIC_SPEAKERS = HOUSE_GENERIC_SPEAKERS + SENATE_GENERIC_SPEAKERS
class CRECParser(object):
    """Wraps one CREC (Congressional Record) constituent's mods XML metadata,
    lazily fetching its text from S3 and deriving speakers, named entities,
    noun chunks and speaker-attributed segments."""
    def __init__(self,
                 xml_tree,
                 date_issued,
                 xml_namespace=DEFAULT_XML_NS):
        self._xml_tree = xml_tree
        self._xml_namespace = xml_namespace
        self.date_issued = date_issued
        self.s3 = boto3.client('s3', aws_access_key_id=settings.AWS_ACCESS_KEY, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    def _get_by_xpath(self, xml_tree, xpath):
        # All xpath lookups share the mods namespace mapping.
        return xml_tree.xpath(xpath, namespaces=self._xml_namespace)
    @cached_property
    def id(self):
        """@ID field in mods metadata, usually corresponds to filename minus the
        file extension.
        Example:
            "id-CREC-2017-01-20-pt1-PgD55"
        """
        return self._get_by_xpath(self._xml_tree, 'string(@ID)')
    @cached_property
    def title(self):
        """Title of CREC document.
        """
        return self._get_by_xpath(
            self._xml_tree, 'string(ns:titleInfo/ns:title)'
        )
    @cached_property
    def title_part(self):
        """Section of daily batch of CREC docs, usually one of "Daily Digest",
        "Extensions of Remarks", "House", "Senate".
        """
        return self._get_by_xpath(
            self._xml_tree, 'string(ns:titleInfo/ns:partName)'
        )
    @cached_property
    def pdf_url(self):
        """Location on gpo.gov for the pdf version of this CREC doc.
        """
        return self._get_by_xpath(
            self._xml_tree,
            'string(ns:location/ns:url[@displayLabel="PDF rendition"])'
        )
    @cached_property
    def html_url(self):
        """Location on gpo.gov for the html version of this CREC doc.
        """
        return self._get_by_xpath(
            self._xml_tree,
            'string(ns:location/ns:url[@displayLabel="HTML rendition"])'
        )
    @cached_property
    def page_start(self):
        """CREC docs are grouped into one large pdf each day, this indicates the
        page on which this document starts (a single page can include more than
        one doc).
        """
        return self._get_by_xpath(
            self._xml_tree,
            'string(ns:part[@type="article"]/ns:extent/ns:start)'
        )
    @cached_property
    def page_end(self):
        """CREC docs are grouped into one large pdf each day, this indicates the
        page on which this document ends (a single page can include more than
        one doc).
        """
        return self._get_by_xpath(
            self._xml_tree,
            'string(ns:part[@type="article"]/ns:extent/ns:end)'
        )
    @cached_property
    def speakers(self):
        """List of names of people identified as speakers in this doc. Names
        usually corrrespond to the ``official_full`` field in bioguide data.
        Can be empty.
        Examples:
            ``['Mitch McConnell', 'Roy Blunt', 'Charles E. Schumer']``
            ``['Charles E. Schumer']``
            ``[]``
        """
        return self._get_by_xpath(
            self._xml_tree, 'ns:name[@type="personal"]/ns:namePart/text()'
        )
    @cached_property
    def speaker_ids(self):
        """Maps a short name version of a speaker's name with their bioguideid,
        this is used for matching segments within this doc to the speaker for
        that segment. Can be empty.
        Examples:
            ``{'Mr. GALLEGO': 'G000574'}``
            ``{'Mr. SMITH': 'S000583', 'Mr. LATTA': 'L000566'}``
            ``{}``
        """
        speaker_ids_ = {}
        persons = self._get_by_xpath(
            self._xml_tree, 'ns:extension/ns:congMember'
        )
        for person in persons:
            parsed_name = self._get_by_xpath(
                person, 'string(ns:name[@type="parsed"])'
            )
            # Drop any trailing ' of <state>' qualifier from the parsed name.
            sanitized_name = re.sub(' of .*$', '', parsed_name)
            if person.get('role') == 'SPEAKING':
                speaker_ids_[sanitized_name] = person.get('bioGuideId')
        return speaker_ids_
    @cached_property
    def content(self):
        """The text of this CREC doc (may be plain text or html).
        """
        # NOTE(review): str.strip('id-') strips a *character set* from both
        # ends, not the 'id-' prefix; it works for current id formats but would
        # mangle ids that begin or end with 'i', 'd' or '-'.
        s3_key = crec_s3_key(self.id.strip('id-') + '.htm', self.date_issued)
        try:
            response = self.s3.get_object(
                Bucket=settings.CREC_STAGING_S3_BUCKET, Key=s3_key
            )
            content = response['Body'].read().decode('utf-8')
            return content
        except ClientError as e:
            # TODO: Proper error handling for missing CREC file.
            # NOTE(review): on error this implicitly returns None.
            print(s3_key)
    @cached_property
    def is_daily_digest(self):
        """True if this doc is a daily digest. The Daily Digest is an
        aggregation of all CREC docs for a day, we omit it in favor of parsing
        each individually.
        """
        tokens = self.id.split('-')
        return any([
            tokens[-1].startswith('PgD'),
            tokens[-2].startswith('PgD'),
            (self.title_part and self.title_part.startswith('Daily Digest'))
        ])
    @cached_property
    def is_front_matter(self):
        """True if this is a front matter page. These are effectively cover
        pages and do not contain relevant data.
        """
        tokens = self.id.split('-')
        if len(tokens) > 0:
            return self.id.split('-')[-1].startswith('FrontMatter')
        else:
            return False
    def is_skippable(self):
        """Returns True if this is one of the type of documents in the daily
        aggregation of CREC docs that does not contain relevant data and should
        not be uploaded to elasticsearch.
        """
        return self.is_daily_digest or self.is_front_matter
    @cached_property
    def textacy_text(self):
        """An instance of ``textacy.Doc`` containing preprocessed data from the
        ``content`` field.
        """
        text = text_utils.preprocess(self.content)
        return textacy.Doc(SPACY_NLP(text))
    @cached_property
    def named_entity_counts(self):
        """A nested-dict mapping named entity type to a histogram dict of any
        named entities of that type contained within ``content``. See
        `https://spacy.io/usage/linguistic-features#section-named-entities`_
        for a list and decriptions of these types.
        Example:
            ::
                {
                    'PERSON': {
                        'Benjamin S. Carson': 1, 'Elaine L. Chao': 1
                    },
                    'ORG': {
                        'Senate': 15, 'Chamber Action Routine Proceedings': 1
                    }
                }
        """
        named_entities = text_utils.get_named_entities(self.textacy_text)
        named_entity_counts_ = {}
        if any(named_entities):
            named_entity_types = text_utils.get_named_entity_types(
                named_entities
            )
            named_entity_freqs = text_utils.get_named_entity_frequencies(
                named_entities
            )
            for ne_type in named_entity_types.keys():
                # TODO: Better type name for type == ''?
                if ne_type == 'PERSON':
                    # Person names are additionally camel-cased for consistency.
                    named_entity_counts_[ne_type] = {
                        text_utils.camel_case(ne, force=False): named_entity_freqs[ne]
                        for ne in named_entity_types[ne_type]
                    }
                else:
                    named_entity_counts_[ne_type] = {
                        ne: named_entity_freqs[ne]
                        for ne in named_entity_types[ne_type]
                    }
        return named_entity_counts_
    @cached_property
    def noun_chunks_counts(self):
        """A dict mapping noun chunks type to number of occurrences within
        ``content``.
        Example:
            ::
                {
                    'united states trade representative': 1,
                    'unanimous consent agreement': 1,
                }
        """
        noun_chunks = text_utils.get_noun_chunks(self.textacy_text)
        # Drop chunks that duplicate already-counted named entities.
        noun_chunks = text_utils.named_entity_dedupe(
            noun_chunks,
            chain(*[d.keys() for d in self.named_entity_counts.values()])
        )
        return dict(Counter(noun_chunks))
    @cached_property
    def segments(self):
        """List of segments of ``content`` attributed to individual speakers.
        Speakers can be indviduals identified by name (and usually bioGuideId)
        or generic speakers (roles). A sentence containing an individual or
        generic speaker (exact or approximate match) marks the beginning of
        new segment.
        Example:
            ::
                [
                    {
                        'id': 'id-CREC-2017-01-20-pt1-PgS348-1',
                        'speaker': 'Mr. McCONNELL',
                        'text': 'THANKING FORMER PRESIDENT OBAMA. Mr. McCONNELL.Mr. President, I wish to offer a few words regarding...',
                        'bioguide_id': 'M000355'
                    },
                    {
                        'id': 'id-CREC-2017-01-20-pt1-PgS348-2-1',
                        'speaker': 'Mr. DURBIN',
                        'text': 'NOMINATIONS. Mr. DURBIN. Mr. President, I listened carefully to the statement by theRepublican lead...',
                        'bioguide_id': 'D000563'
                    }
                ]
        """
        sents = (sent.string for sent in self.textacy_text.spacy_doc.sents)
        previous = None
        current = None
        segment_index = 0
        segment_sents = []
        segments_ = []
        individual_speakers = self.speaker_ids.keys()
        # '<EOF>' is a sentinel appended so the final segment gets flushed.
        for sent in chain(sents, ('<EOF>',)):
            # First try an exact substring match against known speakers.
            speaker = next(
                filter(lambda person: person in sent, chain(
                    individual_speakers, GENERIC_SPEAKERS)), None)
            if speaker is not None:
                current = speaker
                logger.debug(
                    'Found speaker: {}, previous speaker {}'.format(current, previous))
            else:
                # Fall back to fuzzy matching above APPROX_MATCH_THRESHOLD.
                speaker, score = process.extractOne(sent, chain(
                    individual_speakers, GENERIC_SPEAKERS))
                if score > APPROX_MATCH_THRESHOLD:
                    current = speaker
                    logger.debug(
                        'Found speaker: {} (approx. score {}/100), previous speaker: {}'.format(
                            current, score, previous))
            # A speaker change (or the sentinel) closes the current segment.
            if previous != current or sent == '<EOF>':
                if segment_sents:
                    segment_index += 1
                    segment = {
                        'id': '{}-{}'.format(self.id, segment_index),
                        'speaker': previous,
                        'text': ' '.join(segment_sents),
                        'bioguide_id': None,
                    }
                    if segment['speaker'] in self.speaker_ids:
                        segment['bioguide_id'] = self.speaker_ids[segment['speaker']]
                    segments_.append(segment)
                previous = current
                segment_sents = [sent]
            else:
                segment_sents.append(sent)
        return segments_
    def to_es_doc(self):
        """Returns the CRECParser as a dict ready to be uploaded to
        elasticsearch.
        Returns:
            dict: A dict representation of this document.
        """
        return CRECDoc(
            title=self.title,
            title_part=self.title_part,
            date_issued=self.date_issued,
            content=self.content,
            crec_id=self.id,
            pdf_url=self.pdf_url,
            html_url=self.html_url,
            page_start=self.page_start,
            page_end=self.page_end,
            speakers=','.join(self.speakers),
            segments=self.segments,
        )
def upload_speaker_word_counts(crec_parser):
    """Creates new entries of the SpeakerWordCounts ORM model containing
    counts of named entities and noun chunks within this document.
    Args:
        crec_parser (:class:`parser.crec_parser.CRECParser`): A CRECParser
            instance representing a single CREC document.
    """
    if crec_parser.speaker_ids:
        # NOTE(review): a dict with 'named_entities_<type>' keys used to be
        # built here but was never used — the stored JSON has always been the
        # raw named_entity_counts — so the dead computation was removed.
        for bioguide_id in crec_parser.speaker_ids.values():
            # One row per speaking member found in this document.
            speaker_counts = SpeakerWordCounts(
                bioguide_id=bioguide_id,
                crec_id=crec_parser.id,
                date=crec_parser.date_issued,
                named_entities=json.dumps(crec_parser.named_entity_counts),
                noun_chunks=json.dumps(crec_parser.noun_chunks_counts)
            )
            speaker_counts.save()
def extract_crecs_from_mods(mods_file_obj, xml_namespace=DEFAULT_XML_NS):
    """Takes a file-like object containing mods.xml data for a single day,
    extracts each "constituent" (a single CREC document from that day) and
    creates a new CRECParser instance for that document. Returns all CRECParser
    instances created for a single day as a list.
    Args:
        mods_file_obj (file): An open file, StringIO or BytesIO buffer
            containing mods.xml data.
        xml_namespace (dict): The xml_namespaces argument to use with the lxml
            parser.
    Returns:
        list of :class:`parser.crec_parser.CRECParser`: A list of parsed CREC
            docs for a single day.
    """
    # (Removed a dead `xml_tree = None` assignment that was immediately
    # overwritten by the parse below.)
    xml_tree = etree.parse(mods_file_obj)
    constituents = xml_tree.xpath(
        '//ns:relatedItem[@type="constituent"]',
        namespaces=xml_namespace,
    )
    # The issue date applies to every constituent in the day's batch.
    date_issued_str = xml_tree.xpath(
        'string(//ns:originInfo/ns:dateIssued)',
        namespaces=xml_namespace,
    )
    date_issued = datetime.strptime(date_issued_str, '%Y-%m-%d')
    return [CRECParser(c, date_issued) for c in constituents]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import time
import datetime
from web3.exceptions import TransactionNotFound, BlockNotFound
from web3.middleware import construct_sign_and_send_raw_middleware
from config import (
CONFIRMATIONS,
TARGET,
TARGET_TIME,
ACCOUNT,
BASE_PRICE,
web3,
INSTANCE,
)
def get_transaction_and_receipt(tx_hash):
    """ Function that tries to get the transaction receipt.
    Args:
        tx_hash (hex string) - Hash of transaction to be checked.
    Return:
        Tuple - Transaction and receipt or (None, None) if it doesn't
            exist.
    """
    try:
        tx_inst = web3.eth.getTransaction(tx_hash)
        tx_receipt = web3.eth.getTransactionReceipt(tx_hash)
        return tx_inst, tx_receipt
    except TransactionNotFound:
        return None, None
    except TypeError as error:
        if "Exactly one of the passed values can be specified." in str(error):
            return None, None
        # Bug fix: an unrecognized TypeError previously fell through and the
        # function implicitly returned a single None, breaking callers that
        # unpack two values. Re-raise unknown errors instead.
        raise
    except Exception:
        print("Unpredicted exception has occured")
        raise
def await_confirmations(block_hash):
    """ Function that waits for enough confirmations of block and decides to
    start over again in case of fork.
    Args:
        block_hash (hex string) - Hash of block to be checked.
    Return:
        (Bool) - Returns 'True' in case of succeeding in getting enough
            confirmations. Returns 'False' in case of block outdating.
    """
    while True:
        try:
            block_number = web3.eth.getBlock(block_hash).number
        except BlockNotFound:
            # Fork occured.
            return False
        except Exception:
            print("Unpredicted exception has occured")
            raise
        last_block = web3.eth.blockNumber
        # Enough blocks built on top of ours -> considered confirmed.
        if (last_block - block_number) >= CONFIRMATIONS:
            return True
        # Poll every 3 seconds to avoid hammering the node.
        time.sleep(3)
def increase_price(current_price, current_nonce, pending):
    """ Function that increases the gas price. Is called periodically
    according to time spent in this iteration.
    Args:
        current_price (int) - Current gas price in Wei;
        current_nonce (int) - Current nonce;
        pending[] - Array of transactions sent with this nonce (in case if
            transaction with lower price would be mined before).
    Return:
        current_price (int) - New gas price;
        pending[] - Array of transactions with same nonce with added one.
    """
    try:
        # Bump the price by 10% and re-broadcast with the same nonce.
        current_price += int(current_price / 10)
        tx_hash = process_transaction(current_price, current_nonce)
        if tx_hash is not None:
            pending.append(tx_hash)
    except ValueError as error:
        # One of the txs in pending was mined during increasing process.
        if "nonce too low" in str(error):
            return current_price, pending
        if "known transaction" in str(error):
            # Node already saw this tx; bump again so the next attempt differs.
            current_price += int(current_price / 10)
            return current_price, pending
    except Exception:
        print("Unpredicted exception has occured")
        raise
    return current_price, pending
def adjust_price(iteration, current_price, global_start, last_tx_time):
    """ Function that lowers or raises the gas price based on the overall
    progress towards TARGET transactions in TARGET_TIME seconds, and on the
    duration of the previous transaction.
    Args:
        iteration (int) - Number of previous successful transactions. Iterator
            which changes with the changing of nonce;
        current_price (int) - Current gas price in Wei;
        global_start (float/Unix format) - The start of the whole process;
        last_tx_time (float/Unix format) - Time spent in previous iteration.
    Return:
        current_price (int) - New gas price after adjustments.
    """
    if iteration <= 0:
        # Nothing mined yet: keep the starting price untouched.
        return current_price
    planned_seconds_per_tx = TARGET_TIME / TARGET
    actual_seconds_per_tx = (time.time() - global_start) / iteration
    step = int(current_price / 10)
    # If we check only the duration of the latest tx, it will increase
    # the price very rapidly, ignoring the global progress.
    # So it is necessary to control the price according to plan.
    if actual_seconds_per_tx < planned_seconds_per_tx:
        return current_price - step
    if last_tx_time >= planned_seconds_per_tx:
        return current_price + step
    return current_price
def process_transaction(gas_price, nonce):
    """ Function that tries to form, sign and send the transaction with given
    parameters.
    Args:
        gas_price (int) - Desired gas price;
        nonce (int) - Desired nonce.
    Return:
        (Hex string) or None - Transaction hash or None if error occured.
    """
    try:
        # Build a contract call to increment() with the requested price/nonce.
        tx_builder = INSTANCE.functions.increment().buildTransaction(
            {"gasPrice": gas_price, "nonce": nonce}
        )
        return web3.eth.sendTransaction(tx_builder)
    except ValueError as error:
        # Web3 hasn't updated the nonce yet.
        if "replacement transaction underpriced" in str(error):
            return None
        if "nonce too low" in str(error):
            return None
        raise error
def process_iteration(iteration, current_price, global_start, last_tx_time):
    """ Function that deals with the processing of transactions with same
    nonce till it's farmed and confirmed. Sub-main function of program.
    Args:
        iteration (int) - Number of previous successful transactions. Iterator
        which changes with the changing of nonce;
        current_price (int) - Current gas price in Wei;
        global_start (float/Unix format) - The start of the whole process;
        last_tx_time (float/Unix format) - Time spent in previous iteration.
    Return:
        current_price (int) - The price of successful transaction among the
        others with same nonce;
        time.time() - time_start (float/Unix format) - Time spent in this
        iteration.
    """
    in_pending = 1
    current_progress = iteration + 1
    current_price = adjust_price(
        iteration, current_price, global_start, last_tx_time
    )
    while True:
        # Checking whether web3 updated the nonce after previous transaction.
        current_nonce = web3.eth.getTransactionCount(ACCOUNT.address)
        pending = [process_transaction(current_price, current_nonce)]
        if pending[0] is None:
            pending = []
        else:
            break
    time_start = time.time()
    # Print a table header every 10 txs and on the very first iteration.
    # BUG FIX: the original used "iteration is not (TARGET - 1)", an identity
    # comparison that only works for integers because of CPython's small-int
    # cache; "!=" is the correct value comparison.
    if ((current_progress % 10 == 0) and (iteration != (TARGET - 1))) or (
        iteration == 0
    ):
        status = "Header"
    else:
        status = "Pending"
    print_log(
        current_progress,
        time.ctime(),
        current_nonce,
        current_price,
        status,
        pending[-1],
    )
    while True:
        for some_tx in pending:
            tx_inst, tx_receipt = get_transaction_and_receipt(some_tx)
            if tx_receipt is not None:
                current_price = tx_inst.gasPrice
                print_log(
                    current_progress,
                    time.ctime(),
                    current_nonce,
                    current_price,
                    "Mined",
                    some_tx,
                )
                tx_block_hash = tx_receipt.blockHash
                if not await_confirmations(tx_block_hash):
                    # The fork occured. Rolling back to txs in pending
                    continue
                current_time = time.ctime()
                print_log(
                    current_progress,
                    current_time,
                    current_nonce,
                    current_price,
                    "Success",
                    some_tx,
                )
                return current_price, time.time() - time_start
        # Increasing of price is available once in 25 seconds.
        if (time.time() - time_start) >= 25 * in_pending:
            in_pending += 1
            current_price, pending = increase_price(
                current_price, current_nonce, pending
            )
            print_log(
                current_progress,
                time.ctime(),
                current_nonce,
                current_price,
                "Pending",
                pending[-1],
            )
        time.sleep(1)
def print_log(progress, time, nonce, price, status, tx_hash):
    """ Function that deals with printing the log in particular format.
    Args:
        progress (int) - The biased value representing the current iteration
        in format understandable to man (iteration + 1);
        time (string) - Human-readable time of the event (callers pass
        time.ctime(); note this parameter shadows the time module here);
        nonce (int) - Current nonce;
        price (int) - Current gas price in Wei;
        status (string) - String value representing the type of event;
        tx_hash (hex string) - The hash of event transaction.
    """
    # A "Header" event produces two printed lines: first the column
    # header itself, then the row for the event (reported as "Pending").
    if status == "Header":
        header_cells = (
            "#".ljust(len(str(progress))),
            "Date & Time".ljust(len(time)),
            "Nonce".ljust(7),
            "Gas Price".ljust(10),
            "Status".ljust(7),
            "Tx hash",
        )
        print(" {} | {} | {} | {} | {} | {} ".format(*header_cells))
        status = "Pending"
    row_cells = (
        progress,
        time,
        str(nonce).ljust(7),
        str(price).ljust(10),
        status.ljust(7),
        web3.toHex(tx_hash),
    )
    print(" {} | {} | {} | {} | {} | {}".format(*row_cells))
if __name__ == "__main__":
    # Let web3 sign and send raw transactions on behalf of ACCOUNT.
    web3.middleware_onion.add(construct_sign_and_send_raw_middleware(ACCOUNT))
    web3.eth.defaultAccount = ACCOUNT.address
    current_price = BASE_PRICE
    last_tx_time = 0
    global_start = time.time()
    print("Started at {}.".format(time.ctime()))
    # Each iteration drives one nonce to a confirmed transaction and
    # carries the adjusted gas price into the next one.
    for iteration in range(TARGET):
        current_price, last_tx_time = process_iteration(
            iteration, current_price, global_start, last_tx_time
        )
    elapsed = str(datetime.timedelta(seconds=(time.time() - global_start)))
    print("Finished {} transactions in {}.".format(TARGET, elapsed))
|
nilq/baby-python
|
python
|
# PEP 8 import grouping: third-party (django) before local application
# imports, separated by a blank line.
from django.contrib import admin

from pirate_sources.models import IMGSource, URLSource, VideoSource

# Expose the pirate_sources models in the Django admin with default options.
admin.site.register(VideoSource)
admin.site.register(URLSource)
admin.site.register(IMGSource)
|
nilq/baby-python
|
python
|
import os
import sys
import numpy as np
import cv2
import ailia
from logging import getLogger
logger = getLogger(__name__)
def preprocessing_img(img):
    """Normalize an input image to 4-channel BGRA."""
    ndim = len(img.shape)
    if ndim < 3:
        # 2-D array: single-channel grayscale.
        return cv2.cvtColor(img, cv2.COLOR_GRAY2BGRA)
    channels = img.shape[2]
    if channels == 3:
        return cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
    if channels == 1:
        return cv2.cvtColor(img, cv2.COLOR_GRAY2BGRA)
    # Anything else (e.g. already 4-channel) passes through unchanged.
    return img
def load_image(image_path):
    """Read an image from disk and return it converted to BGRA.

    Logs an error and terminates the process when the path is not a file.
    """
    if not os.path.isfile(image_path):
        logger.error(f'{image_path} not found.')
        sys.exit()
    raw = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
    return preprocessing_img(raw)
def hsv_to_rgb(h, s, v):
    """Convert one HSV triple (uint8 ranges) to a 4-tuple color with full alpha.

    NOTE(review): cv2.COLOR_HSV2BGR yields channels in BGR storage order and
    they are returned in that stored order; callers (cv2 drawing functions)
    appear to rely on this — confirm before renaming/reordering.
    """
    pixel = np.array([[[h, s, v]]], dtype=np.uint8)
    converted = cv2.cvtColor(pixel, cv2.COLOR_HSV2BGR)[0][0]
    return (int(converted[0]), int(converted[1]), int(converted[2]), 255)
def letterbox_convert(frame, det_shape):
    """
    Adjust the size of the frame from the webcam to the ailia input shape.
    Parameters
    ----------
    frame: numpy array
    det_shape: tuple
        ailia model input (height,width)
    Returns
    -------
    resized_img: numpy array
        Resized `img` as well as adapt the scale
    """
    target_h, target_w = det_shape[0], det_shape[1]
    frame_h, frame_w = frame.shape[0], frame.shape[1]
    # Scale factor that makes the frame fit inside the target aspect ratio.
    scale = np.max((frame_h / target_h, frame_w / target_w))
    # Black canvas with the target aspect ratio at the frame's scale.
    canvas = np.zeros(
        (int(round(scale * target_h)), int(round(scale * target_w)), 3),
        np.uint8
    )
    # Center the frame on the canvas (letterbox padding on both axes).
    offset = (np.array(canvas.shape) - np.array(frame.shape)) // 2
    canvas[
        offset[0]: offset[0] + frame_h,
        offset[1]: offset[1] + frame_w
    ] = frame
    return cv2.resize(canvas, (target_w, target_h))
def reverse_letterbox(detections, img, det_shape):
    """Map detections from letterboxed coordinates back onto the original image.

    Args:
        detections: iterable of ailia.DetectorObject in letterboxed space.
        img: original image (numpy array) the boxes are mapped onto.
        det_shape: (height, width) of the model input, or None when no
            letterboxing was applied (then coordinates pass through rescaled
            only by the image size).
    Return:
        list of new ailia.DetectorObject with coordinates relative to img.
    """
    h, w = img.shape[0], img.shape[1]
    pad_x = pad_y = 0
    # Fix: "det_shape != None" replaced with the idiomatic (and correct)
    # identity test "is not None".
    if det_shape is not None:
        scale = np.max((h / det_shape[0], w / det_shape[1]))
        # Padding (in original-image pixels) added by the letterbox step.
        start = (det_shape[0:2] - np.array(img.shape[0:2]) / scale) // 2
        pad_x = start[1] * scale
        pad_y = start[0] * scale
    new_detections = []
    for detection in detections:
        logger.debug(detection)
        r = ailia.DetectorObject(
            category=detection.category,
            prob=detection.prob,
            x=(detection.x * (w + pad_x * 2) - pad_x) / w,
            y=(detection.y * (h + pad_y * 2) - pad_y) / h,
            w=(detection.w * (w + pad_x * 2)) / w,
            h=(detection.h * (h + pad_y * 2)) / h,
        )
        new_detections.append(r)
    return new_detections
def plot_results(detector, img, category, segm_masks=None, logging=True):
    """
    Draw detection results (masks, boxes and labels) onto img in place.
    :param detector: ailia.Detector, or list of ailia.DetectorObject
    :param img: ndarray data of image
    :param category: list of category_name
    :param segm_masks: optional list of per-object boolean segmentation masks
    :param logging: output log flg
    :return: img with the detections drawn
    """
    h, w = img.shape[0], img.shape[1]
    count = detector.get_object_count() if hasattr(detector, 'get_object_count') else len(detector)
    if logging:
        print(f'object_count={count}')
    # prepare color data (one hue per category, evenly spread)
    colors = []
    for idx in range(count):
        obj = detector.get_object(idx) if hasattr(detector, 'get_object') else detector[idx]
        # print result
        if logging:
            print(f'+ idx={idx}')
            print(
                f' category={obj.category}[ {category[obj.category]} ]'
            )
            print(f' prob={obj.prob}')
            print(f' x={obj.x}')
            print(f' y={obj.y}')
            print(f' w={obj.w}')
            print(f' h={obj.h}')
        color = hsv_to_rgb(256 * obj.category / (len(category) + 1), 255, 255)
        colors.append(color)
    # draw segmentation area
    if segm_masks:
        for idx in range(count):
            # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool
            # is the documented replacement.
            mask = np.repeat(np.expand_dims(segm_masks[idx], 2), 3, 2).astype(bool)
            color = colors[idx][:3]
            fill = np.repeat(np.repeat([[color]], img.shape[0], 0), img.shape[1], 1)
            # Alpha-blend the fill color over the masked area (70/30).
            img[:, :, :3][mask] = img[:, :, :3][mask] * 0.7 + fill[mask] * 0.3
    # draw bounding box
    for idx in range(count):
        obj = detector.get_object(idx) if hasattr(detector, 'get_object') else detector[idx]
        top_left = (int(w * obj.x), int(h * obj.y))
        bottom_right = (int(w * (obj.x + obj.w)), int(h * (obj.y + obj.h)))
        color = colors[idx]
        cv2.rectangle(img, top_left, bottom_right, color, 4)
    # draw label (filled background box plus white text)
    for idx in range(count):
        obj = detector.get_object(idx) if hasattr(detector, 'get_object') else detector[idx]
        fontScale = w / 2048
        text = category[obj.category] + " " + str(int(obj.prob*100)/100)
        textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale, 1)[0]
        tw = textsize[0]
        th = textsize[1]
        margin = 3
        top_left = (int(w * obj.x), int(h * obj.y))
        bottom_right = (int(w * obj.x) + tw + margin, int(h * obj.y) + th + margin)
        color = colors[idx]
        cv2.rectangle(img, top_left, bottom_right, color, thickness=-1)
        text_color = (255,255,255,255)
        cv2.putText(
            img,
            text,
            (top_left[0], top_left[1] + th),
            cv2.FONT_HERSHEY_SIMPLEX,
            fontScale,
            text_color,
            1
        )
    return img
def write_predictions(file_name, detector, img=None, category=None):
    """Write detections to a text file, one per line:
    "<label> <prob> <x> <y> <w> <h>", with coordinates scaled by the image
    size (or left as-is when img is None).

    Args:
        file_name: output file path.
        detector: ailia.Detector, or list of ailia.DetectorObject.
        img: optional image whose height/width scale the coordinates.
        category: optional list of category names; when omitted, the raw
            numeric category id is written instead.
    """
    h, w = (img.shape[0], img.shape[1]) if img is not None else (1, 1)
    count = detector.get_object_count() if hasattr(detector, 'get_object_count') else len(detector)
    with open(file_name, 'w') as f:
        for idx in range(count):
            obj = detector.get_object(idx) if hasattr(detector, 'get_object') else detector[idx]
            label = category[obj.category] if category else obj.category
            # BUG FIX: with category=None, label is a numeric id and the
            # original code crashed on label.replace(); str() handles both.
            f.write('%s %f %d %d %d %d\n' % (
                str(label).replace(' ', '_'),
                obj.prob,
                int(w * obj.x), int(h * obj.y),
                int(w * obj.w), int(h * obj.h),
            ))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.